diff --git a/.dockerignore b/.dockerignore index 0fbe460c..11f1a350 100644 --- a/.dockerignore +++ b/.dockerignore @@ -26,11 +26,20 @@ Thumbs.db ci_scripts/ # Documentation +docs/ *.md !README.md -# Configs (copy selectively if needed) +# Non-build project files +completions/ configs/ - -# License +Justfile +clippy.toml +rustfmt.toml +flake.nix +flake.lock LICENSE + +# Docker compose files (not needed inside build) +docker-compose*.yml +**/docker-compose*.yml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 50b11c62..b327aaae 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -11,6 +11,10 @@ on: - "**.md" - "!.github/workflows/*.yml" +permissions: + contents: read + pull-requests: read + name: General jobs: @@ -19,7 +23,7 @@ jobs: steps: - uses: actions/checkout@v5 with: - ref: ${{ github.head_ref }} + ref: ${{ github.event.pull_request.head.sha || github.head_ref }} - name: Install nightly toolchain for rustfmt run: rustup install nightly --profile minimal --component rustfmt @@ -32,7 +36,7 @@ jobs: steps: - uses: actions/checkout@v5 with: - ref: ${{ github.head_ref }} + ref: ${{ github.event.pull_request.head.sha || github.head_ref }} - name: Install taplo-cli run: cargo install --locked taplo-cli @@ -45,7 +49,7 @@ jobs: steps: - uses: actions/checkout@v5 with: - ref: ${{ github.head_ref }} + ref: ${{ github.event.pull_request.head.sha || github.head_ref }} - name: Install active toolchain run: rustup install @@ -61,7 +65,7 @@ jobs: steps: - uses: actions/checkout@v5 with: - ref: ${{ github.head_ref }} + ref: ${{ github.event.pull_request.head.sha || github.head_ref }} - name: Install cargo-deny run: cargo install --locked cargo-deny @@ -77,7 +81,7 @@ jobs: steps: - uses: actions/checkout@v5 with: - ref: ${{ github.head_ref }} + ref: ${{ github.event.pull_request.head.sha || github.head_ref }} - uses: ./.github/actions/install-system-deps @@ -106,7 +110,7 @@ jobs: steps: - uses: actions/checkout@v5 with: - ref: ${{ 
github.head_ref }} + ref: ${{ github.event.pull_request.head.sha || github.head_ref }} - uses: ./.github/actions/install-system-deps @@ -134,7 +138,7 @@ jobs: steps: - uses: actions/checkout@v5 with: - ref: ${{ github.head_ref }} + ref: ${{ github.event.pull_request.head.sha || github.head_ref }} - uses: ./.github/actions/install-system-deps @@ -156,33 +160,35 @@ jobs: RUST_LOG: "info" run: cargo nextest run -p integration_tests -- --skip tps_test --skip indexer - integration-tests-indexer: - runs-on: ubuntu-latest - timeout-minutes: 60 - steps: - - uses: actions/checkout@v5 - with: - ref: ${{ github.head_ref }} + # # TODO: Bring this back once we find the source of the errors. + # # + # integration-tests-indexer: + # runs-on: ubuntu-latest + # timeout-minutes: 60 + # steps: + # - uses: actions/checkout@v5 + # with: + # ref: ${{ github.event.pull_request.head.sha || github.head_ref }} - - uses: ./.github/actions/install-system-deps + # - uses: ./.github/actions/install-system-deps - - uses: ./.github/actions/install-risc0 + # - uses: ./.github/actions/install-risc0 - - uses: ./.github/actions/install-logos-blockchain-circuits - with: - github-token: ${{ secrets.GITHUB_TOKEN }} + # - uses: ./.github/actions/install-logos-blockchain-circuits + # with: + # github-token: ${{ secrets.GITHUB_TOKEN }} - - name: Install active toolchain - run: rustup install + # - name: Install active toolchain + # run: rustup install - - name: Install nextest - run: cargo install --locked cargo-nextest + # - name: Install nextest + # run: cargo install --locked cargo-nextest - - name: Run tests - env: - RISC0_DEV_MODE: "1" - RUST_LOG: "info" - run: cargo nextest run -p integration_tests indexer -- --skip tps_test + # - name: Run tests + # env: + # RISC0_DEV_MODE: "1" + # RUST_LOG: "info" + # run: cargo nextest run -p integration_tests indexer -- --skip tps_test valid-proof-test: runs-on: ubuntu-latest @@ -190,7 +196,7 @@ jobs: steps: - uses: actions/checkout@v5 with: - ref: ${{ 
github.head_ref }} + ref: ${{ github.event.pull_request.head.sha || github.head_ref }} - uses: ./.github/actions/install-system-deps @@ -216,7 +222,7 @@ jobs: steps: - uses: actions/checkout@v5 with: - ref: ${{ github.head_ref }} + ref: ${{ github.event.pull_request.head.sha || github.head_ref }} - uses: ./.github/actions/install-risc0 diff --git a/.github/workflows/publish_images.yml b/.github/workflows/publish_images.yml index 619a6209..dbf6a68d 100644 --- a/.github/workflows/publish_images.yml +++ b/.github/workflows/publish_images.yml @@ -12,12 +12,12 @@ jobs: strategy: matrix: include: - - name: sequencer_runner - dockerfile: ./sequencer_runner/Dockerfile + - name: sequencer_service + dockerfile: ./sequencer/service/Dockerfile build_args: | STANDALONE=false - - name: sequencer_runner-standalone - dockerfile: ./sequencer_runner/Dockerfile + - name: sequencer_service-standalone + dockerfile: ./sequencer/service/Dockerfile build_args: | STANDALONE=true - name: indexer_service @@ -50,7 +50,7 @@ jobs: type=ref,event=pr type=semver,pattern={{version}} type=semver,pattern={{major}}.{{minor}} - type=sha,prefix={{branch}}- + type=sha,prefix=sha- type=raw,value=latest,enable={{is_default_branch}} - name: Build and push Docker image diff --git a/.gitignore b/.gitignore index e4898102..b265e9aa 100644 --- a/.gitignore +++ b/.gitignore @@ -6,7 +6,7 @@ data/ .idea/ .vscode/ rocksdb -sequencer_runner/data/ +sequencer/service/data/ storage.json result wallet-ffi/wallet_ffi.h diff --git a/Cargo.lock b/Cargo.lock index 6bb8255c..9e1d157c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,229 +2,6 @@ # It is not intended for manual editing. 
version = 4 -[[package]] -name = "actix" -version = "0.13.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de7fa236829ba0841304542f7614c42b80fca007455315c45c785ccfa873a85b" -dependencies = [ - "actix-macros", - "actix-rt", - "actix_derive", - "bitflags 2.11.0", - "bytes", - "crossbeam-channel", - "futures-core", - "futures-sink", - "futures-task", - "futures-util", - "log", - "once_cell", - "parking_lot", - "pin-project-lite", - "smallvec", - "tokio", - "tokio-util", -] - -[[package]] -name = "actix-codec" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f7b0a21988c1bf877cf4759ef5ddaac04c1c9fe808c9142ecb78ba97d97a28a" -dependencies = [ - "bitflags 2.11.0", - "bytes", - "futures-core", - "futures-sink", - "memchr", - "pin-project-lite", - "tokio", - "tokio-util", - "tracing", -] - -[[package]] -name = "actix-cors" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daa239b93927be1ff123eebada5a3ff23e89f0124ccb8609234e5103d5a5ae6d" -dependencies = [ - "actix-utils", - "actix-web", - "derive_more", - "futures-util", - "log", - "once_cell", - "smallvec", -] - -[[package]] -name = "actix-http" -version = "3.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f860ee6746d0c5b682147b2f7f8ef036d4f92fe518251a3a35ffa3650eafdf0e" -dependencies = [ - "actix-codec", - "actix-rt", - "actix-service", - "actix-utils", - "bitflags 2.11.0", - "bytes", - "bytestring", - "derive_more", - "encoding_rs", - "foldhash", - "futures-core", - "http 0.2.12", - "httparse", - "httpdate", - "itoa", - "language-tags", - "mime", - "percent-encoding", - "pin-project-lite", - "smallvec", - "tokio", - "tokio-util", - "tracing", -] - -[[package]] -name = "actix-macros" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e01ed3140b2f8d422c68afa1ed2e85d996ea619c988ac834d255db32138655cb" -dependencies 
= [ - "quote", - "syn 2.0.117", -] - -[[package]] -name = "actix-router" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14f8c75c51892f18d9c46150c5ac7beb81c95f78c8b83a634d49f4ca32551fe7" -dependencies = [ - "bytestring", - "cfg-if", - "http 0.2.12", - "regex-lite", - "serde", - "tracing", -] - -[[package]] -name = "actix-rt" -version = "2.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92589714878ca59a7626ea19734f0e07a6a875197eec751bb5d3f99e64998c63" -dependencies = [ - "futures-core", - "tokio", -] - -[[package]] -name = "actix-server" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a65064ea4a457eaf07f2fba30b4c695bf43b721790e9530d26cb6f9019ff7502" -dependencies = [ - "actix-rt", - "actix-service", - "actix-utils", - "futures-core", - "futures-util", - "mio", - "socket2 0.5.10", - "tokio", - "tracing", -] - -[[package]] -name = "actix-service" -version = "2.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e46f36bf0e5af44bdc4bdb36fbbd421aa98c79a9bce724e1edeb3894e10dc7f" -dependencies = [ - "futures-core", - "pin-project-lite", -] - -[[package]] -name = "actix-utils" -version = "3.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88a1dcdff1466e3c2488e1cb5c36a71822750ad43839937f85d2f4d9f8b705d8" -dependencies = [ - "local-waker", - "pin-project-lite", -] - -[[package]] -name = "actix-web" -version = "4.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff87453bc3b56e9b2b23c1cc0b1be8797184accf51d2abe0f8a33ec275d316bf" -dependencies = [ - "actix-codec", - "actix-http", - "actix-macros", - "actix-router", - "actix-rt", - "actix-server", - "actix-service", - "actix-utils", - "actix-web-codegen", - "bytes", - "bytestring", - "cfg-if", - "derive_more", - "encoding_rs", - "foldhash", - "futures-core", - "futures-util", - "impl-more", - 
"itoa", - "language-tags", - "log", - "mime", - "once_cell", - "pin-project-lite", - "regex-lite", - "serde", - "serde_json", - "serde_urlencoded", - "smallvec", - "socket2 0.6.3", - "time", - "tracing", - "url", -] - -[[package]] -name = "actix-web-codegen" -version = "4.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f591380e2e68490b5dfaf1dd1aa0ebe78d84ba7067078512b4ea6e4492d622b8" -dependencies = [ - "actix-router", - "proc-macro2", - "quote", - "syn 2.0.117", -] - -[[package]] -name = "actix_derive" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6ac1e58cded18cb28ddc17143c4dea5345b3ad575e14f32f66e4054a56eb271" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.117", -] - [[package]] name = "addchain" version = "0.2.1" @@ -852,9 +629,9 @@ checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] name = "astral-tokio-tar" -version = "0.5.6" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec179a06c1769b1e42e1e2cbe74c7dcdb3d6383c838454d063eaac5bbb7ebbe5" +checksum = "3c23f3af104b40a3430ccb90ed5f7bd877a8dc5c26fc92fde51a22b40890dcf9" dependencies = [ "filetime", "futures-core", @@ -950,6 +727,24 @@ dependencies = [ "syn 2.0.117", ] +[[package]] +name = "ata_core" +version = "0.1.0" +dependencies = [ + "nssa_core", + "risc0-zkvm", + "serde", +] + +[[package]] +name = "ata_program" +version = "0.1.0" +dependencies = [ + "ata_core", + "nssa_core", + "token_core", +] + [[package]] name = "atomic-polyfill" version = "1.0.3" @@ -1011,7 +806,7 @@ dependencies = [ "axum-core 0.4.5", "bytes", "futures-util", - "http 1.4.0", + "http", "http-body", "http-body-util", "hyper", @@ -1045,7 +840,7 @@ dependencies = [ "bytes", "form_urlencoded", "futures-util", - "http 1.4.0", + "http", "http-body", "http-body-util", "hyper", @@ -1080,7 +875,7 @@ dependencies = [ "async-trait", "bytes", "futures-util", - "http 
1.4.0", + "http", "http-body", "http-body-util", "mime", @@ -1099,7 +894,7 @@ checksum = "08c78f31d7b1291f7ee735c1c6780ccde7785daae9a9206026862dab7d8792d1" dependencies = [ "bytes", "futures-core", - "http 1.4.0", + "http", "http-body", "http-body-util", "mime", @@ -1224,19 +1019,12 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" -[[package]] -name = "bitcoin-io" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dee39a0ee5b4095224a0cfc6bf4cc1baf0f9624b96b367e53b66d974e51d953" - [[package]] name = "bitcoin_hashes" version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26ec84b80c482df901772e931a9a681e26a1b9ee2302edeff23cb30328745c8b" dependencies = [ - "bitcoin-io", "hex-conservative", ] @@ -1313,7 +1101,7 @@ dependencies = [ "futures-util", "hex", "home", - "http 1.4.0", + "http", "http-body-util", "hyper", "hyper-named-pipe", @@ -1466,15 +1254,6 @@ dependencies = [ "serde_core", ] -[[package]] -name = "bytestring" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "113b4343b5f6617e7ad401ced8de3cc8b012e73a594347c307b90db3e9271289" -dependencies = [ - "bytes", -] - [[package]] name = "bzip2-sys" version = "0.1.13+1.0.8" @@ -1683,6 +1462,14 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3a822ea5bc7590f9d40f1ba12c0dc3c2760f3482c6984db1573ad11031420831" +[[package]] +name = "clock_core" +version = "0.1.0" +dependencies = [ + "borsh", + "nssa_core", +] + [[package]] name = "cobs" version = "0.3.0" @@ -1732,20 +1519,16 @@ dependencies = [ "anyhow", "base64 0.22.1", "borsh", - "bytesize", + "clock_core", "hex", "log", "logos-blockchain-common-http-client", "nssa", "nssa_core", - "reqwest", "serde", - "serde_json", "serde_with", "sha2", "thiserror 2.0.18", - "tokio-retry", - 
"url", ] [[package]] @@ -1877,15 +1660,6 @@ dependencies = [ "unicode-segmentation", ] -[[package]] -name = "convert_case" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "633458d4ef8c78b72454de2d54fd6ab2e60f9e02be22f3c6104cdc8a4e0fceb9" -dependencies = [ - "unicode-segmentation", -] - [[package]] name = "convert_case" version = "0.11.0" @@ -1992,15 +1766,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b" -[[package]] -name = "crossbeam-channel" -version = "0.5.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" -dependencies = [ - "crossbeam-utils", -] - [[package]] name = "crossbeam-deque" version = "0.8.6" @@ -2297,7 +2062,6 @@ version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "799a97264921d8623a957f6c3b9011f3b5492f557bbb7a5a19b7fa6d06ba8dcb" dependencies = [ - "convert_case 0.10.0", "proc-macro2", "quote", "rustc_version", @@ -3099,7 +2863,7 @@ dependencies = [ "futures-core", "futures-sink", "gloo-utils", - "http 1.4.0", + "http", "js-sys", "pin-project", "serde", @@ -3163,7 +2927,7 @@ dependencies = [ "fnv", "futures-core", "futures-sink", - "http 1.4.0", + "http", "indexmap 2.13.0", "slab", "tokio", @@ -3318,17 +3082,6 @@ dependencies = [ "utf8-width", ] -[[package]] -name = "http" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" -dependencies = [ - "bytes", - "fnv", - "itoa", -] - [[package]] name = "http" version = "1.4.0" @@ -3346,7 +3099,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", - "http 1.4.0", + "http", ] 
[[package]] @@ -3357,7 +3110,7 @@ checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" dependencies = [ "bytes", "futures-core", - "http 1.4.0", + "http", "http-body", "pin-project-lite", ] @@ -3432,7 +3185,7 @@ dependencies = [ "futures-channel", "futures-core", "h2", - "http 1.4.0", + "http", "http-body", "httparse", "httpdate", @@ -3465,7 +3218,7 @@ version = "0.27.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" dependencies = [ - "http 1.4.0", + "http", "hyper", "hyper-util", "log", @@ -3516,14 +3269,14 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "http 1.4.0", + "http", "http-body", "hyper", "ipnet", "libc", "percent-encoding", "pin-project-lite", - "socket2 0.6.3", + "socket2", "system-configuration", "tokio", "tower-service", @@ -3684,12 +3437,6 @@ dependencies = [ "icu_properties", ] -[[package]] -name = "impl-more" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8a5a9a0ff0086c7a148acb942baaabeadf9504d10400b5a05645853729b9cd2" - [[package]] name = "include_bytes_aligned" version = "0.1.4" @@ -3715,6 +3462,7 @@ dependencies = [ "serde_json", "storage", "tempfile", + "testnet_initial_state", "tokio", "url", ] @@ -3725,7 +3473,6 @@ version = "0.1.0" dependencies = [ "anyhow", "arc-swap", - "async-trait", "clap", "env_logger", "futures", @@ -3825,8 +3572,7 @@ name = "integration_tests" version = "0.1.0" dependencies = [ "anyhow", - "base64 0.22.1", - "borsh", + "ata_core", "bytesize", "common", "env_logger", @@ -3839,10 +3585,12 @@ dependencies = [ "nssa", "nssa_core", "sequencer_core", - "sequencer_runner", + "sequencer_service", + "sequencer_service_rpc", "serde_json", "tempfile", "testcontainers", + "testnet_initial_state", "token_core", "tokio", "url", @@ -4048,7 +3796,7 @@ dependencies = [ "futures-channel", "futures-util", "gloo-net", - "http 1.4.0", + "http", 
"jsonrpsee-core", "pin-project", "rustls", @@ -4073,7 +3821,7 @@ dependencies = [ "bytes", "futures-timer", "futures-util", - "http 1.4.0", + "http", "http-body", "http-body-util", "jsonrpsee-types", @@ -4134,7 +3882,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c51b7c290bb68ce3af2d029648148403863b982f138484a73f02a9dd52dbd7f" dependencies = [ "futures-util", - "http 1.4.0", + "http", "http-body", "http-body-util", "hyper", @@ -4160,7 +3908,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc88ff4688e43cc3fa9883a8a95c6fa27aa2e76c96e610b737b6554d650d7fd5" dependencies = [ - "http 1.4.0", + "http", "serde", "serde_json", "thiserror 2.0.18", @@ -4184,7 +3932,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b6fceceeb05301cc4c065ab3bd2fa990d41ff4eb44e4ca1b30fa99c057c3e79" dependencies = [ - "http 1.4.0", + "http", "jsonrpsee-client-transport", "jsonrpsee-core", "jsonrpsee-types", @@ -4232,18 +3980,11 @@ dependencies = [ "nssa", "nssa_core", "rand 0.8.5", - "secp256k1", "serde", "sha2", "thiserror 2.0.18", ] -[[package]] -name = "language-tags" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4345964bb142484797b161f473a503a434de77149dd8c7427788c6e13379388" - [[package]] name = "lazy-regex" version = "3.6.0" @@ -4620,12 +4361,6 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" -[[package]] -name = "local-waker" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d873d7c67ce09b42110d801813efbc9364414e356be9935700d368351657487" - [[package]] name = "lock_api" version = "0.4.14" @@ -5384,7 +5119,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" dependencies = [ "libc", - "log", "wasi", "windows-sys 0.61.2", ] @@ -5398,7 +5132,7 @@ dependencies = [ "bytes", "encoding_rs", "futures-util", - "http 1.4.0", + "http", "httparse", "memchr", "mime", @@ -5534,17 +5268,19 @@ version = "0.1.0" dependencies = [ "anyhow", "borsh", + "clock_core", "env_logger", "hex", "hex-literal 1.1.0", + "k256", "log", "nssa_core", "rand 0.8.5", "risc0-binfmt", "risc0-build", "risc0-zkvm", - "secp256k1", "serde", + "serde_with", "sha2", "test-case", "test_program_methods", @@ -6148,8 +5884,10 @@ name = "program_deployment" version = "0.1.0" dependencies = [ "clap", + "common", "nssa", "nssa_core", + "sequencer_service_rpc", "tokio", "wallet", ] @@ -6167,6 +5905,9 @@ version = "0.1.0" dependencies = [ "amm_core", "amm_program", + "ata_core", + "ata_program", + "clock_core", "nssa_core", "risc0-zkvm", "serde", @@ -6215,7 +5956,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" dependencies = [ "anyhow", - "itertools 0.14.0", + "itertools 0.11.0", "proc-macro2", "quote", "syn 2.0.117", @@ -6228,7 +5969,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "27c6023962132f4b30eb4c172c91ce92d933da334c59c23cddee82358ddafb0b" dependencies = [ "anyhow", - "itertools 0.14.0", + "itertools 0.11.0", "proc-macro2", "quote", "syn 2.0.117", @@ -6270,7 +6011,7 @@ dependencies = [ "quinn-udp", "rustc-hash", "rustls", - "socket2 0.6.3", + "socket2", "thiserror 2.0.18", "tokio", "tracing", @@ -6307,9 +6048,9 @@ dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2 0.6.3", + "socket2", "tracing", - "windows-sys 0.60.2", + "windows-sys 0.59.0", ] [[package]] @@ -6581,12 +6322,6 @@ dependencies = [ "regex-syntax", ] -[[package]] -name = "regex-lite" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cab834c73d247e67f4fae452806d17d3c7501756d98c8808d7c9c7aa7d18f973" - [[package]] name = "regex-syntax" version = "0.8.10" @@ -6606,7 +6341,7 @@ dependencies = [ "futures-core", "futures-util", "h2", - "http 1.4.0", + "http", "http-body", "http-body-util", "hyper", @@ -7210,7 +6945,7 @@ dependencies = [ "security-framework", "security-framework-sys", "webpki-root-certs 0.26.11", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -7221,9 +6956,9 @@ checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" [[package]] name = "rustls-webpki" -version = "0.103.9" +version = "0.103.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53" +checksum = "df33b2b81ac578cabaf06b89b0631153a3f416b0a886e8a7a1707fb51abbd1ef" dependencies = [ "ring", "rustls-pki-types", @@ -7355,26 +7090,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "secp256k1" -version = "0.31.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c3c81b43dc2d8877c216a3fccf76677ee1ebccd429566d3e67447290d0c42b2" -dependencies = [ - "bitcoin_hashes", - "rand 0.9.2", - "secp256k1-sys", -] - -[[package]] -name = "secp256k1-sys" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcb913707158fadaf0d8702c2db0e857de66eb003ccfdda5924b5f5ac98efb38" -dependencies = [ - "cc", -] - [[package]] name = "security-framework" version = "3.7.0" @@ -7428,7 +7143,6 @@ name = "sequencer_core" version = "0.1.0" dependencies = [ "anyhow", - "base58", "bedrock_client", "borsh", "bytesize", @@ -7448,52 +7162,50 @@ dependencies = [ "serde_json", "storage", "tempfile", + "test_program_methods", + "testnet_initial_state", "tokio", "url", ] [[package]] -name = "sequencer_rpc" +name = "sequencer_service" version = "0.1.0" dependencies = [ - "actix-cors", - "actix-web", "anyhow", - "base58", - "base64 0.22.1", - "bedrock_client", "borsh", 
"bytesize", - "common", - "futures", - "hex", - "itertools 0.14.0", - "log", - "mempool", - "nssa", - "sequencer_core", - "serde", - "serde_json", - "tempfile", - "tokio", -] - -[[package]] -name = "sequencer_runner" -version = "0.1.0" -dependencies = [ - "actix", - "actix-web", - "anyhow", "clap", "common", "env_logger", "futures", "indexer_service_rpc", + "jsonrpsee", "log", + "mempool", + "nssa", "sequencer_core", - "sequencer_rpc", + "sequencer_service_protocol", + "sequencer_service_rpc", "tokio", + "tokio-util", +] + +[[package]] +name = "sequencer_service_protocol" +version = "0.1.0" +dependencies = [ + "common", + "nssa", + "nssa_core", +] + +[[package]] +name = "sequencer_service_rpc" +version = "0.1.0" +dependencies = [ + "jsonrpsee", + "sequencer_service_protocol", ] [[package]] @@ -7689,7 +7401,7 @@ dependencies = [ "const_format", "futures", "gloo-net", - "http 1.4.0", + "http", "http-body-util", "hyper", "inventory", @@ -7826,16 +7538,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "socket2" -version = "0.5.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" -dependencies = [ - "libc", - "windows-sys 0.52.0", -] - [[package]] name = "socket2" version = "0.6.3" @@ -7855,7 +7557,7 @@ dependencies = [ "base64 0.22.1", "bytes", "futures", - "http 1.4.0", + "http", "httparse", "log", "rand 0.8.5", @@ -8141,15 +7843,17 @@ dependencies = [ name = "test_programs" version = "0.1.0" dependencies = [ + "clock_core", "nssa_core", "risc0-zkvm", + "serde", ] [[package]] name = "testcontainers" -version = "0.27.1" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1c0624faaa317c56d6d19136580be889677259caf5c897941c6f446b4655068" +checksum = "0bd36b06a2a6c0c3c81a83be1ab05fe86460d054d4d51bf513bc56b3e15bdc22" dependencies = [ "astral-tokio-tar", "async-trait", @@ -8161,7 +7865,7 @@ dependencies = [ "etcetera", "ferroid", 
"futures", - "http 1.4.0", + "http", "itertools 0.14.0", "log", "memchr", @@ -8178,6 +7882,17 @@ dependencies = [ "uuid", ] +[[package]] +name = "testnet_initial_state" +version = "0.1.0" +dependencies = [ + "common", + "key_protocol", + "nssa", + "nssa_core", + "serde", +] + [[package]] name = "thiserror" version = "1.0.69" @@ -8321,7 +8036,7 @@ dependencies = [ "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2 0.6.3", + "socket2", "tokio-macros", "windows-sys 0.61.2", ] @@ -8518,7 +8233,7 @@ dependencies = [ "base64 0.22.1", "bytes", "h2", - "http 1.4.0", + "http", "http-body", "http-body-util", "hyper", @@ -8526,7 +8241,7 @@ dependencies = [ "hyper-util", "percent-encoding", "pin-project", - "socket2 0.6.3", + "socket2", "sync_wrapper", "tokio", "tokio-stream", @@ -8576,7 +8291,7 @@ dependencies = [ "bytes", "futures-core", "futures-util", - "http 1.4.0", + "http", "http-body", "http-body-util", "http-range-header", @@ -8678,7 +8393,7 @@ checksum = "8628dcc84e5a09eb3d8423d6cb682965dea9133204e8fb3efee74c2a0c259442" dependencies = [ "bytes", "data-encoding", - "http 1.4.0", + "http", "httparse", "log", "rand 0.9.2", @@ -8860,7 +8575,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d81f9efa9df032be5934a46a068815a10a042b494b6a58cb0a1a97bb5467ed6f" dependencies = [ "base64 0.22.1", - "http 1.4.0", + "http", "httparse", "log", ] @@ -8954,9 +8669,9 @@ dependencies = [ "amm_core", "anyhow", "async-stream", + "ata_core", "base58", - "base64 0.22.1", - "borsh", + "bip39", "clap", "common", "env_logger", @@ -8972,9 +8687,12 @@ dependencies = [ "nssa_core", "optfield", "rand 0.8.5", + "sequencer_service_rpc", "serde", "serde_json", "sha2", + "testnet_initial_state", + "thiserror 2.0.18", "token_core", "tokio", "url", @@ -8985,9 +8703,9 @@ name = "wallet-ffi" version = "0.1.0" dependencies = [ "cbindgen", - "common", "nssa", "nssa_core", + "sequencer_service_rpc", "tempfile", "tokio", "wallet", @@ -9352,15 +9070,6 @@ 
dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "windows-sys" -version = "0.60.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" -dependencies = [ - "windows-targets 0.53.5", -] - [[package]] name = "windows-sys" version = "0.61.2" @@ -9394,30 +9103,13 @@ dependencies = [ "windows_aarch64_gnullvm 0.52.6", "windows_aarch64_msvc 0.52.6", "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm 0.52.6", + "windows_i686_gnullvm", "windows_i686_msvc 0.52.6", "windows_x86_64_gnu 0.52.6", "windows_x86_64_gnullvm 0.52.6", "windows_x86_64_msvc 0.52.6", ] -[[package]] -name = "windows-targets" -version = "0.53.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" -dependencies = [ - "windows-link", - "windows_aarch64_gnullvm 0.53.1", - "windows_aarch64_msvc 0.53.1", - "windows_i686_gnu 0.53.1", - "windows_i686_gnullvm 0.53.1", - "windows_i686_msvc 0.53.1", - "windows_x86_64_gnu 0.53.1", - "windows_x86_64_gnullvm 0.53.1", - "windows_x86_64_msvc 0.53.1", -] - [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" @@ -9430,12 +9122,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" - [[package]] name = "windows_aarch64_msvc" version = "0.42.2" @@ -9448,12 +9134,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" -[[package]] -name = "windows_aarch64_msvc" -version = "0.53.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" - [[package]] name = "windows_i686_gnu" version = "0.42.2" @@ -9466,24 +9146,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" -[[package]] -name = "windows_i686_gnu" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" - [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" -[[package]] -name = "windows_i686_gnullvm" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" - [[package]] name = "windows_i686_msvc" version = "0.42.2" @@ -9496,12 +9164,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" -[[package]] -name = "windows_i686_msvc" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" - [[package]] name = "windows_x86_64_gnu" version = "0.42.2" @@ -9514,12 +9176,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" -[[package]] -name = "windows_x86_64_gnu" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" - [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" @@ -9532,12 +9188,6 @@ version 
= "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" - [[package]] name = "windows_x86_64_msvc" version = "0.42.2" @@ -9550,12 +9200,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" -[[package]] -name = "windows_x86_64_msvc" -version = "0.53.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" - [[package]] name = "winnow" version = "0.7.15" diff --git a/Cargo.toml b/Cargo.toml index bcd11651..1d9aa707 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,11 +15,15 @@ members = [ "nssa/core", "programs/amm/core", "programs/amm", + "programs/clock/core", "programs/token/core", "programs/token", - "sequencer_core", - "sequencer_rpc", - "sequencer_runner", + "programs/associated_token_account/core", + "programs/associated_token_account", + "sequencer/core", + "sequencer/service", + "sequencer/service/protocol", + "sequencer/service/rpc", "indexer/core", "indexer/service", "indexer/service/protocol", @@ -33,6 +37,7 @@ members = [ "examples/program_deployment/methods", "examples/program_deployment/methods/guest", "bedrock_client", + "testnet_initial_state", ] [workspace.dependencies] @@ -42,21 +47,26 @@ common = { path = "common" } mempool = { path = "mempool" } storage = { path = "storage" } key_protocol = { path = "key_protocol" } -sequencer_core = { path = "sequencer_core" } -sequencer_rpc = { path = "sequencer_rpc" } -sequencer_runner = { path = "sequencer_runner" } +sequencer_core = { path = "sequencer/core" } +sequencer_service_protocol = { path = 
"sequencer/service/protocol" } +sequencer_service_rpc = { path = "sequencer/service/rpc" } +sequencer_service = { path = "sequencer/service" } indexer_core = { path = "indexer/core" } indexer_service = { path = "indexer/service" } indexer_service_protocol = { path = "indexer/service/protocol" } indexer_service_rpc = { path = "indexer/service/rpc" } wallet = { path = "wallet" } wallet-ffi = { path = "wallet-ffi", default-features = false } +clock_core = { path = "programs/clock/core" } token_core = { path = "programs/token/core" } token_program = { path = "programs/token" } amm_core = { path = "programs/amm/core" } amm_program = { path = "programs/amm" } +ata_core = { path = "programs/associated_token_account/core" } +ata_program = { path = "programs/associated_token_account" } test_program_methods = { path = "test_program_methods" } bedrock_client = { path = "bedrock_client" } +testnet_initial_state = { path = "testnet_initial_state" } tokio = { version = "1.50", features = [ "net", diff --git a/Justfile b/Justfile index b4ec3a98..ac003a15 100644 --- a/Justfile +++ b/Justfile @@ -30,10 +30,10 @@ run-bedrock: docker compose up # Run Sequencer -[working-directory: 'sequencer_runner'] +[working-directory: 'sequencer/service'] run-sequencer: @echo "🧠 Running sequencer" - RUST_LOG=info RISC0_DEV_MODE=1 cargo run --release -p sequencer_runner configs/debug + RUST_LOG=info RISC0_DEV_MODE=1 cargo run --release -p sequencer_service configs/debug/sequencer_config.json # Run Indexer [working-directory: 'indexer/service'] @@ -62,8 +62,8 @@ run-wallet +args: # Clean runtime data clean: @echo "🧹 Cleaning run artifacts" - rm -rf sequencer_runner/bedrock_signing_key - rm -rf sequencer_runner/rocksdb + rm -rf sequencer/service/bedrock_signing_key + rm -rf sequencer/service/rocksdb rm -rf indexer/service/rocksdb rm -rf wallet/configs/debug/storage.json rm -rf rocksdb diff --git a/README.md b/README.md index 1619747b..a08b81fb 100644 --- a/README.md +++ b/README.md @@ -161,7 +161,7 
@@ The sequencer and logos blockchain node can be run locally: - `RUST_LOG=info cargo run -p indexer_service indexer/service/configs/indexer_config.json` 3. On another terminal go to the `logos-blockchain/lssa` repo and run the sequencer: - - `RUST_LOG=info cargo run -p sequencer_runner sequencer_runner/configs/debug` + - `RUST_LOG=info cargo run -p sequencer_service sequencer/service/configs/debug/sequencer_config.json` 4. (To run the explorer): on another terminal go to `logos-blockchain/lssa/explorer_service` and run the following: - `cargo install cargo-leptos` - `cargo leptos build --release` @@ -171,8 +171,8 @@ The sequencer and logos blockchain node can be run locally: After stopping services above you need to remove 3 folders to start cleanly: 1. In the `logos-blockchain/logos-blockchain` folder `state` (not needed in case of docker setup) - 2. In the `lssa` folder `sequencer_runner/rocksdb` - 3. In the `lssa` file `sequencer_runner/bedrock_signing_key` + 2. In the `lssa` folder `sequencer/service/rocksdb` + 3. In the `lssa` file `sequencer/service/bedrock_signing_key` 4. 
In the `lssa` folder `indexer/service/rocksdb` ### Normal mode (`just` commands) @@ -220,7 +220,7 @@ This will use a wallet binary built from this repo and not the one installed in ### Standalone mode The sequencer can be run in standalone mode with: ```bash -RUST_LOG=info cargo run --features standalone -p sequencer_runner sequencer_runner/configs/debug +RUST_LOG=info cargo run --features standalone -p sequencer_service sequencer/service/configs/debug ``` ## Running with Docker diff --git a/artifacts/program_methods/amm.bin b/artifacts/program_methods/amm.bin index d6e08b64..775ec45f 100644 Binary files a/artifacts/program_methods/amm.bin and b/artifacts/program_methods/amm.bin differ diff --git a/artifacts/program_methods/associated_token_account.bin b/artifacts/program_methods/associated_token_account.bin new file mode 100644 index 00000000..917e0dc5 Binary files /dev/null and b/artifacts/program_methods/associated_token_account.bin differ diff --git a/artifacts/program_methods/authenticated_transfer.bin b/artifacts/program_methods/authenticated_transfer.bin index 5dc0bf97..cdce17b9 100644 Binary files a/artifacts/program_methods/authenticated_transfer.bin and b/artifacts/program_methods/authenticated_transfer.bin differ diff --git a/artifacts/program_methods/clock.bin b/artifacts/program_methods/clock.bin new file mode 100644 index 00000000..37a4d30f Binary files /dev/null and b/artifacts/program_methods/clock.bin differ diff --git a/artifacts/program_methods/pinata.bin b/artifacts/program_methods/pinata.bin index cd2ffa53..e18d5c2c 100644 Binary files a/artifacts/program_methods/pinata.bin and b/artifacts/program_methods/pinata.bin differ diff --git a/artifacts/program_methods/pinata_token.bin b/artifacts/program_methods/pinata_token.bin index c6a2a8dd..f2115a68 100644 Binary files a/artifacts/program_methods/pinata_token.bin and b/artifacts/program_methods/pinata_token.bin differ diff --git a/artifacts/program_methods/privacy_preserving_circuit.bin 
b/artifacts/program_methods/privacy_preserving_circuit.bin index 91dd81d5..21cf0ddb 100644 Binary files a/artifacts/program_methods/privacy_preserving_circuit.bin and b/artifacts/program_methods/privacy_preserving_circuit.bin differ diff --git a/artifacts/program_methods/token.bin b/artifacts/program_methods/token.bin index d047e652..ebb374f3 100644 Binary files a/artifacts/program_methods/token.bin and b/artifacts/program_methods/token.bin differ diff --git a/artifacts/test_program_methods/burner.bin b/artifacts/test_program_methods/burner.bin index 0c0f3089..e2fea8bd 100644 Binary files a/artifacts/test_program_methods/burner.bin and b/artifacts/test_program_methods/burner.bin differ diff --git a/artifacts/test_program_methods/chain_caller.bin b/artifacts/test_program_methods/chain_caller.bin index 1abe0774..d6670787 100644 Binary files a/artifacts/test_program_methods/chain_caller.bin and b/artifacts/test_program_methods/chain_caller.bin differ diff --git a/artifacts/test_program_methods/chain_caller_pda_drop.bin b/artifacts/test_program_methods/chain_caller_pda_drop.bin new file mode 100644 index 00000000..91b42aa2 Binary files /dev/null and b/artifacts/test_program_methods/chain_caller_pda_drop.bin differ diff --git a/artifacts/test_program_methods/changer_claimer.bin b/artifacts/test_program_methods/changer_claimer.bin index 8c24294d..47c4200e 100644 Binary files a/artifacts/test_program_methods/changer_claimer.bin and b/artifacts/test_program_methods/changer_claimer.bin differ diff --git a/artifacts/test_program_methods/claimer.bin b/artifacts/test_program_methods/claimer.bin index 674ca600..8b8bc140 100644 Binary files a/artifacts/test_program_methods/claimer.bin and b/artifacts/test_program_methods/claimer.bin differ diff --git a/artifacts/test_program_methods/clock_chain_caller.bin b/artifacts/test_program_methods/clock_chain_caller.bin new file mode 100644 index 00000000..2faa9b69 Binary files /dev/null and 
b/artifacts/test_program_methods/clock_chain_caller.bin differ diff --git a/artifacts/test_program_methods/data_changer.bin b/artifacts/test_program_methods/data_changer.bin index bd5ea48a..2ade0385 100644 Binary files a/artifacts/test_program_methods/data_changer.bin and b/artifacts/test_program_methods/data_changer.bin differ diff --git a/artifacts/test_program_methods/extra_output.bin b/artifacts/test_program_methods/extra_output.bin index ab13c315..d0095d2b 100644 Binary files a/artifacts/test_program_methods/extra_output.bin and b/artifacts/test_program_methods/extra_output.bin differ diff --git a/artifacts/test_program_methods/flash_swap_callback.bin b/artifacts/test_program_methods/flash_swap_callback.bin new file mode 100644 index 00000000..f259c5b3 Binary files /dev/null and b/artifacts/test_program_methods/flash_swap_callback.bin differ diff --git a/artifacts/test_program_methods/flash_swap_initiator.bin b/artifacts/test_program_methods/flash_swap_initiator.bin new file mode 100644 index 00000000..f1b67504 Binary files /dev/null and b/artifacts/test_program_methods/flash_swap_initiator.bin differ diff --git a/artifacts/test_program_methods/malicious_authorization_changer.bin b/artifacts/test_program_methods/malicious_authorization_changer.bin index ebe05ff6..75df8bec 100644 Binary files a/artifacts/test_program_methods/malicious_authorization_changer.bin and b/artifacts/test_program_methods/malicious_authorization_changer.bin differ diff --git a/artifacts/test_program_methods/malicious_caller_program_id.bin b/artifacts/test_program_methods/malicious_caller_program_id.bin new file mode 100644 index 00000000..9907ba58 Binary files /dev/null and b/artifacts/test_program_methods/malicious_caller_program_id.bin differ diff --git a/artifacts/test_program_methods/malicious_self_program_id.bin b/artifacts/test_program_methods/malicious_self_program_id.bin new file mode 100644 index 00000000..b530a0b3 Binary files /dev/null and 
b/artifacts/test_program_methods/malicious_self_program_id.bin differ diff --git a/artifacts/test_program_methods/minter.bin b/artifacts/test_program_methods/minter.bin index 407142e1..392aa2fa 100644 Binary files a/artifacts/test_program_methods/minter.bin and b/artifacts/test_program_methods/minter.bin differ diff --git a/artifacts/test_program_methods/missing_output.bin b/artifacts/test_program_methods/missing_output.bin index 75c7d69e..92998b57 100644 Binary files a/artifacts/test_program_methods/missing_output.bin and b/artifacts/test_program_methods/missing_output.bin differ diff --git a/artifacts/test_program_methods/modified_transfer.bin b/artifacts/test_program_methods/modified_transfer.bin index e78597c9..65475b18 100644 Binary files a/artifacts/test_program_methods/modified_transfer.bin and b/artifacts/test_program_methods/modified_transfer.bin differ diff --git a/artifacts/test_program_methods/nonce_changer.bin b/artifacts/test_program_methods/nonce_changer.bin index 4e7c1f5e..809ed4ec 100644 Binary files a/artifacts/test_program_methods/nonce_changer.bin and b/artifacts/test_program_methods/nonce_changer.bin differ diff --git a/artifacts/test_program_methods/noop.bin b/artifacts/test_program_methods/noop.bin index 3384a65a..9c2fa8bc 100644 Binary files a/artifacts/test_program_methods/noop.bin and b/artifacts/test_program_methods/noop.bin differ diff --git a/artifacts/test_program_methods/pinata_cooldown.bin b/artifacts/test_program_methods/pinata_cooldown.bin new file mode 100644 index 00000000..36e60f9c Binary files /dev/null and b/artifacts/test_program_methods/pinata_cooldown.bin differ diff --git a/artifacts/test_program_methods/program_owner_changer.bin b/artifacts/test_program_methods/program_owner_changer.bin index b88ade13..4dbb34b8 100644 Binary files a/artifacts/test_program_methods/program_owner_changer.bin and b/artifacts/test_program_methods/program_owner_changer.bin differ diff --git 
a/artifacts/test_program_methods/simple_balance_transfer.bin b/artifacts/test_program_methods/simple_balance_transfer.bin index 8ae149e0..df9bee1d 100644 Binary files a/artifacts/test_program_methods/simple_balance_transfer.bin and b/artifacts/test_program_methods/simple_balance_transfer.bin differ diff --git a/artifacts/test_program_methods/time_locked_transfer.bin b/artifacts/test_program_methods/time_locked_transfer.bin new file mode 100644 index 00000000..8b3da3ea Binary files /dev/null and b/artifacts/test_program_methods/time_locked_transfer.bin differ diff --git a/artifacts/test_program_methods/validity_window.bin b/artifacts/test_program_methods/validity_window.bin new file mode 100644 index 00000000..009bb965 Binary files /dev/null and b/artifacts/test_program_methods/validity_window.bin differ diff --git a/artifacts/test_program_methods/validity_window_chain_caller.bin b/artifacts/test_program_methods/validity_window_chain_caller.bin new file mode 100644 index 00000000..cf9e8af5 Binary files /dev/null and b/artifacts/test_program_methods/validity_window_chain_caller.bin differ diff --git a/bedrock_client/src/lib.rs b/bedrock_client/src/lib.rs index fdd14f72..4e9bfffd 100644 --- a/bedrock_client/src/lib.rs +++ b/bedrock_client/src/lib.rs @@ -46,7 +46,7 @@ impl BedrockClient { info!("Creating Bedrock client with node URL {node_url}"); let client = Client::builder() //Add more fields if needed - .timeout(std::time::Duration::from_secs(60)) + .timeout(std::time::Duration::from_mins(1)) .build() .context("Failed to build HTTP client")?; diff --git a/common/Cargo.toml b/common/Cargo.toml index 8aafed40..dbf5ec0c 100644 --- a/common/Cargo.toml +++ b/common/Cargo.toml @@ -10,19 +10,15 @@ workspace = true [dependencies] nssa.workspace = true nssa_core.workspace = true +clock_core.workspace = true anyhow.workspace = true thiserror.workspace = true -serde_json.workspace = true serde.workspace = true serde_with.workspace = true -reqwest.workspace = true 
+base64.workspace = true sha2.workspace = true log.workspace = true hex.workspace = true borsh.workspace = true -bytesize.workspace = true -base64.workspace = true -url.workspace = true logos-blockchain-common-http-client.workspace = true -tokio-retry.workspace = true diff --git a/common/src/block.rs b/common/src/block.rs index 8ef2eb0c..92adbdb1 100644 --- a/common/src/block.rs +++ b/common/src/block.rs @@ -1,14 +1,12 @@ use borsh::{BorshDeserialize, BorshSerialize}; -use nssa::AccountId; +use nssa_core::BlockId; +pub use nssa_core::Timestamp; use serde::{Deserialize, Serialize}; use sha2::{Digest as _, Sha256, digest::FixedOutput as _}; use crate::{HashType, transaction::NSSATransaction}; - pub type MantleMsgId = [u8; 32]; pub type BlockHash = HashType; -pub type BlockId = u64; -pub type TimeStamp = u64; #[derive(Debug, Clone, BorshSerialize, BorshDeserialize)] pub struct BlockMeta { @@ -36,7 +34,7 @@ pub struct BlockHeader { pub block_id: BlockId, pub prev_block_hash: BlockHash, pub hash: BlockHash, - pub timestamp: TimeStamp, + pub timestamp: Timestamp, pub signature: nssa::Signature, } @@ -60,11 +58,23 @@ pub struct Block { pub bedrock_parent_id: MantleMsgId, } +impl Serialize for Block { + fn serialize(&self, serializer: S) -> Result { + crate::borsh_base64::serialize(self, serializer) + } +} + +impl<'de> Deserialize<'de> for Block { + fn deserialize>(deserializer: D) -> Result { + crate::borsh_base64::deserialize(deserializer) + } +} + #[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)] pub struct HashableBlockData { pub block_id: BlockId, pub prev_block_hash: BlockHash, - pub timestamp: TimeStamp, + pub timestamp: Timestamp, pub transactions: Vec, } @@ -111,20 +121,6 @@ impl From for HashableBlockData { } } -/// Helper struct for account (de-)serialization. 
-#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct AccountInitialData { - pub account_id: AccountId, - pub balance: u128, -} - -/// Helper struct to (de-)serialize initial commitments. -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct CommitmentsInitialData { - pub npk: nssa_core::NullifierPublicKey, - pub account: nssa_core::account::Account, -} - #[cfg(test)] mod tests { use crate::{HashType, block::HashableBlockData, test_utils}; diff --git a/common/src/borsh_base64.rs b/common/src/borsh_base64.rs new file mode 100644 index 00000000..2dc7bdec --- /dev/null +++ b/common/src/borsh_base64.rs @@ -0,0 +1,25 @@ +//! This module provides utilities for serializing and deserializing data by combining Borsh and +//! Base64 encodings. + +use base64::{Engine as _, engine::general_purpose::STANDARD}; +use borsh::{BorshDeserialize, BorshSerialize}; +use serde::{Deserialize, Serialize}; + +pub fn serialize( + value: &T, + serializer: S, +) -> Result { + let borsh_encoded = borsh::to_vec(value).map_err(serde::ser::Error::custom)?; + let base64_encoded = STANDARD.encode(&borsh_encoded); + Serialize::serialize(&base64_encoded, serializer) +} + +pub fn deserialize<'de, T: BorshDeserialize, D: serde::Deserializer<'de>>( + deserializer: D, +) -> Result { + let base64_encoded = ::deserialize(deserializer)?; + let borsh_encoded = STANDARD + .decode(base64_encoded.as_bytes()) + .map_err(serde::de::Error::custom)?; + borsh::from_slice(&borsh_encoded).map_err(serde::de::Error::custom) +} diff --git a/common/src/error.rs b/common/src/error.rs deleted file mode 100644 index 1e348a32..00000000 --- a/common/src/error.rs +++ /dev/null @@ -1,43 +0,0 @@ -use nssa::AccountId; -use serde::Deserialize; - -use crate::rpc_primitives::errors::RpcError; - -#[derive(Debug, Clone, Deserialize)] -pub struct SequencerRpcError { - pub jsonrpc: String, - pub error: RpcError, - pub id: u64, -} - -#[derive(thiserror::Error, Debug)] -pub enum SequencerClientError { - #[error("HTTP error")] 
- HTTPError(#[from] reqwest::Error), - #[error("Serde error")] - SerdeError(#[from] serde_json::Error), - #[error("Internal error: {0:?}")] - InternalError(SequencerRpcError), -} - -impl From for SequencerClientError { - fn from(value: SequencerRpcError) -> Self { - Self::InternalError(value) - } -} - -#[derive(Debug, thiserror::Error)] -pub enum ExecutionFailureKind { - #[error("Failed to get data from sequencer")] - SequencerError(#[source] anyhow::Error), - #[error("Inputs amounts does not match outputs")] - AmountMismatchError, - #[error("Accounts key not found")] - KeyNotFoundError, - #[error("Sequencer client error: {0:?}")] - SequencerClientError(#[from] SequencerClientError), - #[error("Can not pay for operation")] - InsufficientFundsError, - #[error("Account {0} data is invalid")] - AccountDataError(AccountId), -} diff --git a/common/src/lib.rs b/common/src/lib.rs index da07a602..a7744d63 100644 --- a/common/src/lib.rs +++ b/common/src/lib.rs @@ -4,10 +4,8 @@ use borsh::{BorshDeserialize, BorshSerialize}; use serde_with::{DeserializeFromStr, SerializeDisplay}; pub mod block; +mod borsh_base64; pub mod config; -pub mod error; -pub mod rpc_primitives; -pub mod sequencer_client; pub mod transaction; // Module for tests utility functions diff --git a/common/src/rpc_primitives/errors.rs b/common/src/rpc_primitives/errors.rs deleted file mode 100644 index 28ec0b63..00000000 --- a/common/src/rpc_primitives/errors.rs +++ /dev/null @@ -1,194 +0,0 @@ -use std::fmt; - -use serde_json::{Value, to_value}; - -#[derive(serde::Serialize)] -pub struct RpcParseError(pub String); - -/// This struct may be returned from JSON RPC server in case of error. -/// -/// It is expected that that this struct has impls From<_> all other RPC errors -/// like [`RpcBlockError`](crate::types::blocks::RpcBlockError). 
-#[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Eq)] -#[serde(deny_unknown_fields)] -pub struct RpcError { - #[serde(flatten)] - pub error_struct: Option, - /// Deprecated please use the `error_struct` instead. - pub code: i64, - /// Deprecated please use the `error_struct` instead. - pub message: String, - /// Deprecated please use the `error_struct` instead. - #[serde(skip_serializing_if = "Option::is_none")] - pub data: Option, -} - -#[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Eq)] -#[serde(tag = "name", content = "cause", rename_all = "SCREAMING_SNAKE_CASE")] -pub enum RpcErrorKind { - RequestValidationError(RpcRequestValidationErrorKind), - HandlerError(Value), - InternalError(Value), -} - -#[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Eq)] -#[serde(tag = "name", content = "info", rename_all = "SCREAMING_SNAKE_CASE")] -pub enum RpcRequestValidationErrorKind { - MethodNotFound { method_name: String }, - ParseError { error_message: String }, -} - -/// A general Server Error. -#[derive(serde::Serialize, serde::Deserialize, Debug, PartialEq, Eq, Clone)] -pub enum ServerError { - Timeout, - Closed, -} - -impl RpcError { - /// A generic constructor. - /// - /// Mostly for completeness, doesn't do anything but filling in the corresponding fields. - #[must_use] - pub const fn new(code: i64, message: String, data: Option) -> Self { - Self { - code, - message, - data, - error_struct: None, - } - } - - /// Create an Invalid Param error. - pub fn invalid_params(data: impl serde::Serialize) -> Self { - let value = match to_value(data) { - Ok(value) => value, - Err(err) => { - return Self::server_error(Some(format!( - "Failed to serialize invalid parameters error: {:?}", - err.to_string() - ))); - } - }; - Self::new(-32_602, "Invalid params".to_owned(), Some(value)) - } - - /// Create a server error. 
- pub fn server_error(e: Option) -> Self { - Self::new( - -32_000, - "Server error".to_owned(), - e.map(|v| to_value(v).expect("Must be representable in JSON")), - ) - } - - /// Create a parse error. - #[must_use] - pub fn parse_error(e: String) -> Self { - Self { - code: -32_700, - message: "Parse error".to_owned(), - data: Some(Value::String(e.clone())), - error_struct: Some(RpcErrorKind::RequestValidationError( - RpcRequestValidationErrorKind::ParseError { error_message: e }, - )), - } - } - - #[must_use] - pub fn serialization_error(e: &str) -> Self { - Self::new_internal_error(Some(Value::String(e.to_owned())), e) - } - - /// Helper method to define extract `INTERNAL_ERROR` in separate `RpcErrorKind` - /// Returns `HANDLER_ERROR` if the error is not internal one. - #[must_use] - pub fn new_internal_or_handler_error(error_data: Option, error_struct: Value) -> Self { - if error_struct["name"] == "INTERNAL_ERROR" { - let error_message = match error_struct["info"].get("error_message") { - Some(Value::String(error_message)) => error_message.as_str(), - _ => "InternalError happened during serializing InternalError", - }; - Self::new_internal_error(error_data, error_message) - } else { - Self::new_handler_error(error_data, error_struct) - } - } - - #[must_use] - pub fn new_internal_error(error_data: Option, info: &str) -> Self { - Self { - code: -32_000, - message: "Server error".to_owned(), - data: error_data, - error_struct: Some(RpcErrorKind::InternalError(serde_json::json!({ - "name": "INTERNAL_ERROR", - "info": serde_json::json!({"error_message": info}) - }))), - } - } - - fn new_handler_error(error_data: Option, error_struct: Value) -> Self { - Self { - code: -32_000, - message: "Server error".to_owned(), - data: error_data, - error_struct: Some(RpcErrorKind::HandlerError(error_struct)), - } - } - - /// Create a method not found error. 
- #[must_use] - pub fn method_not_found(method: String) -> Self { - Self { - code: -32_601, - message: "Method not found".to_owned(), - data: Some(Value::String(method.clone())), - error_struct: Some(RpcErrorKind::RequestValidationError( - RpcRequestValidationErrorKind::MethodNotFound { - method_name: method, - }, - )), - } - } -} - -impl fmt::Display for RpcError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{self:?}") - } -} - -impl From for RpcError { - fn from(parse_error: RpcParseError) -> Self { - Self::parse_error(parse_error.0) - } -} - -impl From for RpcError { - fn from(_: std::convert::Infallible) -> Self { - // SAFETY: Infallible error can never be constructed, so this code can never be reached. - unsafe { core::hint::unreachable_unchecked() } - } -} - -impl fmt::Display for ServerError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Self::Timeout => write!(f, "ServerError: Timeout"), - Self::Closed => write!(f, "ServerError: Closed"), - } - } -} - -impl From for RpcError { - fn from(e: ServerError) -> Self { - let error_data = match to_value(&e) { - Ok(value) => value, - Err(_err) => { - return Self::new_internal_error(None, "Failed to serialize ServerError"); - } - }; - Self::new_internal_error(Some(error_data), e.to_string().as_str()) - } -} diff --git a/common/src/rpc_primitives/message.rs b/common/src/rpc_primitives/message.rs deleted file mode 100644 index de7f132e..00000000 --- a/common/src/rpc_primitives/message.rs +++ /dev/null @@ -1,588 +0,0 @@ -// Copyright 2017 tokio-jsonrpc Developers -// -// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms. - -//! JSON-RPC 2.0 messages. -//! -//! The main entrypoint here is the [Message](enum.Message.html). The others are just building -//! blocks and you should generally work with `Message` instead. 
-use std::fmt::{Formatter, Result as FmtResult}; - -use serde::{ - de::{Deserializer, Error, Unexpected, Visitor}, - ser::{SerializeStruct as _, Serializer}, -}; -use serde_json::{Result as JsonResult, Value}; - -use super::errors::RpcError; - -pub type Parsed = Result; - -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -struct Version; - -impl serde::Serialize for Version { - fn serialize(&self, serializer: S) -> Result { - serializer.serialize_str("2.0") - } -} - -impl<'de> serde::Deserialize<'de> for Version { - #[expect( - clippy::renamed_function_params, - reason = "More readable than original serde parameter names" - )] - fn deserialize>(deserializer: D) -> Result { - struct VersionVisitor; - impl Visitor<'_> for VersionVisitor { - type Value = Version; - - fn expecting(&self, formatter: &mut Formatter<'_>) -> FmtResult { - formatter.write_str("a version string") - } - - fn visit_str(self, value: &str) -> Result { - match value { - "2.0" => Ok(Version), - _ => Err(E::invalid_value(Unexpected::Str(value), &"value 2.0")), - } - } - } - deserializer.deserialize_str(VersionVisitor) - } -} - -/// An RPC request. -#[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Eq)] -#[serde(deny_unknown_fields)] -#[expect( - clippy::partial_pub_fields, - reason = "We don't want to allow access to the version, but the others are public for ease of use" -)] -pub struct Request { - jsonrpc: Version, - pub method: String, - #[serde(default, skip_serializing_if = "Value::is_null")] - pub params: Value, - pub id: Value, -} - -impl Request { - #[must_use] - pub fn from_payload_version_2_0(method: String, payload: serde_json::Value) -> Self { - Self { - jsonrpc: Version, - method, - params: payload, - // ToDo: Correct checking of id - id: 1.into(), - } - } - - /// Answer the request with a (positive) reply. - /// - /// The ID is taken from the request. 
- #[must_use] - pub fn reply(&self, reply: Value) -> Message { - Message::Response(Response { - jsonrpc: Version, - result: Ok(reply), - id: self.id.clone(), - }) - } - - /// Answer the request with an error. - #[must_use] - pub fn error(&self, error: RpcError) -> Message { - Message::Response(Response { - jsonrpc: Version, - result: Err(error), - id: self.id.clone(), - }) - } -} - -/// A response to an RPC. -/// -/// It is created by the methods on [Request](struct.Request.html). -#[expect( - clippy::partial_pub_fields, - reason = "We don't want to allow access to the version, but the others are public for ease of use" -)] -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct Response { - jsonrpc: Version, - pub result: Result, - pub id: Value, -} - -impl serde::Serialize for Response { - fn serialize(&self, serializer: S) -> Result { - let mut sub = serializer.serialize_struct("Response", 3)?; - sub.serialize_field("jsonrpc", &self.jsonrpc)?; - match &self.result { - Ok(value) => sub.serialize_field("result", value), - Err(err) => sub.serialize_field("error", err), - }?; - sub.serialize_field("id", &self.id)?; - sub.end() - } -} - -/// A helper trick for deserialization. -#[derive(serde::Deserialize)] -#[serde(deny_unknown_fields)] -struct WireResponse { - // It is actually used to eat and sanity check the deserialized text - #[serde(rename = "jsonrpc")] - _jsonrpc: Version, - // Make sure we accept null as Some(Value::Null), instead of going to None - #[serde(default, deserialize_with = "some_value")] - result: Option, - error: Option, - id: Value, -} - -// Implementing deserialize is hard. We sidestep the difficulty by deserializing a similar -// structure that directly corresponds to whatever is on the wire and then convert it to our more -// convenient representation. 
-impl<'de> serde::Deserialize<'de> for Response { - fn deserialize>(deserializer: D) -> Result { - let wr: WireResponse = serde::Deserialize::deserialize(deserializer)?; - let result = match (wr.result, wr.error) { - (Some(res), None) => Ok(res), - (None, Some(err)) => Err(err), - _ => { - let err = D::Error::custom("Either 'error' or 'result' is expected, but not both"); - return Err(err); - } - }; - Ok(Self { - jsonrpc: Version, - result, - id: wr.id, - }) - } -} - -/// A notification (doesn't expect an answer). -#[expect( - clippy::partial_pub_fields, - reason = "We don't want to allow access to the version, but the others are public for ease of use" -)] -#[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Eq)] -#[serde(deny_unknown_fields)] -pub struct Notification { - jsonrpc: Version, - pub method: String, - #[serde(default, skip_serializing_if = "Value::is_null")] - pub params: Value, -} - -/// One message of the JSON RPC protocol. -/// -/// One message, directly mapped from the structures of the protocol. See the -/// [specification](http://www.jsonrpc.org/specification) for more details. -/// -/// Since the protocol allows one endpoint to be both client and server at the same time, the -/// message can decode and encode both directions of the protocol. -/// -/// The `Batch` variant is supposed to be created directly, without a constructor. -/// -/// The `UnmatchedSub` variant is used when a request is an array and some of the subrequests -/// aren't recognized as valid json rpc 2.0 messages. This is never returned as a top-level -/// element, it is returned as `Err(Broken::Unmatched)`. -#[derive(Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize)] -#[serde(untagged)] -pub enum Message { - /// An RPC request. - Request(Request), - /// A response to a Request. - Response(Response), - /// A notification. - Notification(Notification), - /// A batch of more requests or responses. 
- /// - /// The protocol allows bundling multiple requests, notifications or responses to a single - /// message. - /// - /// This variant has no direct constructor and is expected to be constructed manually. - Batch(Vec), - /// An unmatched sub entry in a `Batch`. - /// - /// When there's a `Batch` and an element doesn't comform to the JSONRPC 2.0 format, that one - /// is represented by this. This is never produced as a top-level value when parsing, the - /// `Err(Broken::Unmatched)` is used instead. It is not possible to serialize. - #[serde(skip_serializing)] - UnmatchedSub(Value), -} - -impl Message { - /// A constructor for a request. - /// - /// The ID is auto-set to dontcare. - #[must_use] - pub fn request(method: String, params: Value) -> Self { - let id = Value::from("dontcare"); - Self::Request(Request { - jsonrpc: Version, - method, - params, - id, - }) - } - - /// Create a top-level error (without an ID). - #[must_use] - pub const fn error(error: RpcError) -> Self { - Self::Response(Response { - jsonrpc: Version, - result: Err(error), - id: Value::Null, - }) - } - - /// A constructor for a notification. - #[must_use] - pub const fn notification(method: String, params: Value) -> Self { - Self::Notification(Notification { - jsonrpc: Version, - method, - params, - }) - } - - /// A constructor for a response. - #[must_use] - pub const fn response(id: Value, result: Result) -> Self { - Self::Response(Response { - jsonrpc: Version, - result, - id, - }) - } - - /// Returns id or Null if there is no id. 
- #[must_use] - pub fn id(&self) -> Value { - match self { - Self::Request(req) => req.id.clone(), - Self::Response(response) => response.id.clone(), - Self::Notification(_) | Self::Batch(_) | Self::UnmatchedSub(_) => Value::Null, - } - } -} - -impl From for String { - fn from(val: Message) -> Self { - ::serde_json::ser::to_string(&val).expect("message serialization to json should not fail") - } -} - -impl From for Vec { - fn from(val: Message) -> Self { - ::serde_json::ser::to_vec(&val) - .expect("message serialization to json bytes should not fail") - } -} - -/// A broken message. -/// -/// Protocol-level errors. -#[derive(Debug, Clone, PartialEq, Eq, serde::Deserialize)] -#[serde(untagged)] -pub enum Broken { - /// It was valid JSON, but doesn't match the form of a JSONRPC 2.0 message. - Unmatched(Value), - /// Invalid JSON. - #[serde(skip_deserializing)] - SyntaxError(String), -} - -impl Broken { - /// Generate an appropriate error message. - /// - /// The error message for these things are specified in the RFC, so this just creates an error - /// with the right values. - #[must_use] - pub fn reply(&self) -> Message { - match self { - Self::Unmatched(_) => Message::error(RpcError::parse_error( - "JSON RPC Request format was expected".to_owned(), - )), - Self::SyntaxError(e) => Message::error(RpcError::parse_error(e.clone())), - } - } -} - -/// A trick to easily deserialize and detect valid JSON, but invalid Message. -#[derive(serde::Deserialize)] -#[serde(untagged)] -pub enum WireMessage { - Message(Message), - Broken(Broken), -} - -pub fn decoded_to_parsed(res: JsonResult) -> Parsed { - match res { - Ok(WireMessage::Message(Message::UnmatchedSub(value))) => Err(Broken::Unmatched(value)), - Ok(WireMessage::Message(m)) => Ok(m), - Ok(WireMessage::Broken(b)) => Err(b), - Err(e) => Err(Broken::SyntaxError(e.to_string())), - } -} - -/// Read a [Message](enum.Message.html) from a slice. 
-/// -/// Invalid JSON or JSONRPC messages are reported as [Broken](enum.Broken.html). -pub fn from_slice(s: &[u8]) -> Parsed { - decoded_to_parsed(::serde_json::de::from_slice(s)) -} - -/// Read a [Message](enum.Message.html) from a string. -/// -/// Invalid JSON or JSONRPC messages are reported as [Broken](enum.Broken.html). -pub fn from_str(s: &str) -> Parsed { - from_slice(s.as_bytes()) -} - -/// Deserializer for `Option` that produces `Some(Value::Null)`. -/// -/// The usual one produces None in that case. But we need to know the difference between -/// `{x: null}` and `{}`. -fn some_value<'de, D: Deserializer<'de>>(deserializer: D) -> Result, D::Error> { - serde::Deserialize::deserialize(deserializer).map(Some) -} - -#[cfg(test)] -mod tests { - use serde_json::{Value, de::from_slice, json, ser::to_vec}; - - use super::*; - - /// Test serialization and deserialization of the Message. - /// - /// We first deserialize it from a string. That way we check deserialization works. - /// But since serialization doesn't have to produce the exact same result (order, spaces, …), - /// we then serialize and deserialize the thing again and check it matches. 
- #[test] - fn message_serde() { - // A helper for running one message test - fn one(input: &str, expected: &Message) { - let parsed: Message = from_str(input).unwrap(); - assert_eq!(*expected, parsed); - let serialized = to_vec(&parsed).unwrap(); - let deserialized: Message = from_slice(&serialized).unwrap(); - assert_eq!(parsed, deserialized); - } - - // A request without parameters - one( - r#"{"jsonrpc": "2.0", "method": "call", "id": 1}"#, - &Message::Request(Request { - jsonrpc: Version, - method: "call".to_owned(), - params: Value::Null, - id: json!(1), - }), - ); - // A request with parameters - one( - r#"{"jsonrpc": "2.0", "method": "call", "params": [1, 2, 3], "id": 2}"#, - &Message::Request(Request { - jsonrpc: Version, - method: "call".to_owned(), - params: json!([1, 2, 3]), - id: json!(2), - }), - ); - // A notification (with parameters) - one( - r#"{"jsonrpc": "2.0", "method": "notif", "params": {"x": "y"}}"#, - &Message::Notification(Notification { - jsonrpc: Version, - method: "notif".to_owned(), - params: json!({"x": "y"}), - }), - ); - // A successful response - one( - r#"{"jsonrpc": "2.0", "result": 42, "id": 3}"#, - &Message::Response(Response { - jsonrpc: Version, - result: Ok(json!(42)), - id: json!(3), - }), - ); - // A successful response - one( - r#"{"jsonrpc": "2.0", "result": null, "id": 3}"#, - &Message::Response(Response { - jsonrpc: Version, - result: Ok(Value::Null), - id: json!(3), - }), - ); - // An error - one( - r#"{"jsonrpc": "2.0", "error": {"code": 42, "message": "Wrong!"}, "id": null}"#, - &Message::Response(Response { - jsonrpc: Version, - result: Err(RpcError::new(42, "Wrong!".to_owned(), None)), - id: Value::Null, - }), - ); - // A batch - one( - r#"[ - {"jsonrpc": "2.0", "method": "notif"}, - {"jsonrpc": "2.0", "method": "call", "id": 42} - ]"#, - &Message::Batch(vec![ - Message::Notification(Notification { - jsonrpc: Version, - method: "notif".to_owned(), - params: Value::Null, - }), - Message::Request(Request { - 
jsonrpc: Version, - method: "call".to_owned(), - params: Value::Null, - id: json!(42), - }), - ]), - ); - // Some handling of broken messages inside a batch - let parsed = from_str( - r#"[ - {"jsonrpc": "2.0", "method": "notif"}, - {"jsonrpc": "2.0", "method": "call", "id": 42}, - true - ]"#, - ) - .unwrap(); - assert_eq!( - Message::Batch(vec![ - Message::Notification(Notification { - jsonrpc: Version, - method: "notif".to_owned(), - params: Value::Null, - }), - Message::Request(Request { - jsonrpc: Version, - method: "call".to_owned(), - params: Value::Null, - id: json!(42), - }), - Message::UnmatchedSub(Value::Bool(true)), - ]), - parsed - ); - to_vec(&Message::UnmatchedSub(Value::Null)).unwrap_err(); - } - - /// A helper for the `broken` test. - /// - /// Check that the given JSON string parses, but is not recognized as a valid RPC message. - /// - /// Test things that are almost but not entirely JSONRPC are rejected. - /// - /// The reject is done by returning it as Unmatched. - #[test] - fn broken() { - // A helper with one test - fn one(input: &str) { - let msg = from_str(input); - match msg { - Err(Broken::Unmatched(_)) => (), - _ => panic!("{input} recognized as an RPC message: {msg:?}!"), - } - } - - // Missing the version - one(r#"{"method": "notif"}"#); - // Wrong version - one(r#"{"jsonrpc": 2.0, "method": "notif"}"#); - // A response with both result and error - one(r#"{"jsonrpc": "2.0", "result": 42, "error": {"code": 42, "message": "!"}, "id": 1}"#); - // A response without an id - one(r#"{"jsonrpc": "2.0", "result": 42}"#); - // An extra field - one(r#"{"jsonrpc": "2.0", "method": "weird", "params": 42, "others": 43, "id": 2}"#); - // Something completely different - one(r#"{"x": [1, 2, 3]}"#); - - match from_str("{]") { - Err(Broken::SyntaxError(_)) => (), - other => panic!("Something unexpected: {other:?}"), - } - } - - /// Test some non-trivial aspects of the constructors. 
- /// - /// This doesn't have a full coverage, because there's not much to actually test there. - /// Most of it is related to the ids. - #[test] - #[ignore = "Not a full coverage test"] - fn constructors() { - let msg1 = Message::request("call".to_owned(), json!([1, 2, 3])); - let msg2 = Message::request("call".to_owned(), json!([1, 2, 3])); - // They differ, even when created with the same parameters - assert_ne!(msg1, msg2); - // And, specifically, they differ in the ID's - let (req1, req2) = if let (Message::Request(req1), Message::Request(req2)) = (msg1, msg2) { - assert_ne!(req1.id, req2.id); - assert!(req1.id.is_string()); - assert!(req2.id.is_string()); - (req1, req2) - } else { - panic!("Non-request received"); - }; - let id1 = req1.id.clone(); - // When we answer a message, we get the same ID - if let Message::Response(resp) = req1.reply(json!([1, 2, 3])) { - assert_eq!( - resp, - Response { - jsonrpc: Version, - result: Ok(json!([1, 2, 3])), - id: id1 - } - ); - } else { - panic!("Not a response"); - } - let id2 = req2.id.clone(); - // The same with an error - if let Message::Response(resp) = req2.error(RpcError::new(42, "Wrong!".to_owned(), None)) { - assert_eq!( - resp, - Response { - jsonrpc: Version, - result: Err(RpcError::new(42, "Wrong!".to_owned(), None)), - id: id2, - } - ); - } else { - panic!("Not a response"); - } - // When we have unmatched, we generate a top-level error with Null id. 
- if let Message::Response(resp) = - Message::error(RpcError::new(43, "Also wrong!".to_owned(), None)) - { - assert_eq!( - resp, - Response { - jsonrpc: Version, - result: Err(RpcError::new(43, "Also wrong!".to_owned(), None)), - id: Value::Null, - } - ); - } else { - panic!("Not a response"); - } - } -} diff --git a/common/src/rpc_primitives/mod.rs b/common/src/rpc_primitives/mod.rs deleted file mode 100644 index cd643712..00000000 --- a/common/src/rpc_primitives/mod.rs +++ /dev/null @@ -1,57 +0,0 @@ -use bytesize::ByteSize; -use serde::{Deserialize, Serialize}; - -pub mod errors; -pub mod message; -pub mod parser; -pub mod requests; - -#[derive(Serialize, Deserialize, Clone, Debug)] -pub struct RpcLimitsConfig { - /// Maximum byte size of the json payload. - pub json_payload_max_size: ByteSize, -} - -impl Default for RpcLimitsConfig { - fn default() -> Self { - Self { - json_payload_max_size: ByteSize::mib(10), - } - } -} - -#[derive(Serialize, Deserialize, Clone, Debug)] -pub struct RpcConfig { - pub addr: String, - pub cors_allowed_origins: Vec, - #[serde(default)] - pub limits_config: RpcLimitsConfig, -} - -impl Default for RpcConfig { - fn default() -> Self { - Self { - addr: "0.0.0.0:3040".to_owned(), - cors_allowed_origins: vec!["*".to_owned()], - limits_config: RpcLimitsConfig::default(), - } - } -} - -impl RpcConfig { - #[must_use] - pub fn new(addr: &str) -> Self { - Self { - addr: addr.to_owned(), - ..Default::default() - } - } - - #[must_use] - pub fn with_port(port: u16) -> Self { - Self { - addr: format!("0.0.0.0:{port}"), - ..Default::default() - } - } -} diff --git a/common/src/rpc_primitives/parser.rs b/common/src/rpc_primitives/parser.rs deleted file mode 100644 index 0b918c94..00000000 --- a/common/src/rpc_primitives/parser.rs +++ /dev/null @@ -1,29 +0,0 @@ -use serde::de::DeserializeOwned; -use serde_json::Value; - -use super::errors::RpcParseError; - -#[macro_export] -macro_rules! 
parse_request { - ($request_name:ty) => { - impl RpcRequest for $request_name { - fn parse(value: Option) -> Result { - parse_params::(value) - } - } - }; -} - -pub trait RpcRequest: Sized { - fn parse(value: Option) -> Result; -} - -pub fn parse_params(value: Option) -> Result { - value.map_or_else( - || Err(RpcParseError("Require at least one parameter".to_owned())), - |value| { - serde_json::from_value(value) - .map_err(|err| RpcParseError(format!("Failed parsing args: {err}"))) - }, - ) -} diff --git a/common/src/rpc_primitives/requests.rs b/common/src/rpc_primitives/requests.rs deleted file mode 100644 index fd566c89..00000000 --- a/common/src/rpc_primitives/requests.rs +++ /dev/null @@ -1,219 +0,0 @@ -use std::collections::HashMap; - -use nssa::AccountId; -use nssa_core::program::ProgramId; -use serde::{Deserialize, Serialize}; -use serde_json::Value; - -use super::{ - errors::RpcParseError, - parser::{RpcRequest, parse_params}, -}; -use crate::{HashType, parse_request}; - -mod base64_deser { - use base64::{Engine as _, engine::general_purpose}; - use serde::{self, Deserialize, Deserializer, Serializer, ser::SerializeSeq as _}; - - pub mod vec { - use super::*; - - pub fn serialize(bytes_vec: &[Vec], serializer: S) -> Result - where - S: Serializer, - { - let mut seq = serializer.serialize_seq(Some(bytes_vec.len()))?; - for bytes in bytes_vec { - let s = general_purpose::STANDARD.encode(bytes); - seq.serialize_element(&s)?; - } - seq.end() - } - - pub fn deserialize<'de, D>(deserializer: D) -> Result>, D::Error> - where - D: Deserializer<'de>, - { - let base64_strings: Vec = Deserialize::deserialize(deserializer)?; - base64_strings - .into_iter() - .map(|s| { - general_purpose::STANDARD - .decode(&s) - .map_err(serde::de::Error::custom) - }) - .collect() - } - } - - pub fn serialize(bytes: &[u8], serializer: S) -> Result - where - S: Serializer, - { - let base64_string = general_purpose::STANDARD.encode(bytes); - serializer.serialize_str(&base64_string) - } - 
- pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> - where - D: Deserializer<'de>, - { - let base64_string: String = Deserialize::deserialize(deserializer)?; - general_purpose::STANDARD - .decode(&base64_string) - .map_err(serde::de::Error::custom) - } -} - -#[derive(Serialize, Deserialize, Debug)] -pub struct HelloRequest; - -#[derive(Serialize, Deserialize, Debug)] -pub struct RegisterAccountRequest { - pub account_id: [u8; 32], -} - -#[derive(Serialize, Deserialize, Debug)] -pub struct SendTxRequest { - #[serde(with = "base64_deser")] - pub transaction: Vec, -} - -#[derive(Serialize, Deserialize, Debug)] -pub struct GetBlockDataRequest { - pub block_id: u64, -} - -/// Get a range of blocks from `start_block_id` to `end_block_id` (inclusive). -#[derive(Serialize, Deserialize, Debug)] -pub struct GetBlockRangeDataRequest { - pub start_block_id: u64, - pub end_block_id: u64, -} - -#[derive(Serialize, Deserialize, Debug)] -pub struct GetGenesisIdRequest; - -#[derive(Serialize, Deserialize, Debug)] -pub struct GetLastBlockRequest; - -#[derive(Serialize, Deserialize, Debug)] -pub struct GetInitialTestnetAccountsRequest; - -#[derive(Serialize, Deserialize, Debug)] -pub struct GetAccountBalanceRequest { - pub account_id: AccountId, -} - -#[derive(Serialize, Deserialize, Debug)] -pub struct GetTransactionByHashRequest { - pub hash: HashType, -} - -#[derive(Serialize, Deserialize, Debug)] -pub struct GetAccountsNoncesRequest { - pub account_ids: Vec, -} - -#[derive(Serialize, Deserialize, Debug)] -pub struct GetAccountRequest { - pub account_id: AccountId, -} - -#[derive(Serialize, Deserialize, Debug)] -pub struct GetProofForCommitmentRequest { - pub commitment: nssa_core::Commitment, -} - -#[derive(Serialize, Deserialize, Debug)] -pub struct GetProgramIdsRequest; - -parse_request!(HelloRequest); -parse_request!(RegisterAccountRequest); -parse_request!(SendTxRequest); -parse_request!(GetBlockDataRequest); -parse_request!(GetBlockRangeDataRequest); 
-parse_request!(GetGenesisIdRequest); -parse_request!(GetLastBlockRequest); -parse_request!(GetInitialTestnetAccountsRequest); -parse_request!(GetAccountBalanceRequest); -parse_request!(GetTransactionByHashRequest); -parse_request!(GetAccountsNoncesRequest); -parse_request!(GetProofForCommitmentRequest); -parse_request!(GetAccountRequest); -parse_request!(GetProgramIdsRequest); - -#[derive(Serialize, Deserialize, Debug)] -pub struct HelloResponse { - pub greeting: String, -} - -#[derive(Serialize, Deserialize, Debug)] -pub struct RegisterAccountResponse { - pub status: String, -} - -#[derive(Serialize, Deserialize, Debug)] -pub struct SendTxResponse { - pub status: String, - pub tx_hash: HashType, -} - -#[derive(Serialize, Deserialize, Debug)] -pub struct GetBlockDataResponse { - #[serde(with = "base64_deser")] - pub block: Vec, -} - -#[derive(Serialize, Deserialize, Debug)] -pub struct GetBlockRangeDataResponse { - #[serde(with = "base64_deser::vec")] - pub blocks: Vec>, -} - -#[derive(Serialize, Deserialize, Debug)] -pub struct GetGenesisIdResponse { - pub genesis_id: u64, -} - -#[derive(Serialize, Deserialize, Debug)] -pub struct GetLastBlockResponse { - pub last_block: u64, -} - -#[derive(Serialize, Deserialize, Debug)] -pub struct GetAccountBalanceResponse { - pub balance: u128, -} - -#[derive(Serialize, Deserialize, Debug)] -pub struct GetAccountsNoncesResponse { - pub nonces: Vec, -} - -#[derive(Serialize, Deserialize, Debug)] -pub struct GetTransactionByHashResponse { - pub transaction: Option, -} - -#[derive(Serialize, Deserialize, Debug)] -pub struct GetAccountResponse { - pub account: nssa::Account, -} - -#[derive(Serialize, Deserialize, Debug)] -pub struct GetProofForCommitmentResponse { - pub membership_proof: Option, -} - -#[derive(Serialize, Deserialize, Debug)] -pub struct GetProgramIdsResponse { - pub program_ids: HashMap, -} - -#[derive(Debug, Serialize, Deserialize, Clone)] -pub struct GetInitialTestnetAccountsResponse { - /// Hex encoded account 
id. - pub account_id: String, - pub balance: u64, -} diff --git a/common/src/sequencer_client.rs b/common/src/sequencer_client.rs deleted file mode 100644 index d52e4585..00000000 --- a/common/src/sequencer_client.rs +++ /dev/null @@ -1,361 +0,0 @@ -use std::{collections::HashMap, ops::RangeInclusive}; - -use anyhow::Result; -use nssa::AccountId; -use nssa_core::program::ProgramId; -use reqwest::Client; -use serde::Deserialize; -use serde_json::Value; -use url::Url; - -use super::rpc_primitives::requests::{ - GetAccountBalanceRequest, GetAccountBalanceResponse, GetBlockDataRequest, GetBlockDataResponse, - GetGenesisIdRequest, GetGenesisIdResponse, GetInitialTestnetAccountsRequest, -}; -use crate::{ - HashType, - config::BasicAuth, - error::{SequencerClientError, SequencerRpcError}, - rpc_primitives::{ - self, - requests::{ - GetAccountRequest, GetAccountResponse, GetAccountsNoncesRequest, - GetAccountsNoncesResponse, GetBlockRangeDataRequest, GetBlockRangeDataResponse, - GetInitialTestnetAccountsResponse, GetLastBlockRequest, GetLastBlockResponse, - GetProgramIdsRequest, GetProgramIdsResponse, GetProofForCommitmentRequest, - GetProofForCommitmentResponse, GetTransactionByHashRequest, - GetTransactionByHashResponse, SendTxRequest, SendTxResponse, - }, - }, - transaction::NSSATransaction, -}; - -#[derive(Debug, Clone, Deserialize)] -struct SequencerRpcResponse { - #[serde(rename = "jsonrpc")] - _jsonrpc: String, - result: serde_json::Value, - #[serde(rename = "id")] - _id: u64, -} - -#[derive(Clone)] -pub struct SequencerClient { - pub client: reqwest::Client, - pub sequencer_addr: Url, - pub basic_auth: Option, -} - -impl SequencerClient { - pub fn new(sequencer_addr: Url) -> Result { - Self::new_with_auth(sequencer_addr, None) - } - - pub fn new_with_auth(sequencer_addr: Url, basic_auth: Option) -> Result { - Ok(Self { - client: Client::builder() - // Add more fields if needed - .timeout(std::time::Duration::from_secs(60)) - // Should be kept in sync with server 
keep-alive settings - .pool_idle_timeout(std::time::Duration::from_secs(5)) - .build()?, - sequencer_addr, - basic_auth, - }) - } - - pub async fn call_method_with_payload( - &self, - method: &str, - payload: Value, - ) -> Result { - let request = - rpc_primitives::message::Request::from_payload_version_2_0(method.to_owned(), payload); - - log::debug!( - "Calling method {method} with payload {request:?} to sequencer at {}", - self.sequencer_addr - ); - - let strategy = tokio_retry::strategy::FixedInterval::from_millis(10000).take(60); - - let response_vall = tokio_retry::Retry::spawn(strategy, || async { - let mut call_builder = self.client.post(self.sequencer_addr.clone()); - - if let Some(BasicAuth { username, password }) = &self.basic_auth { - call_builder = call_builder.basic_auth(username, password.as_deref()); - } - - let call_res_res = call_builder.json(&request).send().await; - - match call_res_res { - Err(err) => Err(err), - Ok(call_res) => call_res.json::().await, - } - }) - .await?; - - if let Ok(response) = serde_json::from_value::(response_vall.clone()) - { - Ok(response.result) - } else { - let err_resp = serde_json::from_value::(response_vall)?; - - Err(err_resp.into()) - } - } - - /// Get block data at `block_id` from sequencer. 
- pub async fn get_block( - &self, - block_id: u64, - ) -> Result { - let block_req = GetBlockDataRequest { block_id }; - - let req = serde_json::to_value(block_req)?; - - let resp = self.call_method_with_payload("get_block", req).await?; - - let resp_deser = serde_json::from_value(resp)?; - - Ok(resp_deser) - } - - pub async fn get_block_range( - &self, - range: RangeInclusive, - ) -> Result { - let block_req = GetBlockRangeDataRequest { - start_block_id: *range.start(), - end_block_id: *range.end(), - }; - - let req = serde_json::to_value(block_req)?; - - let resp = self - .call_method_with_payload("get_block_range", req) - .await?; - - let resp_deser = serde_json::from_value(resp)?; - - Ok(resp_deser) - } - - /// Get last known `blokc_id` from sequencer. - pub async fn get_last_block(&self) -> Result { - let block_req = GetLastBlockRequest {}; - - let req = serde_json::to_value(block_req)?; - - let resp = self.call_method_with_payload("get_last_block", req).await?; - - let resp_deser = serde_json::from_value(resp)?; - - Ok(resp_deser) - } - - /// Get account public balance for `account_id`. `account_id` must be a valid hex-string for 32 - /// bytes. - pub async fn get_account_balance( - &self, - account_id: AccountId, - ) -> Result { - let block_req = GetAccountBalanceRequest { account_id }; - - let req = serde_json::to_value(block_req)?; - - let resp = self - .call_method_with_payload("get_account_balance", req) - .await?; - - let resp_deser = serde_json::from_value(resp)?; - - Ok(resp_deser) - } - - /// Get accounts nonces for `account_ids`. `account_ids` must be a list of valid hex-strings for - /// 32 bytes. 
- pub async fn get_accounts_nonces( - &self, - account_ids: Vec, - ) -> Result { - let block_req = GetAccountsNoncesRequest { account_ids }; - - let req = serde_json::to_value(block_req)?; - - let resp = self - .call_method_with_payload("get_accounts_nonces", req) - .await?; - - let resp_deser = serde_json::from_value(resp)?; - - Ok(resp_deser) - } - - pub async fn get_account( - &self, - account_id: AccountId, - ) -> Result { - let block_req = GetAccountRequest { account_id }; - - let req = serde_json::to_value(block_req)?; - - let resp = self.call_method_with_payload("get_account", req).await?; - - let resp_deser = serde_json::from_value(resp)?; - - Ok(resp_deser) - } - - /// Get transaction details for `hash`. - pub async fn get_transaction_by_hash( - &self, - hash: HashType, - ) -> Result { - let block_req = GetTransactionByHashRequest { hash }; - - let req = serde_json::to_value(block_req)?; - - let resp = self - .call_method_with_payload("get_transaction_by_hash", req) - .await?; - - let resp_deser = serde_json::from_value(resp)?; - - Ok(resp_deser) - } - - /// Send transaction to sequencer. - pub async fn send_tx_public( - &self, - transaction: nssa::PublicTransaction, - ) -> Result { - let transaction = NSSATransaction::Public(transaction); - - let tx_req = SendTxRequest { - transaction: borsh::to_vec(&transaction).unwrap(), - }; - - let req = serde_json::to_value(tx_req)?; - - let resp = self.call_method_with_payload("send_tx", req).await?; - - let resp_deser = serde_json::from_value(resp)?; - - Ok(resp_deser) - } - - /// Send transaction to sequencer. 
- pub async fn send_tx_private( - &self, - transaction: nssa::PrivacyPreservingTransaction, - ) -> Result { - let transaction = NSSATransaction::PrivacyPreserving(transaction); - - let tx_req = SendTxRequest { - transaction: borsh::to_vec(&transaction).unwrap(), - }; - - let req = serde_json::to_value(tx_req)?; - - let resp = self.call_method_with_payload("send_tx", req).await?; - - let resp_deser = serde_json::from_value(resp)?; - - Ok(resp_deser) - } - - /// Get genesis id from sequencer. - pub async fn get_genesis_id(&self) -> Result { - let genesis_req = GetGenesisIdRequest {}; - - let req = serde_json::to_value(genesis_req).unwrap(); - - let resp = self - .call_method_with_payload("get_genesis", req) - .await - .unwrap(); - - let resp_deser = serde_json::from_value(resp).unwrap(); - - Ok(resp_deser) - } - - /// Get initial testnet accounts from sequencer. - pub async fn get_initial_testnet_accounts( - &self, - ) -> Result, SequencerClientError> { - let acc_req = GetInitialTestnetAccountsRequest {}; - - let req = serde_json::to_value(acc_req).unwrap(); - - let resp = self - .call_method_with_payload("get_initial_testnet_accounts", req) - .await - .unwrap(); - - let resp_deser = serde_json::from_value(resp).unwrap(); - - Ok(resp_deser) - } - - /// Get proof for commitment. 
- pub async fn get_proof_for_commitment( - &self, - commitment: nssa_core::Commitment, - ) -> Result, SequencerClientError> { - let acc_req = GetProofForCommitmentRequest { commitment }; - - let req = serde_json::to_value(acc_req).unwrap(); - - let resp = self - .call_method_with_payload("get_proof_for_commitment", req) - .await - .unwrap(); - - let resp_deser = serde_json::from_value::(resp) - .unwrap() - .membership_proof; - - Ok(resp_deser) - } - - pub async fn send_tx_program( - &self, - transaction: nssa::ProgramDeploymentTransaction, - ) -> Result { - let transaction = NSSATransaction::ProgramDeployment(transaction); - - let tx_req = SendTxRequest { - transaction: borsh::to_vec(&transaction).unwrap(), - }; - - let req = serde_json::to_value(tx_req)?; - - let resp = self.call_method_with_payload("send_tx", req).await?; - - let resp_deser = serde_json::from_value(resp)?; - - Ok(resp_deser) - } - - /// Get Ids of the programs used by the node. - pub async fn get_program_ids( - &self, - ) -> Result, SequencerClientError> { - let acc_req = GetProgramIdsRequest {}; - - let req = serde_json::to_value(acc_req).unwrap(); - - let resp = self - .call_method_with_payload("get_program_ids", req) - .await - .unwrap(); - - let resp_deser = serde_json::from_value::(resp) - .unwrap() - .program_ids; - - Ok(resp_deser) - } -} diff --git a/common/src/transaction.rs b/common/src/transaction.rs index 8fdc2074..7ce0e76f 100644 --- a/common/src/transaction.rs +++ b/common/src/transaction.rs @@ -1,6 +1,7 @@ use borsh::{BorshDeserialize, BorshSerialize}; use log::warn; -use nssa::{AccountId, V02State}; +use nssa::{AccountId, V03State, ValidatedStateDiff}; +use nssa_core::{BlockId, Timestamp}; use serde::{Deserialize, Serialize}; use crate::HashType; @@ -12,6 +13,18 @@ pub enum NSSATransaction { ProgramDeployment(nssa::ProgramDeploymentTransaction), } +impl Serialize for NSSATransaction { + fn serialize(&self, serializer: S) -> Result { + crate::borsh_base64::serialize(self, 
serializer) + } +} + +impl<'de> Deserialize<'de> for NSSATransaction { + fn deserialize>(deserializer: D) -> Result { + crate::borsh_base64::deserialize(deserializer) + } +} + impl NSSATransaction { #[must_use] pub fn hash(&self) -> HashType { @@ -53,17 +66,53 @@ impl NSSATransaction { } } + /// Validates the transaction against the current state and returns the resulting diff + /// without applying it. Rejects transactions that modify clock system accounts. + pub fn validate_on_state( + &self, + state: &V03State, + block_id: BlockId, + timestamp: Timestamp, + ) -> Result { + let diff = match self { + Self::Public(tx) => { + ValidatedStateDiff::from_public_transaction(tx, state, block_id, timestamp) + } + Self::PrivacyPreserving(tx) => ValidatedStateDiff::from_privacy_preserving_transaction( + tx, state, block_id, timestamp, + ), + Self::ProgramDeployment(tx) => { + ValidatedStateDiff::from_program_deployment_transaction(tx, state) + } + }?; + + let public_diff = diff.public_diff(); + let touches_clock = nssa::CLOCK_PROGRAM_ACCOUNT_IDS.iter().any(|id| { + public_diff + .get(id) + .is_some_and(|post| *post != state.get_account_by_id(*id)) + }); + if touches_clock { + return Err(nssa::error::NssaError::InvalidInput( + "Transaction modifies system clock accounts".into(), + )); + } + + Ok(diff) + } + + /// Validates the transaction against the current state, rejects modifications to clock + /// system accounts, and applies the resulting diff to the state. 
pub fn execute_check_on_state( self, - state: &mut V02State, + state: &mut V03State, + block_id: BlockId, + timestamp: Timestamp, ) -> Result { - match &self { - Self::Public(tx) => state.transition_from_public_transaction(tx), - Self::PrivacyPreserving(tx) => state.transition_from_privacy_preserving_transaction(tx), - Self::ProgramDeployment(tx) => state.transition_from_program_deployment_transaction(tx), - } - .inspect_err(|err| warn!("Error at transition {err:#?}"))?; - + let diff = self + .validate_on_state(state, block_id, timestamp) + .inspect_err(|err| warn!("Error at transition {err:#?}"))?; + state.apply_state_diff(diff); Ok(self) } } @@ -87,7 +136,7 @@ impl From for NSSATransaction { } #[derive( - Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, BorshSerialize, BorshDeserialize, + Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, BorshSerialize, BorshDeserialize, )] pub enum TxKind { Public, @@ -104,3 +153,20 @@ pub enum TransactionMalformationError { #[error("Transaction size {size} exceeds maximum allowed size of {max} bytes")] TransactionTooLarge { size: usize, max: usize }, } + +/// Returns the canonical Clock Program invocation transaction for the given block timestamp. +/// Every valid block must end with exactly one occurrence of this transaction. 
+#[must_use] +pub fn clock_invocation(timestamp: clock_core::Instruction) -> nssa::PublicTransaction { + let message = nssa::public_transaction::Message::try_new( + nssa::program::Program::clock().id(), + clock_core::CLOCK_PROGRAM_ACCOUNT_IDS.to_vec(), + vec![], + timestamp, + ) + .expect("Clock invocation message should always be constructable"); + nssa::PublicTransaction::new( + message, + nssa::public_transaction::WitnessSet::from_raw_parts(vec![]), + ) +} diff --git a/configs/docker-all-in-one/indexer/indexer_config.json b/configs/docker-all-in-one/indexer_config.json similarity index 100% rename from configs/docker-all-in-one/indexer/indexer_config.json rename to configs/docker-all-in-one/indexer_config.json diff --git a/configs/docker-all-in-one/sequencer/sequencer_config.json b/configs/docker-all-in-one/sequencer_config.json similarity index 97% rename from configs/docker-all-in-one/sequencer/sequencer_config.json rename to configs/docker-all-in-one/sequencer_config.json index 8fc34911..d7fd3490 100644 --- a/configs/docker-all-in-one/sequencer/sequencer_config.json +++ b/configs/docker-all-in-one/sequencer_config.json @@ -1,6 +1,5 @@ { - "home": "/var/lib/sequencer_runner", - "override_rust_log": null, + "home": "/var/lib/sequencer_service", "genesis_id": 1, "is_genesis_random": true, "max_num_tx_in_block": 20, @@ -8,7 +7,6 @@ "mempool_max_size": 10000, "block_create_timeout": "10s", "retry_pending_blocks_timeout": "7s", - "port": 3040, "bedrock_config": { "backoff": { "start_delay": "100ms", diff --git a/docker-compose.override.yml b/docker-compose.override.yml index fe0d18f9..db955b23 100644 --- a/docker-compose.override.yml +++ b/docker-compose.override.yml @@ -7,21 +7,21 @@ services: environment: - RUST_LOG=error - sequencer_runner: + sequencer_service: depends_on: - logos-blockchain-node-0 - indexer_service - volumes: !override - - ./configs/docker-all-in-one/sequencer:/etc/sequencer_runner + volumes: + - 
./configs/docker-all-in-one/sequencer_config.json:/etc/sequencer_service/sequencer_config.json indexer_service: depends_on: - logos-blockchain-node-0 volumes: - - ./configs/docker-all-in-one/indexer/indexer_config.json:/etc/indexer_service/indexer_config.json + - ./configs/docker-all-in-one/indexer_config.json:/etc/indexer_service/indexer_config.json explorer_service: depends_on: - indexer_service environment: - - INDEXER_RPC_URL=http://indexer_service:8779 \ No newline at end of file + - INDEXER_RPC_URL=http://indexer_service:8779 diff --git a/docker-compose.yml b/docker-compose.yml index 93b5896b..4fd3910f 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -6,7 +6,7 @@ include: - path: bedrock/docker-compose.yml - path: - sequencer_runner/docker-compose.yml + sequencer/service/docker-compose.yml - path: indexer/service/docker-compose.yml - path: diff --git a/docs/LEZ testnet v0.1 tutorials/associated-token-accounts.md b/docs/LEZ testnet v0.1 tutorials/associated-token-accounts.md new file mode 100644 index 00000000..330ae909 --- /dev/null +++ b/docs/LEZ testnet v0.1 tutorials/associated-token-accounts.md @@ -0,0 +1,369 @@ +# Associated Token Accounts (ATAs) + +This tutorial covers Associated Token Accounts (ATAs). An ATA lets you derive a unique token holding address from an owner account and a token definition — no need to create and track holding accounts manually. Given the same inputs, anyone can compute the same ATA address without a network call. By the end, you will have practiced: + +1. Deriving ATA addresses locally. +2. Creating an ATA. +3. Sending tokens via ATAs. +4. Burning tokens from an ATA. +5. Listing ATAs across multiple token definitions. +6. Creating an ATA with a private owner. +7. Sending tokens from a private owner's ATA. +8. Burning tokens from a private owner's ATA. + +> [!Important] +> This tutorial assumes you have completed the [wallet-setup](wallet-setup.md) and [custom-tokens](custom-tokens.md) tutorials. 
You need a running wallet with accounts and at least one token definition. + +## Prerequisites + +### Deploy the ATA program + +Unlike the Token program (which is built-in), the ATA program must be deployed before you can use it. The pre-built binary is included in the repository: + +```bash +wallet deploy-program artifacts/program_methods/associated_token_account.bin +``` + +> [!Note] +> Program deployment is idempotent — if the ATA program has already been deployed (e.g. by another user on the same network), the command is a no-op. + +You can verify the deployment succeeded by running any `wallet ata` command. If the program is not deployed, commands that submit transactions will fail. + +The CLI provides commands to work with the ATA program. Run `wallet ata` to see the options: + +```bash +Commands: + address Derive and print the Associated Token Account address (local only, no network) + create Create (or idempotently no-op) the Associated Token Account + send Send tokens from owner's ATA to a recipient + burn Burn tokens from holder's ATA + list List all ATAs for a given owner across multiple token definitions + help Print this message or the help of the given subcommand(s) +``` + +## 1. How ATA addresses work + +An ATA address is deterministically derived from two inputs: + +1. The **owner** account ID. +2. The **token definition** account ID. + +The derivation works as follows: + +``` +seed = SHA256(owner_id || definition_id) +ata_address = AccountId::from((ata_program_id, seed)) +``` + +Because the computation is pure, anyone who knows the owner and definition can reproduce the exact same ATA address — no network call required. + +> [!Note] +> All ATA commands that submit transactions accept a privacy prefix on the owner/holder argument — `Public/` for public accounts and `Private/` for private accounts. Using `Private/` generates a zero-knowledge proof locally and submits only the proof to the sequencer, keeping the owner's identity off-chain. + +## 2. 
Deriving an ATA address (`wallet ata address`) + +The `address` subcommand computes the ATA address locally without submitting a transaction. + +### a. Set up an owner and token definition + +If you already have a public account and a token definition from the custom-tokens tutorial, you can reuse them. Otherwise, create them now: + +```bash +wallet account new public + +# Output: +Generated new account with account_id Public/5FkBei8HYoSUNqh9rWCrJDnSZE5FJfGiWmTvhgBx3qTB +``` + +```bash +wallet account new public + +# Output: +Generated new account with account_id Public/3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4 +``` + +```bash +wallet token new \ + --name MYTOKEN \ + --total-supply 10000 \ + --definition-account-id Public/3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4 \ + --supply-account-id Public/5FkBei8HYoSUNqh9rWCrJDnSZE5FJfGiWmTvhgBx3qTB +``` + +### b. Derive the ATA address + +```bash +wallet ata address \ + --owner 5FkBei8HYoSUNqh9rWCrJDnSZE5FJfGiWmTvhgBx3qTB \ + --token-definition 3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4 + +# Output: +7a2Bf9cKLm3XpRtH1wDqZs8vYjN4eU6gAoFxW5kMnE2R +``` + +> [!Note] +> This is a pure computation — no transaction is submitted and no network connection is needed. The same inputs will always produce the same output. + +## 3. Creating an ATA (`wallet ata create`) + +Before an ATA can hold tokens it must be created on-chain. The `create` subcommand submits a transaction that initializes the ATA. If it already exists, the operation is a no-op. + +### a. Create the ATA + +```bash +wallet ata create \ + --owner Public/5FkBei8HYoSUNqh9rWCrJDnSZE5FJfGiWmTvhgBx3qTB \ + --token-definition 3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4 +``` + +### b. 
Inspect the ATA + +Use the ATA address derived in the previous section: + +```bash +wallet account get --account-id Public/7a2Bf9cKLm3XpRtH1wDqZs8vYjN4eU6gAoFxW5kMnE2R + +# Output: +Holding account owned by ata program +{"account_type":"Token holding","definition_id":"3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4","balance":0} +``` + +> [!Tip] +> Creation is idempotent — running the same command again is a no-op. + +## 4. Sending tokens via ATA (`wallet ata send`) + +The `send` subcommand transfers tokens from the owner's ATA to a recipient account. + +### a. Fund the ATA + +First, move tokens into the ATA from the supply account created earlier: + +```bash +wallet token send \ + --from Public/5FkBei8HYoSUNqh9rWCrJDnSZE5FJfGiWmTvhgBx3qTB \ + --to Public/7a2Bf9cKLm3XpRtH1wDqZs8vYjN4eU6gAoFxW5kMnE2R \ + --amount 5000 +``` + +### b. Create a recipient account + +```bash +wallet account new public + +# Output: +Generated new account with account_id Public/9Ht4Kv8pYmW2rXjN6dFcQsA7bEoLf3gUZx1wDnR5eTi +``` + +### c. Send tokens from the ATA to the recipient + +```bash +wallet ata send \ + --from Public/5FkBei8HYoSUNqh9rWCrJDnSZE5FJfGiWmTvhgBx3qTB \ + --token-definition 3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4 \ + --to 9Ht4Kv8pYmW2rXjN6dFcQsA7bEoLf3gUZx1wDnR5eTi \ + --amount 2000 +``` + +### d. Verify balances + +```bash +wallet account get --account-id Public/7a2Bf9cKLm3XpRtH1wDqZs8vYjN4eU6gAoFxW5kMnE2R + +# Output: +Holding account owned by ata program +{"account_type":"Token holding","definition_id":"3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4","balance":3000} +``` + +```bash +wallet account get --account-id Public/9Ht4Kv8pYmW2rXjN6dFcQsA7bEoLf3gUZx1wDnR5eTi + +# Output: +Holding account owned by token program +{"account_type":"Token holding","definition_id":"3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4","balance":2000} +``` + +## 5. 
Burning tokens from an ATA (`wallet ata burn`) + +The `burn` subcommand destroys tokens held in the owner's ATA, reducing the token's total supply. + +### a. Burn tokens + +```bash +wallet ata burn \ + --holder Public/5FkBei8HYoSUNqh9rWCrJDnSZE5FJfGiWmTvhgBx3qTB \ + --token-definition 3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4 \ + --amount 500 +``` + +### b. Verify the reduced balance + +```bash +wallet account get --account-id Public/7a2Bf9cKLm3XpRtH1wDqZs8vYjN4eU6gAoFxW5kMnE2R + +# Output: +Holding account owned by ata program +{"account_type":"Token holding","definition_id":"3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4","balance":2500} +``` + +## 6. Listing ATAs (`wallet ata list`) + +The `list` subcommand queries ATAs for a given owner across one or more token definitions. + +### a. Create a second token and ATA + +Create a second token definition so there are multiple ATAs to list: + +```bash +wallet account new public + +# Output: +Generated new account with account_id Public/BxR3Lm7YkWp9vNs2hD4qJcTfA8eUoZ6gKn1wXjM5rFi +``` + +```bash +wallet account new public + +# Output: +Generated new account with account_id Public/Ck8mVp4YhWn2rXjD6dFsQtA7bEoLf3gUZx1wDnR9eTs +``` + +```bash +wallet token new \ + --name OTHERTOKEN \ + --total-supply 5000 \ + --definition-account-id Public/BxR3Lm7YkWp9vNs2hD4qJcTfA8eUoZ6gKn1wXjM5rFi \ + --supply-account-id Public/Ck8mVp4YhWn2rXjD6dFsQtA7bEoLf3gUZx1wDnR9eTs +``` + +Create an ATA for the second token: + +```bash +wallet ata create \ + --owner Public/5FkBei8HYoSUNqh9rWCrJDnSZE5FJfGiWmTvhgBx3qTB \ + --token-definition BxR3Lm7YkWp9vNs2hD4qJcTfA8eUoZ6gKn1wXjM5rFi +``` + +### b. 
List ATAs for both token definitions + +```bash +wallet ata list \ + --owner 5FkBei8HYoSUNqh9rWCrJDnSZE5FJfGiWmTvhgBx3qTB \ + --token-definition \ + 3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4 \ + BxR3Lm7YkWp9vNs2hD4qJcTfA8eUoZ6gKn1wXjM5rFi + +# Output: +ATA 7a2Bf9cKLm3XpRtH1wDqZs8vYjN4eU6gAoFxW5kMnE2R (definition 3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4): balance 2500 +ATA 4nPxKd8YmW7rVsH2jDfQcA9bEoLf6gUZx3wTnR1eMs5 (definition BxR3Lm7YkWp9vNs2hD4qJcTfA8eUoZ6gKn1wXjM5rFi): balance 0 +``` + +> [!Note] +> The `list` command derives each ATA address locally and fetches its on-chain state. If an ATA has not been created for a given definition, it prints "No ATA for definition ..." instead. + +## 7. Private owner operations + +All three ATA operations — `create`, `send`, and `burn` — support private owner accounts. Passing a `Private/` prefix on the owner argument switches the wallet into privacy-preserving mode: + +1. The wallet builds the transaction locally. +2. The ATA program is executed inside the RISC0 ZK VM to generate a proof. +3. The proof, the updated ATA state (in plaintext), and an encrypted update for the owner's private account are submitted to the sequencer. +4. The sequencer verifies the proof, writes the ATA state change to the public chain, and records the owner's new commitment in the nullifier set. + +The result is that the ATA account and its token balance are **fully public** — anyone can see them. What stays private is the link between the ATA and its owner: the proof demonstrates that someone with the correct private key authorized the operation, but reveals nothing about which account that was. + +> [!Note] +> The ATA address is derived from `SHA256(owner_id || definition_id)`. Because SHA256 is one-way, the ATA address does not reveal the owner's identity. However, if the owner's account ID becomes known for any other reason, all of their ATAs across every token definition can be enumerated by anyone. + +### a. 
Create a private account + +```bash +wallet account new private + +# Output: +Generated new account with account_id Private/HkR7Lm2YnWp4vNs8hD3qJcTfA6eUoZ9gKn5wXjM1rFi +``` + +### b. Create the ATA for the private owner + +Pass `Private/` on `--owner`. The token definition account has no privacy prefix — it is always a public account. + +```bash +wallet ata create \ + --owner Private/HkR7Lm2YnWp4vNs8hD3qJcTfA6eUoZ9gKn5wXjM1rFi \ + --token-definition 3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4 +``` + +> [!Note] +> Proof generation runs locally in the RISC0 ZK VM and can take up to a minute on first run. + +### c. Verify the ATA was created + +Derive the ATA address using the raw account ID (no privacy prefix): + +```bash +wallet ata address \ + --owner HkR7Lm2YnWp4vNs8hD3qJcTfA6eUoZ9gKn5wXjM1rFi \ + --token-definition 3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4 + +# Output: +2pQxNf7YkWm3rVsH8jDcQaA4bEoLf9gUZx6wTnR2eMs1 +``` + +```bash +wallet account get --account-id Public/2pQxNf7YkWm3rVsH8jDcQaA4bEoLf9gUZx6wTnR2eMs1 + +# Output: +Holding account owned by ata program +{"account_type":"Token holding","definition_id":"3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4","balance":0} +``` + +### d. Fund the ATA + +The ATA is a public account. Fund it with a direct token transfer from any public holding account: + +```bash +wallet token send \ + --from Public/5FkBei8HYoSUNqh9rWCrJDnSZE5FJfGiWmTvhgBx3qTB \ + --to Public/2pQxNf7YkWm3rVsH8jDcQaA4bEoLf9gUZx6wTnR2eMs1 \ + --amount 500 +``` + +### e. 
Send tokens from the private owner's ATA + +```bash +wallet ata send \ + --from Private/HkR7Lm2YnWp4vNs8hD3qJcTfA6eUoZ9gKn5wXjM1rFi \ + --token-definition 3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4 \ + --to 9Ht4Kv8pYmW2rXjN6dFcQsA7bEoLf3gUZx1wDnR5eTi \ + --amount 200 +``` + +Verify the ATA balance decreased: + +```bash +wallet account get --account-id Public/2pQxNf7YkWm3rVsH8jDcQaA4bEoLf9gUZx6wTnR2eMs1 + +# Output: +Holding account owned by ata program +{"account_type":"Token holding","definition_id":"3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4","balance":300} +``` + +### f. Burn tokens from the private owner's ATA + +```bash +wallet ata burn \ + --holder Private/HkR7Lm2YnWp4vNs8hD3qJcTfA6eUoZ9gKn5wXjM1rFi \ + --token-definition 3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4 \ + --amount 100 +``` + +Verify the balance and token supply: + +```bash +wallet account get --account-id Public/2pQxNf7YkWm3rVsH8jDcQaA4bEoLf9gUZx6wTnR2eMs1 + +# Output: +Holding account owned by ata program +{"account_type":"Token holding","definition_id":"3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4","balance":200} +``` diff --git a/examples/program_deployment/Cargo.toml b/examples/program_deployment/Cargo.toml index 96964a36..c41d9247 100644 --- a/examples/program_deployment/Cargo.toml +++ b/examples/program_deployment/Cargo.toml @@ -8,8 +8,10 @@ license = { workspace = true } workspace = true [dependencies] +common.workspace = true nssa.workspace = true nssa_core.workspace = true +sequencer_service_rpc = { workspace = true, features = ["client"] } wallet.workspace = true tokio = { workspace = true, features = ["macros"] } diff --git a/examples/program_deployment/methods/guest/src/bin/hello_world.rs b/examples/program_deployment/methods/guest/src/bin/hello_world.rs index 3391eb5d..3e91db0e 100644 --- a/examples/program_deployment/methods/guest/src/bin/hello_world.rs +++ b/examples/program_deployment/methods/guest/src/bin/hello_world.rs @@ -1,6 +1,4 @@ -use nssa_core::program::{ - 
AccountPostState, DEFAULT_PROGRAM_ID, ProgramInput, read_nssa_inputs, write_nssa_outputs, -}; +use nssa_core::program::{AccountPostState, Claim, ProgramInput, ProgramOutput, read_nssa_inputs}; // Hello-world example program. // @@ -21,6 +19,8 @@ fn main() { // Read inputs let ( ProgramInput { + self_program_id, + caller_program_id, pre_states, instruction: greeting, }, @@ -45,16 +45,19 @@ fn main() { // Wrap the post state account values inside a `AccountPostState` instance. // This is used to forward the account claiming request if any - let post_state = if post_account.program_owner == DEFAULT_PROGRAM_ID { - // This produces a claim request - AccountPostState::new_claimed(post_account) - } else { - // This doesn't produce a claim request - AccountPostState::new(post_account) - }; + let post_state = AccountPostState::new_claimed_if_default(post_account, Claim::Authorized); // The output is a proposed state difference. It will only succeed if the pre states coincide // with the previous values of the accounts, and the transition to the post states conforms // with the NSSA program rules. - write_nssa_outputs(instruction_data, vec![pre_state], vec![post_state]); + // WARNING: constructing a `ProgramOutput` has no effect on its own. `.write()` must be + // called to commit the output. 
+ ProgramOutput::new( + self_program_id, + caller_program_id, + instruction_data, + vec![pre_state], + vec![post_state], + ) + .write(); } diff --git a/examples/program_deployment/methods/guest/src/bin/hello_world_with_authorization.rs b/examples/program_deployment/methods/guest/src/bin/hello_world_with_authorization.rs index e327ca47..70dfa2ae 100644 --- a/examples/program_deployment/methods/guest/src/bin/hello_world_with_authorization.rs +++ b/examples/program_deployment/methods/guest/src/bin/hello_world_with_authorization.rs @@ -1,6 +1,4 @@ -use nssa_core::program::{ - AccountPostState, DEFAULT_PROGRAM_ID, ProgramInput, read_nssa_inputs, write_nssa_outputs, -}; +use nssa_core::program::{AccountPostState, Claim, ProgramInput, ProgramOutput, read_nssa_inputs}; // Hello-world with authorization example program. // @@ -21,6 +19,8 @@ fn main() { // Read inputs let ( ProgramInput { + self_program_id, + caller_program_id, pre_states, instruction: greeting, }, @@ -52,16 +52,19 @@ fn main() { // Wrap the post state account values inside a `AccountPostState` instance. // This is used to forward the account claiming request if any - let post_state = if post_account.program_owner == DEFAULT_PROGRAM_ID { - // This produces a claim request - AccountPostState::new_claimed(post_account) - } else { - // This doesn't produce a claim request - AccountPostState::new(post_account) - }; + let post_state = AccountPostState::new_claimed_if_default(post_account, Claim::Authorized); // The output is a proposed state difference. It will only succeed if the pre states coincide // with the previous values of the accounts, and the transition to the post states conforms // with the NSSA program rules. - write_nssa_outputs(instruction_data, vec![pre_state], vec![post_state]); + // WARNING: constructing a `ProgramOutput` has no effect on its own. `.write()` must be + // called to commit the output. 
+ ProgramOutput::new( + self_program_id, + caller_program_id, + instruction_data, + vec![pre_state], + vec![post_state], + ) + .write(); } diff --git a/examples/program_deployment/methods/guest/src/bin/hello_world_with_move_function.rs b/examples/program_deployment/methods/guest/src/bin/hello_world_with_move_function.rs index 65f0f9cd..4289349b 100644 --- a/examples/program_deployment/methods/guest/src/bin/hello_world_with_move_function.rs +++ b/examples/program_deployment/methods/guest/src/bin/hello_world_with_move_function.rs @@ -1,8 +1,6 @@ use nssa_core::{ - account::{Account, AccountWithMetadata, Data}, - program::{ - AccountPostState, DEFAULT_PROGRAM_ID, ProgramInput, read_nssa_inputs, write_nssa_outputs, - }, + account::{AccountWithMetadata, Data}, + program::{AccountPostState, Claim, ProgramInput, ProgramOutput, read_nssa_inputs}, }; // Hello-world with write + move_data example program. @@ -26,16 +24,6 @@ const MOVE_DATA_FUNCTION_ID: u8 = 1; type Instruction = (u8, Vec); -fn build_post_state(post_account: Account) -> AccountPostState { - if post_account.program_owner == DEFAULT_PROGRAM_ID { - // This produces a claim request - AccountPostState::new_claimed(post_account) - } else { - // This doesn't produce a claim request - AccountPostState::new(post_account) - } -} - fn write(pre_state: AccountWithMetadata, greeting: &[u8]) -> AccountPostState { // Construct the post state account values let post_account = { @@ -48,7 +36,7 @@ fn write(pre_state: AccountWithMetadata, greeting: &[u8]) -> AccountPostState { this }; - build_post_state(post_account) + AccountPostState::new_claimed_if_default(post_account, Claim::Authorized) } fn move_data(from_pre: AccountWithMetadata, to_pre: AccountWithMetadata) -> Vec { @@ -58,7 +46,7 @@ fn move_data(from_pre: AccountWithMetadata, to_pre: AccountWithMetadata) -> Vec< let from_post = { let mut this = from_pre.account; this.data = Data::default(); - build_post_state(this) + AccountPostState::new_claimed_if_default(this, 
Claim::Authorized) }; let to_post = { @@ -68,7 +56,7 @@ fn move_data(from_pre: AccountWithMetadata, to_pre: AccountWithMetadata) -> Vec< this.data = bytes .try_into() .expect("Data should fit within the allowed limits"); - build_post_state(this) + AccountPostState::new_claimed_if_default(this, Claim::Authorized) }; vec![from_post, to_post] @@ -78,6 +66,8 @@ fn main() { // Read input accounts. let ( ProgramInput { + self_program_id, + caller_program_id, pre_states, instruction: (function_id, data), }, @@ -95,5 +85,14 @@ fn main() { _ => panic!("invalid params"), }; - write_nssa_outputs(instruction_words, pre_states, post_states); + // WARNING: constructing a `ProgramOutput` has no effect on its own. `.write()` must be + // called to commit the output. + ProgramOutput::new( + self_program_id, + caller_program_id, + instruction_words, + pre_states, + post_states, + ) + .write(); } diff --git a/examples/program_deployment/methods/guest/src/bin/simple_tail_call.rs b/examples/program_deployment/methods/guest/src/bin/simple_tail_call.rs index 01389085..716e5c29 100644 --- a/examples/program_deployment/methods/guest/src/bin/simple_tail_call.rs +++ b/examples/program_deployment/methods/guest/src/bin/simple_tail_call.rs @@ -1,6 +1,5 @@ use nssa_core::program::{ - AccountPostState, ChainedCall, ProgramId, ProgramInput, read_nssa_inputs, - write_nssa_outputs_with_chained_call, + AccountPostState, ChainedCall, ProgramId, ProgramInput, ProgramOutput, read_nssa_inputs, }; // Tail Call example program. @@ -28,6 +27,8 @@ fn main() { // Read inputs let ( ProgramInput { + self_program_id, + caller_program_id, pre_states, instruction: (), }, @@ -53,11 +54,16 @@ fn main() { pda_seeds: vec![], }; - // Write the outputs - write_nssa_outputs_with_chained_call( + // Write the outputs. + // WARNING: constructing a `ProgramOutput` has no effect on its own. `.write()` must be + // called to commit the output. 
+ ProgramOutput::new( + self_program_id, + caller_program_id, instruction_data, vec![pre_state], vec![post_state], - vec![chained_call], - ); + ) + .with_chained_calls(vec![chained_call]) + .write(); } diff --git a/examples/program_deployment/methods/guest/src/bin/tail_call_with_pda.rs b/examples/program_deployment/methods/guest/src/bin/tail_call_with_pda.rs index 3ebcabd2..5ec9aaab 100644 --- a/examples/program_deployment/methods/guest/src/bin/tail_call_with_pda.rs +++ b/examples/program_deployment/methods/guest/src/bin/tail_call_with_pda.rs @@ -1,6 +1,6 @@ use nssa_core::program::{ - AccountPostState, ChainedCall, PdaSeed, ProgramId, ProgramInput, read_nssa_inputs, - write_nssa_outputs_with_chained_call, + AccountPostState, ChainedCall, PdaSeed, ProgramId, ProgramInput, ProgramOutput, + read_nssa_inputs, }; // Tail Call with PDA example program. @@ -33,6 +33,8 @@ fn main() { // Read inputs let ( ProgramInput { + self_program_id, + caller_program_id, pre_states, instruction: (), }, @@ -65,11 +67,16 @@ fn main() { pda_seeds: vec![PDA_SEED], }; - // Write the outputs - write_nssa_outputs_with_chained_call( + // Write the outputs. + // WARNING: constructing a `ProgramOutput` has no effect on its own. `.write()` must be + // called to commit the output. 
+ ProgramOutput::new( + self_program_id, + caller_program_id, instruction_data, vec![pre_state], vec![post_state], - vec![chained_call], - ); + ) + .with_chained_calls(vec![chained_call]) + .write(); } diff --git a/examples/program_deployment/src/bin/run_hello_world.rs b/examples/program_deployment/src/bin/run_hello_world.rs index 3c0c9034..3d89b1a4 100644 --- a/examples/program_deployment/src/bin/run_hello_world.rs +++ b/examples/program_deployment/src/bin/run_hello_world.rs @@ -1,8 +1,10 @@ +use common::transaction::NSSATransaction; use nssa::{ AccountId, PublicTransaction, program::Program, public_transaction::{Message, WitnessSet}, }; +use sequencer_service_rpc::RpcClient as _; use wallet::WalletCore; // Before running this example, compile the `hello_world.rs` guest program with: @@ -58,7 +60,7 @@ async fn main() { // Submit the transaction let _response = wallet_core .sequencer_client - .send_tx_public(tx) + .send_transaction(NSSATransaction::Public(tx)) .await .unwrap(); } diff --git a/examples/program_deployment/src/bin/run_hello_world_through_tail_call.rs b/examples/program_deployment/src/bin/run_hello_world_through_tail_call.rs index 56d28084..c3c75b5f 100644 --- a/examples/program_deployment/src/bin/run_hello_world_through_tail_call.rs +++ b/examples/program_deployment/src/bin/run_hello_world_through_tail_call.rs @@ -1,8 +1,10 @@ +use common::transaction::NSSATransaction; use nssa::{ AccountId, PublicTransaction, program::Program, public_transaction::{Message, WitnessSet}, }; +use sequencer_service_rpc::RpcClient as _; use wallet::WalletCore; // Before running this example, compile the `simple_tail_call.rs` guest program with: @@ -54,7 +56,7 @@ async fn main() { // Submit the transaction let _response = wallet_core .sequencer_client - .send_tx_public(tx) + .send_transaction(NSSATransaction::Public(tx)) .await .unwrap(); } diff --git a/examples/program_deployment/src/bin/run_hello_world_with_authorization.rs 
b/examples/program_deployment/src/bin/run_hello_world_with_authorization.rs index f38443ac..a9750bce 100644 --- a/examples/program_deployment/src/bin/run_hello_world_with_authorization.rs +++ b/examples/program_deployment/src/bin/run_hello_world_with_authorization.rs @@ -1,9 +1,10 @@ +use common::transaction::NSSATransaction; use nssa::{ AccountId, PublicTransaction, program::Program, public_transaction::{Message, WitnessSet}, }; -use nssa_core::account::Nonce; +use sequencer_service_rpc::RpcClient as _; use wallet::WalletCore; // Before running this example, compile the `hello_world_with_authorization.rs` guest program with: @@ -63,13 +64,7 @@ async fn main() { .await .expect("Node should be reachable to query account data"); let signing_keys = [signing_key]; - let message = Message::try_new( - program.id(), - vec![account_id], - nonces.iter().map(|x| Nonce(*x)).collect(), - greeting, - ) - .unwrap(); + let message = Message::try_new(program.id(), vec![account_id], nonces, greeting).unwrap(); // Pass the signing key to sign the message. 
This will be used by the node // to flag the pre_state as `is_authorized` when executing the program let witness_set = WitnessSet::for_message(&message, &signing_keys); @@ -78,7 +73,7 @@ async fn main() { // Submit the transaction let _response = wallet_core .sequencer_client - .send_tx_public(tx) + .send_transaction(NSSATransaction::Public(tx)) .await .unwrap(); } diff --git a/examples/program_deployment/src/bin/run_hello_world_with_authorization_through_tail_call_with_pda.rs b/examples/program_deployment/src/bin/run_hello_world_with_authorization_through_tail_call_with_pda.rs index 4371b000..e6a8ca99 100644 --- a/examples/program_deployment/src/bin/run_hello_world_with_authorization_through_tail_call_with_pda.rs +++ b/examples/program_deployment/src/bin/run_hello_world_with_authorization_through_tail_call_with_pda.rs @@ -3,12 +3,14 @@ reason = "This is an example program, it's fine to print to stdout" )] +use common::transaction::NSSATransaction; use nssa::{ AccountId, PublicTransaction, program::Program, public_transaction::{Message, WitnessSet}, }; use nssa_core::program::PdaSeed; +use sequencer_service_rpc::RpcClient as _; use wallet::WalletCore; // Before running this example, compile the `simple_tail_call.rs` guest program with: @@ -56,7 +58,7 @@ async fn main() { // Submit the transaction let _response = wallet_core .sequencer_client - .send_tx_public(tx) + .send_transaction(NSSATransaction::Public(tx)) .await .unwrap(); diff --git a/examples/program_deployment/src/bin/run_hello_world_with_move_function.rs b/examples/program_deployment/src/bin/run_hello_world_with_move_function.rs index 0d4af502..a1c2517e 100644 --- a/examples/program_deployment/src/bin/run_hello_world_with_move_function.rs +++ b/examples/program_deployment/src/bin/run_hello_world_with_move_function.rs @@ -1,5 +1,7 @@ use clap::{Parser, Subcommand}; +use common::transaction::NSSATransaction; use nssa::{PublicTransaction, program::Program, public_transaction}; +use 
sequencer_service_rpc::RpcClient as _; use wallet::{PrivacyPreservingAccount, WalletCore}; // Before running this example, compile the `hello_world_with_move_function.rs` guest program with: @@ -87,7 +89,7 @@ async fn main() { // Submit the transaction let _response = wallet_core .sequencer_client - .send_tx_public(tx) + .send_transaction(NSSATransaction::Public(tx)) .await .unwrap(); } @@ -126,7 +128,7 @@ async fn main() { // Submit the transaction let _response = wallet_core .sequencer_client - .send_tx_public(tx) + .send_transaction(NSSATransaction::Public(tx)) .await .unwrap(); } diff --git a/explorer_service/Dockerfile b/explorer_service/Dockerfile index 238e77e6..6484619f 100644 --- a/explorer_service/Dockerfile +++ b/explorer_service/Dockerfile @@ -22,7 +22,13 @@ WORKDIR /explorer_service COPY . . # Build the app -RUN cargo leptos build --release -vv +RUN --mount=type=cache,target=/usr/local/cargo/registry/index \ + --mount=type=cache,target=/usr/local/cargo/registry/cache \ + --mount=type=cache,target=/usr/local/cargo/git \ + --mount=type=cache,target=/explorer_service/target \ + cargo leptos build --release -vv \ + && cp /explorer_service/target/release/explorer_service /usr/local/bin/explorer_service \ + && cp -r /explorer_service/target/site /explorer_service/site_output FROM debian:trixie-slim AS runtime WORKDIR /explorer_service @@ -33,10 +39,10 @@ RUN apt-get update -y \ && rm -rf /var/lib/apt/lists/* # Copy the server binary to the /explorer_service directory -COPY --from=builder /explorer_service/target/release/explorer_service /explorer_service/ +COPY --from=builder /usr/local/bin/explorer_service /explorer_service/ # /target/site contains our JS/WASM/CSS, etc. 
-COPY --from=builder /explorer_service/target/site /explorer_service/site +COPY --from=builder /explorer_service/site_output /explorer_service/site # Copy Cargo.toml as it’s needed at runtime COPY --from=builder /explorer_service/Cargo.toml /explorer_service/ diff --git a/explorer_service/src/api.rs b/explorer_service/src/api.rs index b37145af..8c2a0e36 100644 --- a/explorer_service/src/api.rs +++ b/explorer_service/src/api.rs @@ -41,12 +41,12 @@ pub async fn search(query: String) -> Result { // Try as hash if let Ok(hash) = HashType::from_str(&query) { // Try as block hash - if let Ok(block) = client.get_block_by_hash(hash).await { + if let Ok(Some(block)) = client.get_block_by_hash(hash).await { blocks.push(block); } // Try as transaction hash - if let Ok(tx) = client.get_transaction(hash).await { + if let Ok(Some(tx)) = client.get_transaction(hash).await { transactions.push(tx); } } @@ -60,7 +60,7 @@ pub async fn search(query: String) -> Result { // Try as block ID if let Ok(block_id) = query.parse::() - && let Ok(block) = client.get_block_by_id(block_id).await + && let Ok(Some(block)) = client.get_block_by_id(block_id).await { blocks.push(block); } @@ -81,6 +81,7 @@ pub async fn get_block_by_id(block_id: BlockId) -> Result .get_block_by_id(block_id) .await .map_err(|e| ServerFnError::ServerError(format!("RPC error: {e}"))) + .and_then(|opt| opt.ok_or_else(|| ServerFnError::ServerError("Block not found".to_owned()))) } /// Get latest block ID @@ -103,6 +104,7 @@ pub async fn get_block_by_hash(block_hash: HashType) -> Result Result impl IntoView { } = witness_set; let program_id_str = program_id.to_string(); - let proof_len = proof.0.len(); + let proof_len = proof.map_or(0, |p| p.0.len()); let signatures_count = signatures_and_public_keys.len(); view! 
{ @@ -177,13 +177,14 @@ pub fn TransactionPage() -> impl IntoView { encrypted_private_post_states, new_commitments, new_nullifiers, + block_validity_window, + timestamp_validity_window, } = message; let WitnessSet { signatures_and_public_keys: _, proof, } = witness_set; - - let proof_len = proof.0.len(); + let proof_len = proof.map_or(0, |p| p.0.len()); view! {

"Privacy-Preserving Transaction Details"

@@ -212,6 +213,14 @@ pub fn TransactionPage() -> impl IntoView { "Proof Size:" {format!("{proof_len} bytes")}
+
+ "Block Validity Window:" + {block_validity_window.to_string()} +
+
+ "Timestamp Validity Window:" + {timestamp_validity_window.to_string()} +

"Public Accounts"

diff --git a/indexer/core/Cargo.toml b/indexer/core/Cargo.toml index 13e81088..33fe2d9d 100644 --- a/indexer/core/Cargo.toml +++ b/indexer/core/Cargo.toml @@ -13,6 +13,7 @@ bedrock_client.workspace = true nssa.workspace = true nssa_core.workspace = true storage.workspace = true +testnet_initial_state.workspace = true anyhow.workspace = true log.workspace = true diff --git a/indexer/core/src/block_store.rs b/indexer/core/src/block_store.rs index db2f855b..a76d1b26 100644 --- a/indexer/core/src/block_store.rs +++ b/indexer/core/src/block_store.rs @@ -3,17 +3,18 @@ use std::{path::Path, sync::Arc}; use anyhow::Result; use bedrock_client::HeaderId; use common::{ - block::{BedrockStatus, Block, BlockId}, - transaction::NSSATransaction, + block::{BedrockStatus, Block}, + transaction::{NSSATransaction, clock_invocation}, }; -use nssa::{Account, AccountId, V02State}; +use nssa::{Account, AccountId, V03State}; +use nssa_core::BlockId; use storage::indexer::RocksDBIO; use tokio::sync::RwLock; #[derive(Clone)] pub struct IndexerStore { dbio: Arc, - current_state: Arc>, + current_state: Arc>, } impl IndexerStore { @@ -24,7 +25,7 @@ impl IndexerStore { pub fn open_db_with_genesis( location: &Path, genesis_block: &Block, - initial_state: &V02State, + initial_state: &V03State, ) -> Result { let dbio = RocksDBIO::open_or_create(location, genesis_block, initial_state)?; let current_state = dbio.final_state()?; @@ -46,7 +47,7 @@ impl IndexerStore { Ok(self.dbio.get_meta_last_block_in_db()?) } - pub fn get_block_at_id(&self, id: u64) -> Result { + pub fn get_block_at_id(&self, id: u64) -> Result> { Ok(self.dbio.get_block(id)?) } @@ -54,20 +55,25 @@ impl IndexerStore { Ok(self.dbio.get_block_batch(before, limit)?) 
} - pub fn get_transaction_by_hash(&self, tx_hash: [u8; 32]) -> Result { - let block = self.get_block_at_id(self.dbio.get_block_id_by_tx_hash(tx_hash)?)?; - let transaction = block + pub fn get_transaction_by_hash(&self, tx_hash: [u8; 32]) -> Result> { + let Some(block_id) = self.dbio.get_block_id_by_tx_hash(tx_hash)? else { + return Ok(None); + }; + let Some(block) = self.get_block_at_id(block_id)? else { + return Ok(None); + }; + Ok(block .body .transactions - .iter() - .find(|enc_tx| enc_tx.hash().0 == tx_hash) - .ok_or_else(|| anyhow::anyhow!("Transaction not found in DB"))?; - - Ok(transaction.clone()) + .into_iter() + .find(|enc_tx| enc_tx.hash().0 == tx_hash)) } - pub fn get_block_by_hash(&self, hash: [u8; 32]) -> Result { - self.get_block_at_id(self.dbio.get_block_id_by_hash(hash)?) + pub fn get_block_by_hash(&self, hash: [u8; 32]) -> Result> { + let Some(id) = self.dbio.get_block_id_by_hash(hash)? else { + return Ok(None); + }; + self.get_block_at_id(id) } pub fn get_transactions_by_account( @@ -93,14 +99,14 @@ impl IndexerStore { .expect("Must be set at the DB startup") } - pub fn get_state_at_block(&self, block_id: u64) -> Result { + pub fn get_state_at_block(&self, block_id: u64) -> Result { Ok(self.dbio.calculate_state_for_id(block_id)?) } /// Recalculation of final state directly from DB. /// /// Used for indexer healthcheck. - pub fn recalculate_final_state(&self) -> Result { + pub fn recalculate_final_state(&self) -> Result { Ok(self.dbio.final_state()?) 
} @@ -116,12 +122,37 @@ impl IndexerStore { { let mut state_guard = self.current_state.write().await; - for transaction in &block.body.transactions { + let (clock_tx, user_txs) = block + .body + .transactions + .split_last() + .ok_or_else(|| anyhow::anyhow!("Block has no transactions"))?; + + anyhow::ensure!( + *clock_tx == NSSATransaction::Public(clock_invocation(block.header.timestamp)), + "Last transaction in block must be the clock invocation for the block timestamp" + ); + + for transaction in user_txs { transaction .clone() .transaction_stateless_check()? - .execute_check_on_state(&mut state_guard)?; + .execute_check_on_state( + &mut state_guard, + block.header.block_id, + block.header.timestamp, + )?; } + + // Apply the clock invocation directly (it is expected to modify clock accounts). + let NSSATransaction::Public(clock_public_tx) = clock_tx else { + anyhow::bail!("Clock invocation must be a public transaction"); + }; + state_guard.transition_from_public_transaction( + clock_public_tx, + block.header.block_id, + block.header.timestamp, + )?; } // ToDo: Currently we are fetching only finalized blocks @@ -167,11 +198,11 @@ mod tests { let storage = IndexerStore::open_db_with_genesis( home.as_ref(), &genesis_block(), - &nssa::V02State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]), + &nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[], 0), ) .unwrap(); - let block = storage.get_block_at_id(1).unwrap(); + let block = storage.get_block_at_id(1).unwrap().unwrap(); let final_id = storage.get_last_block_id().unwrap(); assert_eq!(block.header.hash, genesis_block().header.hash); @@ -185,7 +216,7 @@ mod tests { let storage = IndexerStore::open_db_with_genesis( home.as_ref(), &genesis_block(), - &nssa::V02State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]), + &nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[], 0), ) .unwrap(); @@ -203,11 +234,14 @@ mod tests 
{ 10, &sign_key, ); + let block_id = u64::try_from(i).unwrap(); + let block_timestamp = block_id.saturating_mul(100); + let clock_tx = NSSATransaction::Public(clock_invocation(block_timestamp)); let next_block = common::test_utils::produce_dummy_block( - u64::try_from(i).unwrap(), + block_id, Some(prev_hash), - vec![tx], + vec![tx, clock_tx], ); prev_hash = next_block.header.hash; diff --git a/indexer/core/src/config.rs b/indexer/core/src/config.rs index a85284cc..291e54f5 100644 --- a/indexer/core/src/config.rs +++ b/indexer/core/src/config.rs @@ -7,13 +7,11 @@ use std::{ use anyhow::{Context as _, Result}; pub use bedrock_client::BackoffConfig; -use common::{ - block::{AccountInitialData, CommitmentsInitialData}, - config::BasicAuth, -}; +use common::config::BasicAuth; use humantime_serde; pub use logos_blockchain_core::mantle::ops::channel::ChannelId; use serde::{Deserialize, Serialize}; +use testnet_initial_state::{PrivateAccountPublicInitialData, PublicAccountPublicInitialData}; use url::Url; #[derive(Debug, Clone, Serialize, Deserialize)] @@ -29,16 +27,16 @@ pub struct ClientConfig { pub struct IndexerConfig { /// Home dir of sequencer storage. pub home: PathBuf, - /// List of initial accounts data. - pub initial_accounts: Vec, - /// List of initial commitments. - pub initial_commitments: Vec, /// Sequencers signing key. 
pub signing_key: [u8; 32], #[serde(with = "humantime_serde")] pub consensus_info_polling_interval: Duration, pub bedrock_client_config: ClientConfig, pub channel_id: ChannelId, + #[serde(skip_serializing_if = "Option::is_none")] + pub initial_public_accounts: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + pub initial_private_accounts: Option>, } impl IndexerConfig { diff --git a/indexer/core/src/lib.rs b/indexer/core/src/lib.rs index 6c96821e..e68e97c0 100644 --- a/indexer/core/src/lib.rs +++ b/indexer/core/src/lib.rs @@ -2,14 +2,17 @@ use std::collections::VecDeque; use anyhow::Result; use bedrock_client::{BedrockClient, HeaderId}; -use common::block::{Block, HashableBlockData}; -// ToDo: Remove after testnet -use common::{HashType, PINATA_BASE58}; +use common::{ + HashType, PINATA_BASE58, + block::{Block, HashableBlockData}, +}; use log::{debug, error, info}; use logos_blockchain_core::mantle::{ Op, SignedMantleTx, ops::channel::{ChannelId, inscribe::InscriptionOp}, }; +use nssa::V03State; +use testnet_initial_state::initial_state_testnet; use crate::{block_store::IndexerStore, config::IndexerConfig}; @@ -54,36 +57,51 @@ impl IndexerCore { let channel_genesis_msg_id = [0; 32]; let genesis_block = hashable_data.into_pending_block(&signing_key, channel_genesis_msg_id); - // This is a troubling moment, because changes in key protocol can - // affect this. And indexer can not reliably ask this data from sequencer - // because indexer must be independent from it. - // ToDo: move initial state generation into common and use the same method - // for indexer and sequencer. This way both services buit at same version - // could be in sync. 
- let initial_commitments: Vec = config - .initial_commitments - .iter() - .map(|init_comm_data| { - let npk = &init_comm_data.npk; + let initial_commitments: Option> = config + .initial_private_accounts + .as_ref() + .map(|initial_commitments| { + initial_commitments + .iter() + .map(|init_comm_data| { + let npk = &init_comm_data.npk; - let mut acc = init_comm_data.account.clone(); + let mut acc = init_comm_data.account.clone(); - acc.program_owner = nssa::program::Program::authenticated_transfer_program().id(); + acc.program_owner = + nssa::program::Program::authenticated_transfer_program().id(); - nssa_core::Commitment::new(npk, &acc) - }) - .collect(); + nssa_core::Commitment::new(npk, &acc) + }) + .collect() + }); - let init_accs: Vec<(nssa::AccountId, u128)> = config - .initial_accounts - .iter() - .map(|acc_data| (acc_data.account_id, acc_data.balance)) - .collect(); + let init_accs: Option> = config + .initial_public_accounts + .as_ref() + .map(|initial_accounts| { + initial_accounts + .iter() + .map(|acc_data| (acc_data.account_id, acc_data.balance)) + .collect() + }); - let mut state = nssa::V02State::new_with_genesis_accounts(&init_accs, &initial_commitments); + // If initial commitments or accounts are present in config, need to construct state from + // them + let state = if initial_commitments.is_some() || init_accs.is_some() { + let mut state = V03State::new_with_genesis_accounts( + &init_accs.unwrap_or_default(), + &initial_commitments.unwrap_or_default(), + genesis_block.header.timestamp, + ); - // ToDo: Remove after testnet - state.add_pinata_program(PINATA_BASE58.parse().unwrap()); + // ToDo: Remove after testnet + state.add_pinata_program(PINATA_BASE58.parse().unwrap()); + + state + } else { + initial_state_testnet() + }; let home = config.home.join("rocksdb"); diff --git a/indexer/service/Cargo.toml b/indexer/service/Cargo.toml index 911121fd..a07a2285 100644 --- a/indexer/service/Cargo.toml +++ b/indexer/service/Cargo.toml @@ -21,7 +21,6 @@ 
log.workspace = true jsonrpsee.workspace = true serde_json.workspace = true futures.workspace = true -async-trait = "0.1.89" arc-swap = "1.8.1" [features] diff --git a/indexer/service/Dockerfile b/indexer/service/Dockerfile index bb93c2f2..cc7087bb 100644 --- a/indexer/service/Dockerfile +++ b/indexer/service/Dockerfile @@ -51,32 +51,34 @@ RUN cargo chef prepare --bin indexer_service --recipe-path recipe.json FROM chef AS builder COPY --from=planner /indexer_service/recipe.json recipe.json # Build dependencies only (this layer will be cached) -RUN cargo chef cook --bin indexer_service --release --recipe-path recipe.json +RUN --mount=type=cache,target=/usr/local/cargo/registry/index \ + --mount=type=cache,target=/usr/local/cargo/registry/cache \ + --mount=type=cache,target=/usr/local/cargo/git \ + --mount=type=cache,target=/indexer_service/target \ + cargo chef cook --bin indexer_service --release --recipe-path recipe.json # Copy source code COPY . . -# Build the actual application -RUN cargo build --release --bin indexer_service - -# Strip debug symbols to reduce binary size -RUN strip /indexer_service/target/release/indexer_service +# Build the actual application and copy the binary out of the cache mount +RUN --mount=type=cache,target=/usr/local/cargo/registry/index \ + --mount=type=cache,target=/usr/local/cargo/registry/cache \ + --mount=type=cache,target=/usr/local/cargo/git \ + --mount=type=cache,target=/indexer_service/target \ + cargo build --release --bin indexer_service \ + && strip /indexer_service/target/release/indexer_service \ + && cp /indexer_service/target/release/indexer_service /usr/local/bin/indexer_service # Runtime stage - minimal image FROM debian:trixie-slim -# Install runtime dependencies -RUN apt-get update \ - && apt-get install -y gosu jq \ - && rm -rf /var/lib/apt/lists/* - # Create non-root user for security RUN useradd -m -u 1000 -s /bin/bash indexer_service_user && \ - mkdir -p /indexer_service /etc/indexer_service && \ - chown -R 
indexer_service_user:indexer_service_user /indexer_service /etc/indexer_service + mkdir -p /indexer_service /etc/indexer_service /var/lib/indexer_service && \ + chown -R indexer_service_user:indexer_service_user /indexer_service /etc/indexer_service /var/lib/indexer_service # Copy binary from builder -COPY --from=builder --chown=indexer_service_user:indexer_service_user /indexer_service/target/release/indexer_service /usr/local/bin/indexer_service +COPY --from=builder --chown=indexer_service_user:indexer_service_user /usr/local/bin/indexer_service /usr/local/bin/indexer_service # Copy r0vm binary from builder COPY --from=builder --chown=indexer_service_user:indexer_service_user /usr/local/bin/r0vm /usr/local/bin/r0vm @@ -84,9 +86,7 @@ COPY --from=builder --chown=indexer_service_user:indexer_service_user /usr/local # Copy logos blockchain circuits from builder COPY --from=builder --chown=indexer_service_user:indexer_service_user /root/.logos-blockchain-circuits /home/indexer_service_user/.logos-blockchain-circuits -# Copy entrypoint script -COPY indexer/service/docker-entrypoint.sh /docker-entrypoint.sh -RUN chmod +x /docker-entrypoint.sh +VOLUME /var/lib/indexer_service # Expose default port EXPOSE 8779 @@ -105,9 +105,7 @@ HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \ # Run the application ENV RUST_LOG=info -USER root - -ENTRYPOINT ["/docker-entrypoint.sh"] +USER indexer_service_user WORKDIR /indexer_service CMD ["indexer_service", "/etc/indexer_service/indexer_config.json"] diff --git a/indexer/service/configs/indexer_config.json b/indexer/service/configs/indexer_config.json index bcefffce..e4dd8f93 100644 --- a/indexer/service/configs/indexer_config.json +++ b/indexer/service/configs/indexer_config.json @@ -11,50 +11,50 @@ "channel_id": "0101010101010101010101010101010101010101010101010101010101010101", "initial_accounts": [ { - "account_id": "6iArKUXxhUJqS7kCaPNhwMWt3ro71PDyBj7jwAyE2VQV", + "account_id": 
"CbgR6tj5kWx5oziiFptM7jMvrQeYY3Mzaao6ciuhSr2r", "balance": 10000 }, { - "account_id": "7wHg9sbJwc6h3NP1S9bekfAzB8CHifEcxKswCKUt3YQo", + "account_id": "2RHZhw9h534Zr3eq2RGhQete2Hh667foECzXPmSkGni2", "balance": 20000 } ], "initial_commitments": [ { - "npk":[ - 177, - 64, - 1, + "npk": [ + 139, + 19, + 158, 11, - 87, - 38, - 254, - 159, + 155, 231, - 165, - 1, - 94, - 64, - 137, - 243, - 76, - 249, - 101, - 251, - 129, - 33, - 101, - 189, - 30, - 42, - 11, - 191, - 34, - 103, - 186, - 227, - 230 - ] , + 85, + 206, + 132, + 228, + 220, + 114, + 145, + 89, + 113, + 156, + 238, + 142, + 242, + 74, + 182, + 91, + 43, + 100, + 6, + 190, + 31, + 15, + 31, + 88, + 96, + 204 + ], "account": { "program_owner": [ 0, @@ -73,38 +73,38 @@ }, { "npk": [ - 32, - 67, - 72, - 164, - 106, - 53, - 66, - 239, - 141, - 15, - 52, - 230, - 136, - 177, - 2, - 236, - 207, - 243, + 173, 134, - 135, - 210, - 143, - 87, - 232, + 33, + 223, + 54, + 226, + 10, + 71, 215, - 128, - 194, - 120, - 113, - 224, - 4, - 165 + 254, + 143, + 172, + 24, + 244, + 243, + 208, + 65, + 112, + 118, + 70, + 217, + 240, + 69, + 100, + 129, + 3, + 121, + 25, + 213, + 132, + 42, + 45 ], "account": { "program_owner": [ @@ -157,4 +157,4 @@ 37, 37 ] -} +} \ No newline at end of file diff --git a/indexer/service/docker-compose.yml b/indexer/service/docker-compose.yml index 73ac90ae..b690a180 100644 --- a/indexer/service/docker-compose.yml +++ b/indexer/service/docker-compose.yml @@ -10,5 +10,8 @@ services: volumes: # Mount configuration - ./configs/indexer_config.json:/etc/indexer_service/indexer_config.json - # Mount data folder - - ./data:/var/lib/indexer_service + # Mount data volume + - indexer_data:/var/lib/indexer_service + +volumes: + indexer_data: diff --git a/indexer/service/docker-entrypoint.sh b/indexer/service/docker-entrypoint.sh deleted file mode 100644 index 49a5f891..00000000 --- a/indexer/service/docker-entrypoint.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/sh - -# This is an entrypoint script for the 
indexer_service Docker container, -# it's not meant to be executed outside of the container. - -set -e - -CONFIG="/etc/indexer_service/indexer_config.json" - -# Check config file exists -if [ ! -f "$CONFIG" ]; then - echo "Config file not found: $CONFIG" >&2 - exit 1 -fi - -# Parse home dir -HOME_DIR=$(jq -r '.home' "$CONFIG") - -if [ -z "$HOME_DIR" ] || [ "$HOME_DIR" = "null" ]; then - echo "'home' key missing in config" >&2 - exit 1 -fi - -# Give permissions to the data directory and switch to non-root user -if [ "$(id -u)" = "0" ]; then - mkdir -p "$HOME_DIR" - chown -R indexer_service_user:indexer_service_user "$HOME_DIR" - exec gosu indexer_service_user "$@" -fi diff --git a/indexer/service/protocol/src/convert.rs b/indexer/service/protocol/src/convert.rs index 499baa4c..eb79fa34 100644 --- a/indexer/service/protocol/src/convert.rs +++ b/indexer/service/protocol/src/convert.rs @@ -7,7 +7,7 @@ use crate::{ CommitmentSetDigest, Data, EncryptedAccountData, EphemeralPublicKey, HashType, MantleMsgId, Nullifier, PrivacyPreservingMessage, PrivacyPreservingTransaction, ProgramDeploymentMessage, ProgramDeploymentTransaction, ProgramId, Proof, PublicKey, PublicMessage, PublicTransaction, - Signature, Transaction, WitnessSet, + Signature, Transaction, ValidityWindow, WitnessSet, }; // ============================================================================ @@ -287,6 +287,8 @@ impl From for PrivacyPre encrypted_private_post_states, new_commitments, new_nullifiers, + block_validity_window, + timestamp_validity_window, } = value; Self { public_account_ids: public_account_ids.into_iter().map(Into::into).collect(), @@ -301,12 +303,14 @@ impl From for PrivacyPre .into_iter() .map(|(n, d)| (n.into(), d.into())) .collect(), + block_validity_window: block_validity_window.into(), + timestamp_validity_window: timestamp_validity_window.into(), } } } impl TryFrom for nssa::privacy_preserving_transaction::message::Message { - type Error = nssa_core::account::data::DataTooBigError; 
+ type Error = nssa::error::NssaError; fn try_from(value: PrivacyPreservingMessage) -> Result { let PrivacyPreservingMessage { @@ -316,6 +320,8 @@ impl TryFrom for nssa::privacy_preserving_transaction: encrypted_private_post_states, new_commitments, new_nullifiers, + block_validity_window, + timestamp_validity_window, } = value; Ok(Self { public_account_ids: public_account_ids.into_iter().map(Into::into).collect(), @@ -326,7 +332,8 @@ impl TryFrom for nssa::privacy_preserving_transaction: public_post_states: public_post_states .into_iter() .map(TryInto::try_into) - .collect::, _>>()?, + .collect::, _>>() + .map_err(|e| nssa::error::NssaError::InvalidInput(format!("{e}")))?, encrypted_private_post_states: encrypted_private_post_states .into_iter() .map(Into::into) @@ -336,6 +343,12 @@ impl TryFrom for nssa::privacy_preserving_transaction: .into_iter() .map(|(n, d)| (n.into(), d.into())) .collect(), + block_validity_window: block_validity_window + .try_into() + .map_err(|e| nssa::error::NssaError::InvalidInput(format!("{e}")))?, + timestamp_validity_window: timestamp_validity_window + .try_into() + .map_err(|e| nssa::error::NssaError::InvalidInput(format!("{e}")))?, }) } } @@ -359,12 +372,16 @@ impl From for nssa::program_deployment_transaction::Me // WitnessSet conversions // ============================================================================ -impl TryFrom for WitnessSet { - type Error = (); - - fn try_from(_value: nssa::public_transaction::WitnessSet) -> Result { - // Public transaction witness sets don't have proofs, so we can't convert them directly - Err(()) +impl From for WitnessSet { + fn from(value: nssa::public_transaction::WitnessSet) -> Self { + Self { + signatures_and_public_keys: value + .signatures_and_public_keys() + .iter() + .map(|(sig, pk)| (sig.clone().into(), pk.clone().into())) + .collect(), + proof: None, + } } } @@ -376,7 +393,7 @@ impl From for Wit .into_iter() .map(|(sig, pk)| (sig.into(), pk.into())) .collect(), - proof: 
proof.into(), + proof: Some(proof.into()), } } } @@ -396,7 +413,9 @@ impl TryFrom for nssa::privacy_preserving_transaction::witness_set:: Ok(Self::from_raw_parts( signatures_and_public_keys, - proof.into(), + proof + .map(Into::into) + .ok_or_else(|| nssa::error::NssaError::InvalidInput("Missing proof".to_owned()))?, )) } } @@ -416,14 +435,7 @@ impl From for PublicTransaction { Self { hash, message: message.into(), - witness_set: WitnessSet { - signatures_and_public_keys: witness_set - .signatures_and_public_keys() - .iter() - .map(|(sig, pk)| (sig.clone().into(), pk.clone().into())) - .collect(), - proof: Proof(vec![]), // Public transactions don't have proofs - }, + witness_set: witness_set.into(), } } } @@ -480,14 +492,7 @@ impl TryFrom for nssa::PrivacyPreservingTransactio witness_set, } = value; - Ok(Self::new( - message - .try_into() - .map_err(|err: nssa_core::account::data::DataTooBigError| { - nssa::error::NssaError::InvalidInput(err.to_string()) - })?, - witness_set.try_into()?, - )) + Ok(Self::new(message.try_into()?, witness_set.try_into()?)) } } @@ -688,3 +693,21 @@ impl From for common::HashType { Self(value.0) } } + +// ============================================================================ +// ValidityWindow conversions +// ============================================================================ + +impl From> for ValidityWindow { + fn from(value: nssa_core::program::ValidityWindow) -> Self { + Self((value.start(), value.end())) + } +} + +impl TryFrom for nssa_core::program::ValidityWindow { + type Error = nssa_core::program::InvalidWindow; + + fn try_from(value: ValidityWindow) -> Result { + value.0.try_into() + } +} diff --git a/indexer/service/protocol/src/lib.rs b/indexer/service/protocol/src/lib.rs index 98ef5650..d554267e 100644 --- a/indexer/service/protocol/src/lib.rs +++ b/indexer/service/protocol/src/lib.rs @@ -138,7 +138,7 @@ pub struct Account { } pub type BlockId = u64; -pub type TimeStamp = u64; +pub type Timestamp = u64; 
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)] pub struct Block { @@ -153,7 +153,7 @@ pub struct BlockHeader { pub block_id: BlockId, pub prev_block_hash: HashType, pub hash: HashType, - pub timestamp: TimeStamp, + pub timestamp: Timestamp, pub signature: Signature, } @@ -235,12 +235,14 @@ pub struct PrivacyPreservingMessage { pub encrypted_private_post_states: Vec, pub new_commitments: Vec, pub new_nullifiers: Vec<(Nullifier, CommitmentSetDigest)>, + pub block_validity_window: ValidityWindow, + pub timestamp_validity_window: ValidityWindow, } #[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)] pub struct WitnessSet { pub signatures_and_public_keys: Vec<(Signature, PublicKey)>, - pub proof: Proof, + pub proof: Option, } #[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)] @@ -300,6 +302,20 @@ pub struct Nullifier( pub [u8; 32], ); +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)] +pub struct ValidityWindow(pub (Option, Option)); + +impl Display for ValidityWindow { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self.0 { + (Some(start), Some(end)) => write!(f, "[{start}, {end})"), + (Some(start), None) => write!(f, "[{start}, \u{221e})"), + (None, Some(end)) => write!(f, "(-\u{221e}, {end})"), + (None, None) => write!(f, "(-\u{221e}, \u{221e})"), + } + } +} + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)] pub struct CommitmentSetDigest( #[serde(with = "base64::arr")] diff --git a/indexer/service/rpc/src/lib.rs b/indexer/service/rpc/src/lib.rs index be0e45ca..217c60d4 100644 --- a/indexer/service/rpc/src/lib.rs +++ b/indexer/service/rpc/src/lib.rs @@ -30,16 +30,22 @@ pub trait Rpc { async fn get_last_finalized_block_id(&self) -> Result; #[method(name = "getBlockById")] - async fn get_block_by_id(&self, block_id: BlockId) -> Result; + async fn get_block_by_id(&self, 
block_id: BlockId) -> Result, ErrorObjectOwned>; #[method(name = "getBlockByHash")] - async fn get_block_by_hash(&self, block_hash: HashType) -> Result; + async fn get_block_by_hash( + &self, + block_hash: HashType, + ) -> Result, ErrorObjectOwned>; #[method(name = "getAccount")] async fn get_account(&self, account_id: AccountId) -> Result; #[method(name = "getTransaction")] - async fn get_transaction(&self, tx_hash: HashType) -> Result; + async fn get_transaction( + &self, + tx_hash: HashType, + ) -> Result, ErrorObjectOwned>; #[method(name = "getBlocks")] async fn get_blocks( diff --git a/indexer/service/src/lib.rs b/indexer/service/src/lib.rs index 1f87e929..10f1cade 100644 --- a/indexer/service/src/lib.rs +++ b/indexer/service/src/lib.rs @@ -3,7 +3,7 @@ use std::net::SocketAddr; use anyhow::{Context as _, Result}; pub use indexer_core::config::*; use indexer_service_rpc::RpcServer as _; -use jsonrpsee::server::Server; +use jsonrpsee::server::{Server, ServerHandle}; use log::{error, info}; pub mod service; @@ -13,10 +13,11 @@ pub mod mock_service; pub struct IndexerHandle { addr: SocketAddr, - server_handle: Option, + /// Option because of `Drop` which forbids to simply move out of `self` in `stopped()`. + server_handle: Option, } impl IndexerHandle { - const fn new(addr: SocketAddr, server_handle: jsonrpsee::server::ServerHandle) -> Self { + const fn new(addr: SocketAddr, server_handle: ServerHandle) -> Self { Self { addr, server_handle: Some(server_handle), @@ -28,6 +29,7 @@ impl IndexerHandle { self.addr } + /// Wait for all Indexer tasks to stop. 
pub async fn stopped(mut self) { let handle = self .server_handle @@ -37,15 +39,11 @@ impl IndexerHandle { handle.stopped().await; } - #[expect( - clippy::redundant_closure_for_method_calls, - reason = "Clippy suggested path jsonrpsee::jsonrpsee_server::ServerHandle is not accessible" - )] #[must_use] - pub fn is_stopped(&self) -> bool { + pub fn is_healthy(&self) -> bool { self.server_handle .as_ref() - .is_none_or(|handle| handle.is_stopped()) + .is_some_and(|handle| !handle.is_stopped()) } } diff --git a/indexer/service/src/mock_service.rs b/indexer/service/src/mock_service.rs index bc131740..09ae96f5 100644 --- a/indexer/service/src/mock_service.rs +++ b/indexer/service/src/mock_service.rs @@ -13,9 +13,12 @@ use indexer_service_protocol::{ CommitmentSetDigest, Data, EncryptedAccountData, HashType, MantleMsgId, PrivacyPreservingMessage, PrivacyPreservingTransaction, ProgramDeploymentMessage, ProgramDeploymentTransaction, ProgramId, PublicMessage, PublicTransaction, Signature, - Transaction, WitnessSet, + Transaction, ValidityWindow, WitnessSet, +}; +use jsonrpsee::{ + core::{SubscriptionResult, async_trait}, + types::ErrorObjectOwned, }; -use jsonrpsee::{core::SubscriptionResult, types::ErrorObjectOwned}; /// A mock implementation of the `IndexerService` RPC for testing purposes. 
pub struct MockIndexerService { @@ -92,7 +95,7 @@ impl MockIndexerService { }, witness_set: WitnessSet { signatures_and_public_keys: vec![], - proof: indexer_service_protocol::Proof(vec![0; 32]), + proof: None, }, }), // PrivacyPreserving transactions @@ -121,10 +124,12 @@ impl MockIndexerService { indexer_service_protocol::Nullifier([tx_idx as u8; 32]), CommitmentSetDigest([0xff; 32]), )], + block_validity_window: ValidityWindow((None, None)), + timestamp_validity_window: ValidityWindow((None, None)), }, witness_set: WitnessSet { signatures_and_public_keys: vec![], - proof: indexer_service_protocol::Proof(vec![0; 32]), + proof: Some(indexer_service_protocol::Proof(vec![0; 32])), }, }), // ProgramDeployment transactions (rare) @@ -171,7 +176,7 @@ impl MockIndexerService { } } -#[async_trait::async_trait] +#[async_trait] impl indexer_service_rpc::RpcServer for MockIndexerService { async fn subscribe_to_finalized_blocks( &self, @@ -198,26 +203,23 @@ impl indexer_service_rpc::RpcServer for MockIndexerService { }) } - async fn get_block_by_id(&self, block_id: BlockId) -> Result { - self.blocks + async fn get_block_by_id(&self, block_id: BlockId) -> Result, ErrorObjectOwned> { + Ok(self + .blocks .iter() .find(|b| b.header.block_id == block_id) - .cloned() - .ok_or_else(|| { - ErrorObjectOwned::owned( - -32001, - format!("Block with ID {block_id} not found"), - None::<()>, - ) - }) + .cloned()) } - async fn get_block_by_hash(&self, block_hash: HashType) -> Result { - self.blocks + async fn get_block_by_hash( + &self, + block_hash: HashType, + ) -> Result, ErrorObjectOwned> { + Ok(self + .blocks .iter() .find(|b| b.header.hash == block_hash) - .cloned() - .ok_or_else(|| ErrorObjectOwned::owned(-32001, "Block with hash not found", None::<()>)) + .cloned()) } async fn get_account(&self, account_id: AccountId) -> Result { @@ -227,11 +229,11 @@ impl indexer_service_rpc::RpcServer for MockIndexerService { .ok_or_else(|| ErrorObjectOwned::owned(-32001, "Account not found", 
None::<()>)) } - async fn get_transaction(&self, tx_hash: HashType) -> Result { - self.transactions - .get(&tx_hash) - .map(|(tx, _)| tx.clone()) - .ok_or_else(|| ErrorObjectOwned::owned(-32001, "Transaction not found", None::<()>)) + async fn get_transaction( + &self, + tx_hash: HashType, + ) -> Result, ErrorObjectOwned> { + Ok(self.transactions.get(&tx_hash).map(|(tx, _)| tx.clone())) } async fn get_blocks( diff --git a/indexer/service/src/service.rs b/indexer/service/src/service.rs index 256ef33d..e2f8a321 100644 --- a/indexer/service/src/service.rs +++ b/indexer/service/src/service.rs @@ -7,7 +7,7 @@ use indexer_core::{IndexerCore, config::IndexerConfig}; use indexer_service_protocol::{Account, AccountId, Block, BlockId, HashType, Transaction}; use jsonrpsee::{ SubscriptionSink, - core::{Serialize, SubscriptionResult}, + core::{Serialize, SubscriptionResult, async_trait}, types::{ErrorCode, ErrorObject, ErrorObjectOwned}, }; use log::{debug, error, info, warn}; @@ -30,7 +30,7 @@ impl IndexerService { } } -#[async_trait::async_trait] +#[async_trait] impl indexer_service_rpc::RpcServer for IndexerService { async fn subscribe_to_finalized_blocks( &self, @@ -52,22 +52,25 @@ impl indexer_service_rpc::RpcServer for IndexerService { self.indexer.store.get_last_block_id().map_err(db_error) } - async fn get_block_by_id(&self, block_id: BlockId) -> Result { + async fn get_block_by_id(&self, block_id: BlockId) -> Result, ErrorObjectOwned> { Ok(self .indexer .store .get_block_at_id(block_id) .map_err(db_error)? - .into()) + .map(Into::into)) } - async fn get_block_by_hash(&self, block_hash: HashType) -> Result { + async fn get_block_by_hash( + &self, + block_hash: HashType, + ) -> Result, ErrorObjectOwned> { Ok(self .indexer .store .get_block_by_hash(block_hash.0) .map_err(db_error)? 
- .into()) + .map(Into::into)) } async fn get_account(&self, account_id: AccountId) -> Result { @@ -80,13 +83,16 @@ impl indexer_service_rpc::RpcServer for IndexerService { .into()) } - async fn get_transaction(&self, tx_hash: HashType) -> Result { + async fn get_transaction( + &self, + tx_hash: HashType, + ) -> Result, ErrorObjectOwned> { Ok(self .indexer .store .get_transaction_by_hash(tx_hash.0) .map_err(db_error)? - .into()) + .map(Into::into)) } async fn get_blocks( diff --git a/integration_tests/Cargo.toml b/integration_tests/Cargo.toml index b18b782f..cb5277d2 100644 --- a/integration_tests/Cargo.toml +++ b/integration_tests/Cargo.toml @@ -11,26 +11,27 @@ workspace = true nssa_core = { workspace = true, features = ["host"] } nssa.workspace = true sequencer_core = { workspace = true, features = ["default", "testnet"] } -sequencer_runner.workspace = true +sequencer_service.workspace = true wallet.workspace = true common.workspace = true key_protocol.workspace = true indexer_service.workspace = true serde_json.workspace = true token_core.workspace = true +ata_core.workspace = true indexer_service_rpc.workspace = true +sequencer_service_rpc = { workspace = true, features = ["client"] } wallet-ffi.workspace = true +testnet_initial_state.workspace = true url.workspace = true anyhow.workspace = true env_logger.workspace = true log.workspace = true -base64.workspace = true tokio = { workspace = true, features = ["rt-multi-thread", "macros"] } hex.workspace = true tempfile.workspace = true -borsh.workspace = true bytesize.workspace = true futures.workspace = true testcontainers = { version = "0.27.0", features = ["docker-compose"] } diff --git a/integration_tests/src/config.rs b/integration_tests/src/config.rs index 4d8539cc..1dd726eb 100644 --- a/integration_tests/src/config.rs +++ b/integration_tests/src/config.rs @@ -2,16 +2,17 @@ use std::{net::SocketAddr, path::PathBuf, time::Duration}; use anyhow::{Context as _, Result}; use bytesize::ByteSize; -use 
common::block::{AccountInitialData, CommitmentsInitialData}; use indexer_service::{BackoffConfig, ChannelId, ClientConfig, IndexerConfig}; use key_protocol::key_management::KeyChain; use nssa::{Account, AccountId, PrivateKey, PublicKey}; use nssa_core::{account::Data, program::DEFAULT_PROGRAM_ID}; use sequencer_core::config::{BedrockConfig, SequencerConfig}; -use url::Url; -use wallet::config::{ - InitialAccountData, InitialAccountDataPrivate, InitialAccountDataPublic, WalletConfig, +use testnet_initial_state::{ + PrivateAccountPrivateInitialData, PrivateAccountPublicInitialData, + PublicAccountPrivateInitialData, PublicAccountPublicInitialData, }; +use url::Url; +use wallet::config::{InitialAccountData, WalletConfig}; /// Sequencer config options available for custom changes in integration tests. #[derive(Debug, Clone, Copy)] @@ -59,11 +60,11 @@ impl InitialData { let mut private_charlie_key_chain = KeyChain::new_os_random(); let mut private_charlie_account_id = - AccountId::from(&private_charlie_key_chain.nullifer_public_key); + AccountId::from(&private_charlie_key_chain.nullifier_public_key); let mut private_david_key_chain = KeyChain::new_os_random(); let mut private_david_account_id = - AccountId::from(&private_david_key_chain.nullifer_public_key); + AccountId::from(&private_david_key_chain.nullifier_public_key); // Ensure consistent ordering if private_charlie_account_id > private_david_account_id { @@ -102,13 +103,13 @@ impl InitialData { } } - fn sequencer_initial_accounts(&self) -> Vec { + fn sequencer_initial_public_accounts(&self) -> Vec { self.public_accounts .iter() .map(|(priv_key, balance)| { let pub_key = PublicKey::new_from_private_key(priv_key); let account_id = AccountId::from(&pub_key); - AccountInitialData { + PublicAccountPublicInitialData { account_id, balance: *balance, } @@ -116,11 +117,11 @@ impl InitialData { .collect() } - fn sequencer_initial_commitments(&self) -> Vec { + fn sequencer_initial_private_accounts(&self) -> Vec { 
self.private_accounts .iter() - .map(|(key_chain, account)| CommitmentsInitialData { - npk: key_chain.nullifer_public_key.clone(), + .map(|(key_chain, account)| PrivateAccountPublicInitialData { + npk: key_chain.nullifier_public_key.clone(), account: account.clone(), }) .collect() @@ -132,14 +133,14 @@ impl InitialData { .map(|(priv_key, _)| { let pub_key = PublicKey::new_from_private_key(priv_key); let account_id = AccountId::from(&pub_key); - InitialAccountData::Public(InitialAccountDataPublic { + InitialAccountData::Public(PublicAccountPrivateInitialData { account_id, pub_sign_key: priv_key.clone(), }) }) .chain(self.private_accounts.iter().map(|(key_chain, account)| { - let account_id = AccountId::from(&key_chain.nullifer_public_key); - InitialAccountData::Private(Box::new(InitialAccountDataPrivate { + let account_id = AccountId::from(&key_chain.nullifier_public_key); + InitialAccountData::Private(Box::new(PrivateAccountPrivateInitialData { account_id, account: account.clone(), key_chain: key_chain.clone(), @@ -181,8 +182,8 @@ pub fn indexer_config( max_retries: 10, }, }, - initial_accounts: initial_data.sequencer_initial_accounts(), - initial_commitments: initial_data.sequencer_initial_commitments(), + initial_public_accounts: Some(initial_data.sequencer_initial_public_accounts()), + initial_private_accounts: Some(initial_data.sequencer_initial_private_accounts()), signing_key: [37; 32], channel_id: bedrock_channel_id(), }) @@ -204,17 +205,15 @@ pub fn sequencer_config( Ok(SequencerConfig { home, - override_rust_log: None, genesis_id: 1, is_genesis_random: true, max_num_tx_in_block, max_block_size, mempool_max_size, block_create_timeout, - retry_pending_blocks_timeout: Duration::from_secs(120), - port: 0, - initial_accounts: initial_data.sequencer_initial_accounts(), - initial_commitments: initial_data.sequencer_initial_commitments(), + retry_pending_blocks_timeout: Duration::from_mins(2), + initial_public_accounts: 
Some(initial_data.sequencer_initial_public_accounts()), + initial_private_accounts: Some(initial_data.sequencer_initial_private_accounts()), signing_key: [37; 32], bedrock_config: BedrockConfig { backoff: BackoffConfig { @@ -236,14 +235,13 @@ pub fn wallet_config( initial_data: &InitialData, ) -> Result { Ok(WalletConfig { - override_rust_log: None, sequencer_addr: addr_to_url(UrlProtocol::Http, sequencer_addr) .context("Failed to convert sequencer addr to URL")?, seq_poll_timeout: Duration::from_secs(30), seq_tx_poll_max_blocks: 15, seq_poll_max_retries: 10, seq_block_poll_max_amount: 100, - initial_accounts: initial_data.wallet_initial_accounts(), + initial_accounts: Some(initial_data.wallet_initial_accounts()), basic_auth: None, }) } diff --git a/integration_tests/src/lib.rs b/integration_tests/src/lib.rs index 6929ec92..a4381acf 100644 --- a/integration_tests/src/lib.rs +++ b/integration_tests/src/lib.rs @@ -3,15 +3,15 @@ use std::{net::SocketAddr, path::PathBuf, sync::LazyLock}; use anyhow::{Context as _, Result, bail}; -use base64::{Engine as _, engine::general_purpose::STANDARD as BASE64}; -use common::{HashType, sequencer_client::SequencerClient, transaction::NSSATransaction}; +use common::{HashType, transaction::NSSATransaction}; use futures::FutureExt as _; use indexer_service::IndexerHandle; use log::{debug, error, warn}; use nssa::{AccountId, PrivacyPreservingTransaction}; use nssa_core::Commitment; use sequencer_core::indexer_client::{IndexerClient, IndexerClientTrait as _}; -use sequencer_runner::SequencerHandle; +use sequencer_service::SequencerHandle; +use sequencer_service_rpc::{RpcClient as _, SequencerClient, SequencerClientBuilder}; use tempfile::TempDir; use testcontainers::compose::DockerCompose; use wallet::{WalletCore, config::WalletConfigOverrides}; @@ -38,7 +38,8 @@ pub struct TestContext { indexer_client: IndexerClient, wallet: WalletCore, wallet_password: String, - sequencer_handle: SequencerHandle, + /// Optional to move out value in 
Drop. + sequencer_handle: Option, indexer_handle: IndexerHandle, bedrock_compose: DockerCompose, _temp_indexer_dir: TempDir, @@ -90,8 +91,9 @@ impl TestContext { .context("Failed to convert sequencer addr to URL")?; let indexer_url = config::addr_to_url(config::UrlProtocol::Ws, indexer_handle.addr()) .context("Failed to convert indexer addr to URL")?; - let sequencer_client = - SequencerClient::new(sequencer_url).context("Failed to create sequencer client")?; + let sequencer_client = SequencerClientBuilder::default() + .build(sequencer_url) + .context("Failed to create sequencer client")?; let indexer_client = IndexerClient::new(&indexer_url) .await .context("Failed to create indexer client")?; @@ -102,7 +104,7 @@ impl TestContext { wallet, wallet_password, bedrock_compose, - sequencer_handle, + sequencer_handle: Some(sequencer_handle), indexer_handle, _temp_indexer_dir: temp_indexer_dir, _temp_sequencer_dir: temp_sequencer_dir, @@ -229,7 +231,7 @@ impl TestContext { ) .context("Failed to create Sequencer config")?; - let sequencer_handle = sequencer_runner::startup_sequencer(config).await?; + let sequencer_handle = sequencer_service::run(config, 0).await?; Ok((sequencer_handle, temp_sequencer_dir)) } @@ -254,11 +256,11 @@ impl TestContext { let config_overrides = WalletConfigOverrides::default(); let wallet_password = "test_pass".to_owned(); - let wallet = WalletCore::new_init_storage( + let (wallet, _mnemonic) = WalletCore::new_init_storage( config_path, storage_path, Some(config_overrides), - wallet_password.clone(), + &wallet_password, ) .context("Failed to init wallet")?; wallet @@ -333,18 +335,20 @@ impl Drop for TestContext { wallet_password: _, } = self; - if sequencer_handle.is_finished() { - let Err(err) = self - .sequencer_handle - .run_forever() + let sequencer_handle = sequencer_handle + .take() + .expect("Sequencer handle should be present in TestContext drop"); + if !sequencer_handle.is_healthy() { + let Err(err) = sequencer_handle + .failed() 
.now_or_never() - .expect("Future is finished and should be ready"); + .expect("Sequencer handle should not be running"); error!( - "Sequencer handle has unexpectedly finished before TestContext drop with error: {err:#}" + "Sequencer handle has unexpectedly stopped before TestContext drop with error: {err:#}" ); } - if indexer_handle.is_stopped() { + if !indexer_handle.is_healthy() { error!("Indexer handle has unexpectedly stopped before TestContext drop"); } @@ -459,15 +463,8 @@ pub async fn fetch_privacy_preserving_tx( seq_client: &SequencerClient, tx_hash: HashType, ) -> PrivacyPreservingTransaction { - let transaction_encoded = seq_client - .get_transaction_by_hash(tx_hash) - .await - .unwrap() - .transaction - .unwrap(); + let tx = seq_client.get_transaction(tx_hash).await.unwrap().unwrap(); - let tx_bytes = BASE64.decode(transaction_encoded).unwrap(); - let tx = borsh::from_slice(&tx_bytes).unwrap(); match tx { NSSATransaction::PrivacyPreserving(privacy_preserving_transaction) => { privacy_preserving_transaction @@ -480,8 +477,10 @@ pub async fn verify_commitment_is_in_state( commitment: Commitment, seq_client: &SequencerClient, ) -> bool { - matches!( - seq_client.get_proof_for_commitment(commitment).await, - Ok(Some(_)) - ) + seq_client + .get_proof_for_commitment(commitment) + .await + .ok() + .flatten() + .is_some() } diff --git a/integration_tests/tests/account.rs b/integration_tests/tests/account.rs index 3f1d0993..60c1aeaa 100644 --- a/integration_tests/tests/account.rs +++ b/integration_tests/tests/account.rs @@ -7,6 +7,7 @@ use anyhow::Result; use integration_tests::TestContext; use log::info; use nssa::program::Program; +use sequencer_service_rpc::RpcClient as _; use tokio::test; use wallet::cli::{ Command, @@ -21,8 +22,7 @@ async fn get_existing_account() -> Result<()> { let account = ctx .sequencer_client() .get_account(ctx.existing_public_accounts()[0]) - .await? 
- .account; + .await?; assert_eq!( account.program_owner, diff --git a/integration_tests/tests/amm.rs b/integration_tests/tests/amm.rs index bdb2da72..d9ecb831 100644 --- a/integration_tests/tests/amm.rs +++ b/integration_tests/tests/amm.rs @@ -9,6 +9,7 @@ use std::time::Duration; use anyhow::Result; use integration_tests::{TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, format_public_account_id}; use log::info; +use sequencer_service_rpc::RpcClient as _; use tokio::test; use wallet::cli::{ Command, SubcommandReturnValue, @@ -194,20 +195,14 @@ async fn amm_public() -> Result<()> { let user_holding_a_acc = ctx .sequencer_client() .get_account(recipient_account_id_1) - .await? - .account; + .await?; let user_holding_b_acc = ctx .sequencer_client() .get_account(recipient_account_id_2) - .await? - .account; + .await?; - let user_holding_lp_acc = ctx - .sequencer_client() - .get_account(user_holding_lp) - .await? - .account; + let user_holding_lp_acc = ctx.sequencer_client().get_account(user_holding_lp).await?; assert_eq!( u128::from_le_bytes(user_holding_a_acc.data[33..].try_into().unwrap()), @@ -228,7 +223,7 @@ async fn amm_public() -> Result<()> { // Make swap - let subcommand = AmmProgramAgnosticSubcommand::Swap { + let subcommand = AmmProgramAgnosticSubcommand::SwapExactInput { user_holding_a: format_public_account_id(recipient_account_id_1), user_holding_b: format_public_account_id(recipient_account_id_2), amount_in: 2, @@ -243,20 +238,14 @@ async fn amm_public() -> Result<()> { let user_holding_a_acc = ctx .sequencer_client() .get_account(recipient_account_id_1) - .await? - .account; + .await?; let user_holding_b_acc = ctx .sequencer_client() .get_account(recipient_account_id_2) - .await? - .account; + .await?; - let user_holding_lp_acc = ctx - .sequencer_client() - .get_account(user_holding_lp) - .await? 
- .account; + let user_holding_lp_acc = ctx.sequencer_client().get_account(user_holding_lp).await?; assert_eq!( u128::from_le_bytes(user_holding_a_acc.data[33..].try_into().unwrap()), @@ -277,7 +266,7 @@ async fn amm_public() -> Result<()> { // Make swap - let subcommand = AmmProgramAgnosticSubcommand::Swap { + let subcommand = AmmProgramAgnosticSubcommand::SwapExactInput { user_holding_a: format_public_account_id(recipient_account_id_1), user_holding_b: format_public_account_id(recipient_account_id_2), amount_in: 2, @@ -292,20 +281,14 @@ async fn amm_public() -> Result<()> { let user_holding_a_acc = ctx .sequencer_client() .get_account(recipient_account_id_1) - .await? - .account; + .await?; let user_holding_b_acc = ctx .sequencer_client() .get_account(recipient_account_id_2) - .await? - .account; + .await?; - let user_holding_lp_acc = ctx - .sequencer_client() - .get_account(user_holding_lp) - .await? - .account; + let user_holding_lp_acc = ctx.sequencer_client().get_account(user_holding_lp).await?; assert_eq!( u128::from_le_bytes(user_holding_a_acc.data[33..].try_into().unwrap()), @@ -342,20 +325,14 @@ async fn amm_public() -> Result<()> { let user_holding_a_acc = ctx .sequencer_client() .get_account(recipient_account_id_1) - .await? - .account; + .await?; let user_holding_b_acc = ctx .sequencer_client() .get_account(recipient_account_id_2) - .await? - .account; + .await?; - let user_holding_lp_acc = ctx - .sequencer_client() - .get_account(user_holding_lp) - .await? - .account; + let user_holding_lp_acc = ctx.sequencer_client().get_account(user_holding_lp).await?; assert_eq!( u128::from_le_bytes(user_holding_a_acc.data[33..].try_into().unwrap()), @@ -392,20 +369,14 @@ async fn amm_public() -> Result<()> { let user_holding_a_acc = ctx .sequencer_client() .get_account(recipient_account_id_1) - .await? - .account; + .await?; let user_holding_b_acc = ctx .sequencer_client() .get_account(recipient_account_id_2) - .await? 
- .account; + .await?; - let user_holding_lp_acc = ctx - .sequencer_client() - .get_account(user_holding_lp) - .await? - .account; + let user_holding_lp_acc = ctx.sequencer_client().get_account(user_holding_lp).await?; assert_eq!( u128::from_le_bytes(user_holding_a_acc.data[33..].try_into().unwrap()), diff --git a/integration_tests/tests/ata.rs b/integration_tests/tests/ata.rs new file mode 100644 index 00000000..94ba98c9 --- /dev/null +++ b/integration_tests/tests/ata.rs @@ -0,0 +1,656 @@ +#![expect( + clippy::shadow_unrelated, + clippy::tests_outside_test_module, + reason = "We don't care about these in tests" +)] + +use std::time::Duration; + +use anyhow::{Context as _, Result}; +use ata_core::{compute_ata_seed, get_associated_token_account_id}; +use integration_tests::{ + TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, format_private_account_id, + format_public_account_id, verify_commitment_is_in_state, +}; +use log::info; +use nssa::program::Program; +use sequencer_service_rpc::RpcClient as _; +use token_core::{TokenDefinition, TokenHolding}; +use tokio::test; +use wallet::cli::{ + Command, SubcommandReturnValue, + account::{AccountSubcommand, NewSubcommand}, + programs::{ata::AtaSubcommand, token::TokenProgramAgnosticSubcommand}, +}; + +/// Create a public account and return its ID. +async fn new_public_account(ctx: &mut TestContext) -> Result { + let result = wallet::cli::execute_subcommand( + ctx.wallet_mut(), + Command::Account(AccountSubcommand::New(NewSubcommand::Public { + cci: None, + label: None, + })), + ) + .await?; + let SubcommandReturnValue::RegisterAccount { account_id } = result else { + anyhow::bail!("Expected RegisterAccount return value"); + }; + Ok(account_id) +} + +/// Create a private account and return its ID. 
+async fn new_private_account(ctx: &mut TestContext) -> Result { + let result = wallet::cli::execute_subcommand( + ctx.wallet_mut(), + Command::Account(AccountSubcommand::New(NewSubcommand::Private { + cci: None, + label: None, + })), + ) + .await?; + let SubcommandReturnValue::RegisterAccount { account_id } = result else { + anyhow::bail!("Expected RegisterAccount return value"); + }; + Ok(account_id) +} + +#[test] +async fn create_ata_initializes_holding_account() -> Result<()> { + let mut ctx = TestContext::new().await?; + + let definition_account_id = new_public_account(&mut ctx).await?; + let supply_account_id = new_public_account(&mut ctx).await?; + let owner_account_id = new_public_account(&mut ctx).await?; + + // Create a fungible token + let total_supply = 100_u128; + wallet::cli::execute_subcommand( + ctx.wallet_mut(), + Command::Token(TokenProgramAgnosticSubcommand::New { + definition_account_id: format_public_account_id(definition_account_id), + supply_account_id: format_public_account_id(supply_account_id), + name: "TEST".to_owned(), + total_supply, + }), + ) + .await?; + + info!("Waiting for next block creation"); + tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await; + + // Create the ATA for owner + definition + wallet::cli::execute_subcommand( + ctx.wallet_mut(), + Command::Ata(AtaSubcommand::Create { + owner: format_public_account_id(owner_account_id), + token_definition: definition_account_id.to_string(), + }), + ) + .await?; + + info!("Waiting for next block creation"); + tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await; + + // Derive expected ATA address and check on-chain state + let ata_program_id = Program::ata().id(); + let ata_id = get_associated_token_account_id( + &ata_program_id, + &compute_ata_seed(owner_account_id, definition_account_id), + ); + + let ata_acc = ctx + .sequencer_client() + .get_account(ata_id) + .await + .context("ATA account not found")?; + + 
assert_eq!(ata_acc.program_owner, Program::token().id()); + let holding = TokenHolding::try_from(&ata_acc.data)?; + assert_eq!( + holding, + TokenHolding::Fungible { + definition_id: definition_account_id, + balance: 0, + } + ); + + Ok(()) +} + +#[test] +async fn create_ata_is_idempotent() -> Result<()> { + let mut ctx = TestContext::new().await?; + + let definition_account_id = new_public_account(&mut ctx).await?; + let supply_account_id = new_public_account(&mut ctx).await?; + let owner_account_id = new_public_account(&mut ctx).await?; + + // Create a fungible token + wallet::cli::execute_subcommand( + ctx.wallet_mut(), + Command::Token(TokenProgramAgnosticSubcommand::New { + definition_account_id: format_public_account_id(definition_account_id), + supply_account_id: format_public_account_id(supply_account_id), + name: "TEST".to_owned(), + total_supply: 100, + }), + ) + .await?; + + info!("Waiting for next block creation"); + tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await; + + // Create the ATA once + wallet::cli::execute_subcommand( + ctx.wallet_mut(), + Command::Ata(AtaSubcommand::Create { + owner: format_public_account_id(owner_account_id), + token_definition: definition_account_id.to_string(), + }), + ) + .await?; + + info!("Waiting for next block creation"); + tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await; + + // Create the ATA a second time — must succeed (idempotent) + wallet::cli::execute_subcommand( + ctx.wallet_mut(), + Command::Ata(AtaSubcommand::Create { + owner: format_public_account_id(owner_account_id), + token_definition: definition_account_id.to_string(), + }), + ) + .await?; + + info!("Waiting for next block creation"); + tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await; + + // State must be unchanged + let ata_program_id = Program::ata().id(); + let ata_id = get_associated_token_account_id( + &ata_program_id, + &compute_ata_seed(owner_account_id, 
definition_account_id), + ); + + let ata_acc = ctx + .sequencer_client() + .get_account(ata_id) + .await + .context("ATA account not found")?; + + assert_eq!(ata_acc.program_owner, Program::token().id()); + let holding = TokenHolding::try_from(&ata_acc.data)?; + assert_eq!( + holding, + TokenHolding::Fungible { + definition_id: definition_account_id, + balance: 0, + } + ); + + Ok(()) +} + +#[test] +async fn transfer_and_burn_via_ata() -> Result<()> { + let mut ctx = TestContext::new().await?; + + let definition_account_id = new_public_account(&mut ctx).await?; + let supply_account_id = new_public_account(&mut ctx).await?; + let sender_account_id = new_public_account(&mut ctx).await?; + let recipient_account_id = new_public_account(&mut ctx).await?; + + let total_supply = 1000_u128; + + // Create a fungible token, supply goes to supply_account_id + wallet::cli::execute_subcommand( + ctx.wallet_mut(), + Command::Token(TokenProgramAgnosticSubcommand::New { + definition_account_id: format_public_account_id(definition_account_id), + supply_account_id: format_public_account_id(supply_account_id), + name: "TEST".to_owned(), + total_supply, + }), + ) + .await?; + + info!("Waiting for next block creation"); + tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await; + + // Derive ATA addresses + let ata_program_id = Program::ata().id(); + let sender_ata_id = get_associated_token_account_id( + &ata_program_id, + &compute_ata_seed(sender_account_id, definition_account_id), + ); + let recipient_ata_id = get_associated_token_account_id( + &ata_program_id, + &compute_ata_seed(recipient_account_id, definition_account_id), + ); + + // Create ATAs for sender and recipient + wallet::cli::execute_subcommand( + ctx.wallet_mut(), + Command::Ata(AtaSubcommand::Create { + owner: format_public_account_id(sender_account_id), + token_definition: definition_account_id.to_string(), + }), + ) + .await?; + wallet::cli::execute_subcommand( + ctx.wallet_mut(), + 
Command::Ata(AtaSubcommand::Create { + owner: format_public_account_id(recipient_account_id), + token_definition: definition_account_id.to_string(), + }), + ) + .await?; + + info!("Waiting for next block creation"); + tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await; + + // Fund sender's ATA from the supply account (direct token transfer) + let fund_amount = 200_u128; + wallet::cli::execute_subcommand( + ctx.wallet_mut(), + Command::Token(TokenProgramAgnosticSubcommand::Send { + from: format_public_account_id(supply_account_id), + to: Some(format_public_account_id(sender_ata_id)), + to_npk: None, + to_vpk: None, + amount: fund_amount, + }), + ) + .await?; + + info!("Waiting for next block creation"); + tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await; + + // Transfer from sender's ATA to recipient's ATA via the ATA program + let transfer_amount = 50_u128; + wallet::cli::execute_subcommand( + ctx.wallet_mut(), + Command::Ata(AtaSubcommand::Send { + from: format_public_account_id(sender_account_id), + token_definition: definition_account_id.to_string(), + to: recipient_ata_id.to_string(), + amount: transfer_amount, + }), + ) + .await?; + + info!("Waiting for next block creation"); + tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await; + + // Verify sender ATA balance decreased + let sender_ata_acc = ctx.sequencer_client().get_account(sender_ata_id).await?; + let sender_holding = TokenHolding::try_from(&sender_ata_acc.data)?; + assert_eq!( + sender_holding, + TokenHolding::Fungible { + definition_id: definition_account_id, + balance: fund_amount - transfer_amount, + } + ); + + // Verify recipient ATA balance increased + let recipient_ata_acc = ctx.sequencer_client().get_account(recipient_ata_id).await?; + let recipient_holding = TokenHolding::try_from(&recipient_ata_acc.data)?; + assert_eq!( + recipient_holding, + TokenHolding::Fungible { + definition_id: definition_account_id, + 
balance: transfer_amount, + } + ); + + // Burn from sender's ATA + let burn_amount = 30_u128; + wallet::cli::execute_subcommand( + ctx.wallet_mut(), + Command::Ata(AtaSubcommand::Burn { + holder: format_public_account_id(sender_account_id), + token_definition: definition_account_id.to_string(), + amount: burn_amount, + }), + ) + .await?; + + info!("Waiting for next block creation"); + tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await; + + // Verify sender ATA balance after burn + let sender_ata_acc = ctx.sequencer_client().get_account(sender_ata_id).await?; + let sender_holding = TokenHolding::try_from(&sender_ata_acc.data)?; + assert_eq!( + sender_holding, + TokenHolding::Fungible { + definition_id: definition_account_id, + balance: fund_amount - transfer_amount - burn_amount, + } + ); + + // Verify the token definition total_supply decreased by burn_amount + let definition_acc = ctx + .sequencer_client() + .get_account(definition_account_id) + .await?; + let token_definition = TokenDefinition::try_from(&definition_acc.data)?; + assert_eq!( + token_definition, + TokenDefinition::Fungible { + name: "TEST".to_owned(), + total_supply: total_supply - burn_amount, + metadata_id: None, + } + ); + + Ok(()) +} + +#[test] +async fn create_ata_with_private_owner() -> Result<()> { + let mut ctx = TestContext::new().await?; + + let definition_account_id = new_public_account(&mut ctx).await?; + let supply_account_id = new_public_account(&mut ctx).await?; + let owner_account_id = new_private_account(&mut ctx).await?; + + // Create a fungible token + wallet::cli::execute_subcommand( + ctx.wallet_mut(), + Command::Token(TokenProgramAgnosticSubcommand::New { + definition_account_id: format_public_account_id(definition_account_id), + supply_account_id: format_public_account_id(supply_account_id), + name: "TEST".to_owned(), + total_supply: 100, + }), + ) + .await?; + + info!("Waiting for next block creation"); + 
tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await; + + // Create the ATA for the private owner + definition + wallet::cli::execute_subcommand( + ctx.wallet_mut(), + Command::Ata(AtaSubcommand::Create { + owner: format_private_account_id(owner_account_id), + token_definition: definition_account_id.to_string(), + }), + ) + .await?; + + info!("Waiting for next block creation"); + tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await; + + // Derive expected ATA address and check on-chain state + let ata_program_id = Program::ata().id(); + let ata_id = get_associated_token_account_id( + &ata_program_id, + &compute_ata_seed(owner_account_id, definition_account_id), + ); + + let ata_acc = ctx + .sequencer_client() + .get_account(ata_id) + .await + .context("ATA account not found")?; + + assert_eq!(ata_acc.program_owner, Program::token().id()); + let holding = TokenHolding::try_from(&ata_acc.data)?; + assert_eq!( + holding, + TokenHolding::Fungible { + definition_id: definition_account_id, + balance: 0, + } + ); + + // Verify the private owner's commitment is in state + let commitment = ctx + .wallet() + .get_private_account_commitment(owner_account_id) + .context("Private owner commitment not found")?; + assert!(verify_commitment_is_in_state(commitment, ctx.sequencer_client()).await); + + Ok(()) +} + +#[test] +async fn transfer_via_ata_private_owner() -> Result<()> { + let mut ctx = TestContext::new().await?; + + let definition_account_id = new_public_account(&mut ctx).await?; + let supply_account_id = new_public_account(&mut ctx).await?; + let sender_account_id = new_private_account(&mut ctx).await?; + let recipient_account_id = new_public_account(&mut ctx).await?; + + let total_supply = 1000_u128; + + // Create a fungible token + wallet::cli::execute_subcommand( + ctx.wallet_mut(), + Command::Token(TokenProgramAgnosticSubcommand::New { + definition_account_id: format_public_account_id(definition_account_id), + 
supply_account_id: format_public_account_id(supply_account_id), + name: "TEST".to_owned(), + total_supply, + }), + ) + .await?; + + info!("Waiting for next block creation"); + tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await; + + // Derive ATA addresses + let ata_program_id = Program::ata().id(); + let sender_ata_id = get_associated_token_account_id( + &ata_program_id, + &compute_ata_seed(sender_account_id, definition_account_id), + ); + let recipient_ata_id = get_associated_token_account_id( + &ata_program_id, + &compute_ata_seed(recipient_account_id, definition_account_id), + ); + + // Create ATAs for sender (private owner) and recipient (public owner) + wallet::cli::execute_subcommand( + ctx.wallet_mut(), + Command::Ata(AtaSubcommand::Create { + owner: format_private_account_id(sender_account_id), + token_definition: definition_account_id.to_string(), + }), + ) + .await?; + wallet::cli::execute_subcommand( + ctx.wallet_mut(), + Command::Ata(AtaSubcommand::Create { + owner: format_public_account_id(recipient_account_id), + token_definition: definition_account_id.to_string(), + }), + ) + .await?; + + info!("Waiting for next block creation"); + tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await; + + // Fund sender's ATA from the supply account (direct token transfer) + let fund_amount = 200_u128; + wallet::cli::execute_subcommand( + ctx.wallet_mut(), + Command::Token(TokenProgramAgnosticSubcommand::Send { + from: format_public_account_id(supply_account_id), + to: Some(format_public_account_id(sender_ata_id)), + to_npk: None, + to_vpk: None, + amount: fund_amount, + }), + ) + .await?; + + info!("Waiting for next block creation"); + tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await; + + // Transfer from sender's ATA (private owner) to recipient's ATA + let transfer_amount = 50_u128; + wallet::cli::execute_subcommand( + ctx.wallet_mut(), + Command::Ata(AtaSubcommand::Send { + from: 
format_private_account_id(sender_account_id), + token_definition: definition_account_id.to_string(), + to: recipient_ata_id.to_string(), + amount: transfer_amount, + }), + ) + .await?; + + info!("Waiting for next block creation"); + tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await; + + // Verify sender ATA balance decreased + let sender_ata_acc = ctx.sequencer_client().get_account(sender_ata_id).await?; + let sender_holding = TokenHolding::try_from(&sender_ata_acc.data)?; + assert_eq!( + sender_holding, + TokenHolding::Fungible { + definition_id: definition_account_id, + balance: fund_amount - transfer_amount, + } + ); + + // Verify recipient ATA balance increased + let recipient_ata_acc = ctx.sequencer_client().get_account(recipient_ata_id).await?; + let recipient_holding = TokenHolding::try_from(&recipient_ata_acc.data)?; + assert_eq!( + recipient_holding, + TokenHolding::Fungible { + definition_id: definition_account_id, + balance: transfer_amount, + } + ); + + // Verify the private sender's commitment is in state + let commitment = ctx + .wallet() + .get_private_account_commitment(sender_account_id) + .context("Private sender commitment not found")?; + assert!(verify_commitment_is_in_state(commitment, ctx.sequencer_client()).await); + + Ok(()) +} + +#[test] +async fn burn_via_ata_private_owner() -> Result<()> { + let mut ctx = TestContext::new().await?; + + let definition_account_id = new_public_account(&mut ctx).await?; + let supply_account_id = new_public_account(&mut ctx).await?; + let holder_account_id = new_private_account(&mut ctx).await?; + + let total_supply = 500_u128; + + // Create a fungible token + wallet::cli::execute_subcommand( + ctx.wallet_mut(), + Command::Token(TokenProgramAgnosticSubcommand::New { + definition_account_id: format_public_account_id(definition_account_id), + supply_account_id: format_public_account_id(supply_account_id), + name: "TEST".to_owned(), + total_supply, + }), + ) + .await?; + + 
info!("Waiting for next block creation"); + tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await; + + // Derive holder's ATA address + let ata_program_id = Program::ata().id(); + let holder_ata_id = get_associated_token_account_id( + &ata_program_id, + &compute_ata_seed(holder_account_id, definition_account_id), + ); + + // Create ATA for the private holder + wallet::cli::execute_subcommand( + ctx.wallet_mut(), + Command::Ata(AtaSubcommand::Create { + owner: format_private_account_id(holder_account_id), + token_definition: definition_account_id.to_string(), + }), + ) + .await?; + + info!("Waiting for next block creation"); + tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await; + + // Fund holder's ATA from the supply account + let fund_amount = 300_u128; + wallet::cli::execute_subcommand( + ctx.wallet_mut(), + Command::Token(TokenProgramAgnosticSubcommand::Send { + from: format_public_account_id(supply_account_id), + to: Some(format_public_account_id(holder_ata_id)), + to_npk: None, + to_vpk: None, + amount: fund_amount, + }), + ) + .await?; + + info!("Waiting for next block creation"); + tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await; + + // Burn from holder's ATA (private owner) + let burn_amount = 100_u128; + wallet::cli::execute_subcommand( + ctx.wallet_mut(), + Command::Ata(AtaSubcommand::Burn { + holder: format_private_account_id(holder_account_id), + token_definition: definition_account_id.to_string(), + amount: burn_amount, + }), + ) + .await?; + + info!("Waiting for next block creation"); + tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await; + + // Verify holder ATA balance after burn + let holder_ata_acc = ctx.sequencer_client().get_account(holder_ata_id).await?; + let holder_holding = TokenHolding::try_from(&holder_ata_acc.data)?; + assert_eq!( + holder_holding, + TokenHolding::Fungible { + definition_id: definition_account_id, + balance: 
fund_amount - burn_amount, + } + ); + + // Verify the token definition total_supply decreased by burn_amount + let definition_acc = ctx + .sequencer_client() + .get_account(definition_account_id) + .await?; + let token_definition = TokenDefinition::try_from(&definition_acc.data)?; + assert_eq!( + token_definition, + TokenDefinition::Fungible { + name: "TEST".to_owned(), + total_supply: total_supply - burn_amount, + metadata_id: None, + } + ); + + // Verify the private holder's commitment is in state + let commitment = ctx + .wallet() + .get_private_account_commitment(holder_account_id) + .context("Private holder commitment not found")?; + assert!(verify_commitment_is_in_state(commitment, ctx.sequencer_client()).await); + + Ok(()) +} diff --git a/integration_tests/tests/auth_transfer/private.rs b/integration_tests/tests/auth_transfer/private.rs index fb5643c8..59b4719a 100644 --- a/integration_tests/tests/auth_transfer/private.rs +++ b/integration_tests/tests/auth_transfer/private.rs @@ -8,6 +8,7 @@ use integration_tests::{ use log::info; use nssa::{AccountId, program::Program}; use nssa_core::{NullifierPublicKey, encryption::shared_key_derivation::Secp256k1Point}; +use sequencer_service_rpc::RpcClient as _; use tokio::test; use wallet::cli::{ Command, SubcommandReturnValue, @@ -135,7 +136,7 @@ async fn deshielded_transfer_to_public_account() -> Result<()> { let acc_2_balance = ctx.sequencer_client().get_account_balance(to).await?; assert_eq!(from_acc.balance, 9900); - assert_eq!(acc_2_balance.balance, 20100); + assert_eq!(acc_2_balance, 20100); info!("Successfully deshielded transfer to public account"); @@ -175,7 +176,7 @@ async fn private_transfer_to_owned_account_using_claiming_path() -> Result<()> { let command = Command::AuthTransfer(AuthTransferSubcommand::Send { from: format_private_account_id(from), to: None, - to_npk: Some(hex::encode(to_keys.nullifer_public_key.0)), + to_npk: Some(hex::encode(to_keys.nullifier_public_key.0)), to_vpk: 
Some(hex::encode(to_keys.viewing_public_key.0)), amount: 100, }); @@ -245,7 +246,7 @@ async fn shielded_transfer_to_owned_private_account() -> Result<()> { let acc_from_balance = ctx.sequencer_client().get_account_balance(from).await?; - assert_eq!(acc_from_balance.balance, 9900); + assert_eq!(acc_from_balance, 9900); assert_eq!(acc_to.balance, 20100); info!("Successfully shielded transfer to owned private account"); @@ -290,7 +291,7 @@ async fn shielded_transfer_to_foreign_account() -> Result<()> { .await ); - assert_eq!(acc_1_balance.balance, 9900); + assert_eq!(acc_1_balance, 9900); info!("Successfully shielded transfer to foreign account"); @@ -335,7 +336,7 @@ async fn private_transfer_to_owned_account_continuous_run_path() -> Result<()> { let command = Command::AuthTransfer(AuthTransferSubcommand::Send { from: format_private_account_id(from), to: None, - to_npk: Some(hex::encode(to_keys.nullifer_public_key.0)), + to_npk: Some(hex::encode(to_keys.nullifier_public_key.0)), to_vpk: Some(hex::encode(to_keys.viewing_public_key.0)), amount: 100, }); diff --git a/integration_tests/tests/auth_transfer/public.rs b/integration_tests/tests/auth_transfer/public.rs index ce73d62f..7f8c3836 100644 --- a/integration_tests/tests/auth_transfer/public.rs +++ b/integration_tests/tests/auth_transfer/public.rs @@ -4,6 +4,7 @@ use anyhow::Result; use integration_tests::{TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, format_public_account_id}; use log::info; use nssa::program::Program; +use sequencer_service_rpc::RpcClient as _; use tokio::test; use wallet::cli::{ Command, SubcommandReturnValue, @@ -41,8 +42,8 @@ async fn successful_transfer_to_existing_account() -> Result<()> { info!("Balance of sender: {acc_1_balance:#?}"); info!("Balance of receiver: {acc_2_balance:#?}"); - assert_eq!(acc_1_balance.balance, 9900); - assert_eq!(acc_2_balance.balance, 20100); + assert_eq!(acc_1_balance, 9900); + assert_eq!(acc_2_balance, 20100); Ok(()) } @@ -97,8 +98,8 @@ pub async fn 
successful_transfer_to_new_account() -> Result<()> { info!("Balance of sender: {acc_1_balance:#?}"); info!("Balance of receiver: {acc_2_balance:#?}"); - assert_eq!(acc_1_balance.balance, 9900); - assert_eq!(acc_2_balance.balance, 100); + assert_eq!(acc_1_balance, 9900); + assert_eq!(acc_2_balance, 100); Ok(()) } @@ -134,8 +135,8 @@ async fn failed_transfer_with_insufficient_balance() -> Result<()> { info!("Balance of sender: {acc_1_balance:#?}"); info!("Balance of receiver: {acc_2_balance:#?}"); - assert_eq!(acc_1_balance.balance, 10000); - assert_eq!(acc_2_balance.balance, 20000); + assert_eq!(acc_1_balance, 10000); + assert_eq!(acc_2_balance, 20000); Ok(()) } @@ -171,8 +172,8 @@ async fn two_consecutive_successful_transfers() -> Result<()> { info!("Balance of sender: {acc_1_balance:#?}"); info!("Balance of receiver: {acc_2_balance:#?}"); - assert_eq!(acc_1_balance.balance, 9900); - assert_eq!(acc_2_balance.balance, 20100); + assert_eq!(acc_1_balance, 9900); + assert_eq!(acc_2_balance, 20100); info!("First TX Success!"); @@ -203,8 +204,8 @@ async fn two_consecutive_successful_transfers() -> Result<()> { info!("Balance of sender: {acc_1_balance:#?}"); info!("Balance of receiver: {acc_2_balance:#?}"); - assert_eq!(acc_1_balance.balance, 9800); - assert_eq!(acc_2_balance.balance, 20200); + assert_eq!(acc_1_balance, 9800); + assert_eq!(acc_2_balance, 20200); info!("Second TX Success!"); @@ -230,11 +231,7 @@ async fn initialize_public_account() -> Result<()> { wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?; info!("Checking correct execution"); - let account = ctx - .sequencer_client() - .get_account(account_id) - .await? 
- .account; + let account = ctx.sequencer_client().get_account(account_id).await?; assert_eq!( account.program_owner, diff --git a/integration_tests/tests/block_size_limit.rs b/integration_tests/tests/block_size_limit.rs index 41c9fc76..72f773c9 100644 --- a/integration_tests/tests/block_size_limit.rs +++ b/integration_tests/tests/block_size_limit.rs @@ -8,11 +8,12 @@ use std::time::Duration; use anyhow::Result; use bytesize::ByteSize; -use common::{block::HashableBlockData, transaction::NSSATransaction}; +use common::transaction::NSSATransaction; use integration_tests::{ TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, config::SequencerPartialConfig, }; use nssa::program::Program; +use sequencer_service_rpc::RpcClient as _; use tokio::test; #[test] @@ -36,7 +37,10 @@ async fn reject_oversized_transaction() -> Result<()> { let tx = nssa::ProgramDeploymentTransaction::new(message); // Try to submit the transaction and expect an error - let result = ctx.sequencer_client().send_tx_program(tx).await; + let result = ctx + .sequencer_client() + .send_transaction(NSSATransaction::ProgramDeployment(tx)) + .await; assert!( result.is_err(), @@ -74,7 +78,10 @@ async fn accept_transaction_within_limit() -> Result<()> { let tx = nssa::ProgramDeploymentTransaction::new(message); // This should succeed - let result = ctx.sequencer_client().send_tx_program(tx).await; + let result = ctx + .sequencer_client() + .send_transaction(NSSATransaction::ProgramDeployment(tx)) + .await; assert!( result.is_ok(), @@ -112,33 +119,38 @@ async fn transaction_deferred_to_next_block_when_current_full() -> Result<()> { let burner_id = Program::new(burner_bytecode.clone())?.id(); let chain_caller_id = Program::new(chain_caller_bytecode.clone())?.id(); - let initial_block_height = ctx.sequencer_client().get_last_block().await?.last_block; + let initial_block_height = ctx.sequencer_client().get_last_block_id().await?; // Submit both program deployments ctx.sequencer_client() - 
.send_tx_program(nssa::ProgramDeploymentTransaction::new( - nssa::program_deployment_transaction::Message::new(burner_bytecode), + .send_transaction(NSSATransaction::ProgramDeployment( + nssa::ProgramDeploymentTransaction::new( + nssa::program_deployment_transaction::Message::new(burner_bytecode), + ), )) .await?; ctx.sequencer_client() - .send_tx_program(nssa::ProgramDeploymentTransaction::new( - nssa::program_deployment_transaction::Message::new(chain_caller_bytecode), + .send_transaction(NSSATransaction::ProgramDeployment( + nssa::ProgramDeploymentTransaction::new( + nssa::program_deployment_transaction::Message::new(chain_caller_bytecode), + ), )) .await?; // Wait for first block tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await; - let block1_response = ctx + let block1 = ctx .sequencer_client() .get_block(initial_block_height + 1) - .await?; - let block1: HashableBlockData = borsh::from_slice(&block1_response.block)?; + .await? + .unwrap(); // Check which program is in block 1 - let get_program_ids = |block: &HashableBlockData| -> Vec { + let get_program_ids = |block: &common::block::Block| -> Vec { block + .body .transactions .iter() .filter_map(|tx| { @@ -168,11 +180,11 @@ async fn transaction_deferred_to_next_block_when_current_full() -> Result<()> { // Wait for second block tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await; - let block2_response = ctx + let block2 = ctx .sequencer_client() .get_block(initial_block_height + 2) - .await?; - let block2: HashableBlockData = borsh::from_slice(&block2_response.block)?; + .await? 
+ .unwrap(); let block2_program_ids = get_program_ids(&block2); // The other program should be in block 2 diff --git a/integration_tests/tests/indexer.rs b/integration_tests/tests/indexer.rs index 0b947135..cb8cf0e9 100644 --- a/integration_tests/tests/indexer.rs +++ b/integration_tests/tests/indexer.rs @@ -22,12 +22,8 @@ async fn indexer_test_run() -> Result<()> { // RUN OBSERVATION tokio::time::sleep(std::time::Duration::from_millis(L2_TO_L1_TIMEOUT_MILLIS)).await; - let last_block_seq = ctx - .sequencer_client() - .get_last_block() - .await - .unwrap() - .last_block; + let last_block_seq = + sequencer_service_rpc::RpcClient::get_last_block_id(ctx.sequencer_client()).await?; info!("Last block on seq now is {last_block_seq}"); @@ -100,20 +96,22 @@ async fn indexer_state_consistency() -> Result<()> { tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await; info!("Checking correct balance move"); - let acc_1_balance = ctx - .sequencer_client() - .get_account_balance(ctx.existing_public_accounts()[0]) - .await?; - let acc_2_balance = ctx - .sequencer_client() - .get_account_balance(ctx.existing_public_accounts()[1]) - .await?; + let acc_1_balance = sequencer_service_rpc::RpcClient::get_account_balance( + ctx.sequencer_client(), + ctx.existing_public_accounts()[0], + ) + .await?; + let acc_2_balance = sequencer_service_rpc::RpcClient::get_account_balance( + ctx.sequencer_client(), + ctx.existing_public_accounts()[1], + ) + .await?; info!("Balance of sender: {acc_1_balance:#?}"); info!("Balance of receiver: {acc_2_balance:#?}"); - assert_eq!(acc_1_balance.balance, 9900); - assert_eq!(acc_2_balance.balance, 20100); + assert_eq!(acc_1_balance, 9900); + assert_eq!(acc_2_balance, 20100); // WAIT info!("Waiting for indexer to parse blocks"); @@ -131,16 +129,16 @@ async fn indexer_state_consistency() -> Result<()> { .unwrap(); info!("Checking correct state transition"); - let acc1_seq_state = ctx - .sequencer_client() - 
.get_account(ctx.existing_public_accounts()[0]) - .await? - .account; - let acc2_seq_state = ctx - .sequencer_client() - .get_account(ctx.existing_public_accounts()[1]) - .await? - .account; + let acc1_seq_state = sequencer_service_rpc::RpcClient::get_account( + ctx.sequencer_client(), + ctx.existing_public_accounts()[0], + ) + .await?; + let acc2_seq_state = sequencer_service_rpc::RpcClient::get_account( + ctx.sequencer_client(), + ctx.existing_public_accounts()[1], + ) + .await?; assert_eq!(acc1_ind_state, acc1_seq_state.into()); assert_eq!(acc2_ind_state, acc2_seq_state.into()); diff --git a/integration_tests/tests/keys_restoration.rs b/integration_tests/tests/keys_restoration.rs index f438ef70..cdbe2e6b 100644 --- a/integration_tests/tests/keys_restoration.rs +++ b/integration_tests/tests/keys_restoration.rs @@ -14,6 +14,7 @@ use integration_tests::{ use key_protocol::key_management::key_tree::chain_index::ChainIndex; use log::info; use nssa::{AccountId, program::Program}; +use sequencer_service_rpc::RpcClient as _; use tokio::test; use wallet::cli::{ Command, SubcommandReturnValue, @@ -70,7 +71,7 @@ async fn sync_private_account_with_non_zero_chain_index() -> Result<()> { let command = Command::AuthTransfer(AuthTransferSubcommand::Send { from: format_private_account_id(from), to: None, - to_npk: Some(hex::encode(to_keys.nullifer_public_key.0)), + to_npk: Some(hex::encode(to_keys.nullifier_public_key.0)), to_vpk: Some(hex::encode(to_keys.viewing_public_key.0)), amount: 100, }); @@ -305,8 +306,8 @@ async fn restore_keys_from_seed() -> Result<()> { .get_account_balance(to_account_id4) .await?; - assert_eq!(acc3.balance, 91); // 102 - 11 - assert_eq!(acc4.balance, 114); // 103 + 11 + assert_eq!(acc3, 91); // 102 - 11 + assert_eq!(acc4, 114); // 103 + 11 info!("Successfully restored keys and verified transactions"); diff --git a/integration_tests/tests/pinata.rs b/integration_tests/tests/pinata.rs index 38cfeac3..3285c216 100644 --- 
a/integration_tests/tests/pinata.rs +++ b/integration_tests/tests/pinata.rs @@ -13,6 +13,7 @@ use integration_tests::{ format_public_account_id, verify_commitment_is_in_state, }; use log::info; +use sequencer_service_rpc::RpcClient as _; use tokio::test; use wallet::cli::{ Command, SubcommandReturnValue, @@ -46,8 +47,7 @@ async fn claim_pinata_to_uninitialized_public_account_fails_fast() -> Result<()> let pinata_balance_pre = ctx .sequencer_client() .get_account_balance(PINATA_BASE58.parse().unwrap()) - .await? - .balance; + .await?; let claim_result = wallet::cli::execute_subcommand( ctx.wallet_mut(), @@ -70,8 +70,7 @@ async fn claim_pinata_to_uninitialized_public_account_fails_fast() -> Result<()> let pinata_balance_post = ctx .sequencer_client() .get_account_balance(PINATA_BASE58.parse().unwrap()) - .await? - .balance; + .await?; assert_eq!(pinata_balance_post, pinata_balance_pre); @@ -102,8 +101,7 @@ async fn claim_pinata_to_uninitialized_private_account_fails_fast() -> Result<() let pinata_balance_pre = ctx .sequencer_client() .get_account_balance(PINATA_BASE58.parse().unwrap()) - .await? - .balance; + .await?; let claim_result = wallet::cli::execute_subcommand( ctx.wallet_mut(), @@ -126,8 +124,7 @@ async fn claim_pinata_to_uninitialized_private_account_fails_fast() -> Result<() let pinata_balance_post = ctx .sequencer_client() .get_account_balance(PINATA_BASE58.parse().unwrap()) - .await? - .balance; + .await?; assert_eq!(pinata_balance_post, pinata_balance_pre); @@ -146,8 +143,7 @@ async fn claim_pinata_to_existing_public_account() -> Result<()> { let pinata_balance_pre = ctx .sequencer_client() .get_account_balance(PINATA_BASE58.parse().unwrap()) - .await? - .balance; + .await?; wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?; @@ -158,14 +154,12 @@ async fn claim_pinata_to_existing_public_account() -> Result<()> { let pinata_balance_post = ctx .sequencer_client() .get_account_balance(PINATA_BASE58.parse().unwrap()) - .await? 
- .balance; + .await?; let winner_balance_post = ctx .sequencer_client() .get_account_balance(ctx.existing_public_accounts()[0]) - .await? - .balance; + .await?; assert_eq!(pinata_balance_post, pinata_balance_pre - pinata_prize); assert_eq!(winner_balance_post, 10000 + pinata_prize); @@ -187,8 +181,7 @@ async fn claim_pinata_to_existing_private_account() -> Result<()> { let pinata_balance_pre = ctx .sequencer_client() .get_account_balance(PINATA_BASE58.parse().unwrap()) - .await? - .balance; + .await?; let result = wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?; let SubcommandReturnValue::PrivacyPreservingTransfer { tx_hash: _ } = result else { @@ -211,8 +204,7 @@ async fn claim_pinata_to_existing_private_account() -> Result<()> { let pinata_balance_post = ctx .sequencer_client() .get_account_balance(PINATA_BASE58.parse().unwrap()) - .await? - .balance; + .await?; assert_eq!(pinata_balance_post, pinata_balance_pre - pinata_prize); @@ -268,8 +260,7 @@ async fn claim_pinata_to_new_private_account() -> Result<()> { let pinata_balance_pre = ctx .sequencer_client() .get_account_balance(PINATA_BASE58.parse().unwrap()) - .await? - .balance; + .await?; wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?; @@ -285,8 +276,7 @@ async fn claim_pinata_to_new_private_account() -> Result<()> { let pinata_balance_post = ctx .sequencer_client() .get_account_balance(PINATA_BASE58.parse().unwrap()) - .await? 
- .balance; + .await?; assert_eq!(pinata_balance_post, pinata_balance_pre - pinata_prize); diff --git a/integration_tests/tests/program_deployment.rs b/integration_tests/tests/program_deployment.rs index 1feb7290..64f5a655 100644 --- a/integration_tests/tests/program_deployment.rs +++ b/integration_tests/tests/program_deployment.rs @@ -6,13 +6,18 @@ use std::{path::PathBuf, time::Duration}; use anyhow::Result; +use common::transaction::NSSATransaction; use integration_tests::{ NSSA_PROGRAM_FOR_TEST_DATA_CHANGER, TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, }; use log::info; -use nssa::{AccountId, program::Program}; +use nssa::program::Program; +use sequencer_service_rpc::RpcClient as _; use tokio::test; -use wallet::cli::Command; +use wallet::cli::{ + Command, SubcommandReturnValue, + account::{AccountSubcommand, NewSubcommand}, +}; #[test] async fn deploy_and_execute_program() -> Result<()> { @@ -38,32 +43,48 @@ async fn deploy_and_execute_program() -> Result<()> { // logic) let bytecode = std::fs::read(binary_filepath)?; let data_changer = Program::new(bytecode)?; - let account_id: AccountId = "11".repeat(16).parse()?; + + let SubcommandReturnValue::RegisterAccount { account_id } = wallet::cli::execute_subcommand( + ctx.wallet_mut(), + Command::Account(AccountSubcommand::New(NewSubcommand::Public { + cci: None, + label: None, + })), + ) + .await? 
+ else { + panic!("Expected RegisterAccount return value"); + }; + + let nonces = ctx.wallet().get_accounts_nonces(vec![account_id]).await?; + let private_key = ctx + .wallet() + .get_account_public_signing_key(account_id) + .unwrap(); let message = nssa::public_transaction::Message::try_new( data_changer.id(), vec![account_id], - vec![], + nonces, vec![0], )?; - let witness_set = nssa::public_transaction::WitnessSet::for_message(&message, &[]); + let witness_set = nssa::public_transaction::WitnessSet::for_message(&message, &[private_key]); let transaction = nssa::PublicTransaction::new(message, witness_set); - let _response = ctx.sequencer_client().send_tx_public(transaction).await?; + let _response = ctx + .sequencer_client() + .send_transaction(NSSATransaction::Public(transaction)) + .await?; info!("Waiting for next block creation"); // Waiting for long time as it may take some time for such a big transaction to be included in a // block tokio::time::sleep(Duration::from_secs(2 * TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await; - let post_state_account = ctx - .sequencer_client() - .get_account(account_id) - .await? 
- .account; + let post_state_account = ctx.sequencer_client().get_account(account_id).await?; assert_eq!(post_state_account.program_owner, data_changer.id()); assert_eq!(post_state_account.balance, 0); assert_eq!(post_state_account.data.as_ref(), &[0]); - assert_eq!(post_state_account.nonce.0, 0); + assert_eq!(post_state_account.nonce.0, 1); info!("Successfully deployed and executed program"); diff --git a/integration_tests/tests/token.rs b/integration_tests/tests/token.rs index d3fbfdc1..b638b6c9 100644 --- a/integration_tests/tests/token.rs +++ b/integration_tests/tests/token.rs @@ -14,6 +14,7 @@ use integration_tests::{ use key_protocol::key_management::key_tree::chain_index::ChainIndex; use log::info; use nssa::program::Program; +use sequencer_service_rpc::RpcClient as _; use token_core::{TokenDefinition, TokenHolding}; use tokio::test; use wallet::cli::{ @@ -92,8 +93,7 @@ async fn create_and_transfer_public_token() -> Result<()> { let definition_acc = ctx .sequencer_client() .get_account(definition_account_id) - .await? - .account; + .await?; let token_definition = TokenDefinition::try_from(&definition_acc.data)?; assert_eq!(definition_acc.program_owner, Program::token().id()); @@ -110,8 +110,7 @@ async fn create_and_transfer_public_token() -> Result<()> { let supply_acc = ctx .sequencer_client() .get_account(supply_account_id) - .await? - .account; + .await?; // The account must be owned by the token program assert_eq!(supply_acc.program_owner, Program::token().id()); @@ -143,8 +142,7 @@ async fn create_and_transfer_public_token() -> Result<()> { let supply_acc = ctx .sequencer_client() .get_account(supply_account_id) - .await? - .account; + .await?; assert_eq!(supply_acc.program_owner, Program::token().id()); let token_holding = TokenHolding::try_from(&supply_acc.data)?; assert_eq!( @@ -159,8 +157,7 @@ async fn create_and_transfer_public_token() -> Result<()> { let recipient_acc = ctx .sequencer_client() .get_account(recipient_account_id) - .await? 
- .account; + .await?; assert_eq!(recipient_acc.program_owner, Program::token().id()); let token_holding = TokenHolding::try_from(&recipient_acc.data)?; assert_eq!( @@ -188,8 +185,7 @@ async fn create_and_transfer_public_token() -> Result<()> { let definition_acc = ctx .sequencer_client() .get_account(definition_account_id) - .await? - .account; + .await?; let token_definition = TokenDefinition::try_from(&definition_acc.data)?; assert_eq!( @@ -205,8 +201,7 @@ async fn create_and_transfer_public_token() -> Result<()> { let recipient_acc = ctx .sequencer_client() .get_account(recipient_account_id) - .await? - .account; + .await?; let token_holding = TokenHolding::try_from(&recipient_acc.data)?; assert_eq!( @@ -236,8 +231,7 @@ async fn create_and_transfer_public_token() -> Result<()> { let definition_acc = ctx .sequencer_client() .get_account(definition_account_id) - .await? - .account; + .await?; let token_definition = TokenDefinition::try_from(&definition_acc.data)?; assert_eq!( @@ -253,8 +247,7 @@ async fn create_and_transfer_public_token() -> Result<()> { let recipient_acc = ctx .sequencer_client() .get_account(recipient_account_id) - .await? - .account; + .await?; let token_holding = TokenHolding::try_from(&recipient_acc.data)?; assert_eq!( @@ -341,8 +334,7 @@ async fn create_and_transfer_token_with_private_supply() -> Result<()> { let definition_acc = ctx .sequencer_client() .get_account(definition_account_id) - .await? - .account; + .await?; let token_definition = TokenDefinition::try_from(&definition_acc.data)?; assert_eq!(definition_acc.program_owner, Program::token().id()); @@ -405,8 +397,7 @@ async fn create_and_transfer_token_with_private_supply() -> Result<()> { let definition_acc = ctx .sequencer_client() .get_account(definition_account_id) - .await? 
- .account; + .await?; let token_definition = TokenDefinition::try_from(&definition_acc.data)?; assert_eq!( @@ -506,8 +497,7 @@ async fn create_token_with_private_definition() -> Result<()> { let supply_acc = ctx .sequencer_client() .get_account(supply_account_id) - .await? - .account; + .await?; assert_eq!(supply_acc.program_owner, Program::token().id()); let token_holding = TokenHolding::try_from(&supply_acc.data)?; @@ -586,8 +576,7 @@ async fn create_token_with_private_definition() -> Result<()> { let recipient_acc = ctx .sequencer_client() .get_account(recipient_account_id_public) - .await? - .account; + .await?; let token_holding = TokenHolding::try_from(&recipient_acc.data)?; assert_eq!( @@ -882,8 +871,7 @@ async fn shielded_token_transfer() -> Result<()> { let supply_acc = ctx .sequencer_client() .get_account(supply_account_id) - .await? - .account; + .await?; let token_holding = TokenHolding::try_from(&supply_acc.data)?; assert_eq!( token_holding, @@ -1026,8 +1014,7 @@ async fn deshielded_token_transfer() -> Result<()> { let recipient_acc = ctx .sequencer_client() .get_account(recipient_account_id) - .await? 
- .account; + .await?; let token_holding = TokenHolding::try_from(&recipient_acc.data)?; assert_eq!( token_holding, @@ -1123,7 +1110,7 @@ async fn token_claiming_path_with_private_accounts() -> Result<()> { let subcommand = TokenProgramAgnosticSubcommand::Mint { definition: format_private_account_id(definition_account_id), holder: None, - holder_npk: Some(hex::encode(holder_keys.nullifer_public_key.0)), + holder_npk: Some(hex::encode(holder_keys.nullifier_public_key.0)), holder_vpk: Some(hex::encode(holder_keys.viewing_public_key.0)), amount: mint_amount, }; diff --git a/integration_tests/tests/tps.rs b/integration_tests/tests/tps.rs index 1dee3a85..bd46849e 100644 --- a/integration_tests/tests/tps.rs +++ b/integration_tests/tests/tps.rs @@ -13,6 +13,7 @@ use std::time::{Duration, Instant}; use anyhow::Result; use bytesize::ByteSize; +use common::transaction::NSSATransaction; use integration_tests::{ TestContext, config::{InitialData, SequencerPartialConfig}, @@ -30,6 +31,7 @@ use nssa_core::{ account::{AccountWithMetadata, Nonce, data::Data}, encryption::ViewingPublicKey, }; +use sequencer_service_rpc::RpcClient as _; use tokio::test; pub(crate) struct TpsTestManager { @@ -153,10 +155,9 @@ pub async fn tps_test() -> Result<()> { for (i, tx) in txs.into_iter().enumerate() { let tx_hash = ctx .sequencer_client() - .send_tx_public(tx) + .send_transaction(NSSATransaction::Public(tx)) .await - .unwrap() - .tx_hash; + .unwrap(); info!("Sent tx {i}"); tx_hashes.push(tx_hash); } @@ -170,15 +171,13 @@ pub async fn tps_test() -> Result<()> { let tx_obj = ctx .sequencer_client() - .get_transaction_by_hash(*tx_hash) + .get_transaction(*tx_hash) .await .inspect_err(|err| { log::warn!("Failed to get transaction by hash {tx_hash} with error: {err:#?}"); }); - if let Ok(tx_obj) = tx_obj - && tx_obj.transaction.is_some() - { + if tx_obj.is_ok_and(|opt| opt.is_some()) { info!("Found tx {i} with hash {tx_hash}"); break; } diff --git a/integration_tests/tests/wallet_ffi.rs 
b/integration_tests/tests/wallet_ffi.rs index a32e4d98..ac548280 100644 --- a/integration_tests/tests/wallet_ffi.rs +++ b/integration_tests/tests/wallet_ffi.rs @@ -24,7 +24,6 @@ use log::info; use nssa::{Account, AccountId, PrivateKey, PublicKey, program::Program}; use nssa_core::program::DEFAULT_PROGRAM_ID; use tempfile::tempdir; -use wallet::WalletCore; use wallet_ffi::{ FfiAccount, FfiAccountList, FfiBytes32, FfiPrivateAccountKeys, FfiPublicAccountKey, FfiTransferResult, WalletHandle, error, @@ -211,14 +210,6 @@ fn new_wallet_ffi_with_default_config(password: &str) -> Result<*mut WalletHandl }) } -fn new_wallet_rust_with_default_config(password: &str) -> Result { - let tempdir = tempdir()?; - let config_path = tempdir.path().join("wallet_config.json"); - let storage_path = tempdir.path().join("storage.json"); - - WalletCore::new_init_storage(config_path, storage_path, None, password.to_owned()) -} - fn load_existing_ffi_wallet(home: &Path) -> Result<*mut WalletHandle> { let config_path = home.join("wallet_config.json"); let storage_path = home.join("storage.json"); @@ -232,19 +223,8 @@ fn load_existing_ffi_wallet(home: &Path) -> Result<*mut WalletHandle> { fn wallet_ffi_create_public_accounts() -> Result<()> { let password = "password_for_tests"; let n_accounts = 10; - // First `n_accounts` public accounts created with Rust wallet - let new_public_account_ids_rust = { - let mut account_ids = Vec::new(); - let mut wallet_rust = new_wallet_rust_with_default_config(password)?; - for _ in 0..n_accounts { - let account_id = wallet_rust.create_new_account_public(None).0; - account_ids.push(*account_id.value()); - } - account_ids - }; - - // First `n_accounts` public accounts created with wallet FFI + // Create `n_accounts` public accounts with wallet FFI let new_public_account_ids_ffi = unsafe { let mut account_ids = Vec::new(); @@ -258,7 +238,20 @@ fn wallet_ffi_create_public_accounts() -> Result<()> { account_ids }; - assert_eq!(new_public_account_ids_ffi, 
new_public_account_ids_rust); + // All returned IDs must be unique and non-zero + assert_eq!(new_public_account_ids_ffi.len(), n_accounts); + let unique: HashSet<_> = new_public_account_ids_ffi.iter().collect(); + assert_eq!( + unique.len(), + n_accounts, + "Duplicate public account IDs returned" + ); + assert!( + new_public_account_ids_ffi + .iter() + .all(|id| *id != [0_u8; 32]), + "Zero account ID returned" + ); Ok(()) } @@ -267,19 +260,7 @@ fn wallet_ffi_create_public_accounts() -> Result<()> { fn wallet_ffi_create_private_accounts() -> Result<()> { let password = "password_for_tests"; let n_accounts = 10; - // First `n_accounts` private accounts created with Rust wallet - let new_private_account_ids_rust = { - let mut account_ids = Vec::new(); - - let mut wallet_rust = new_wallet_rust_with_default_config(password)?; - for _ in 0..n_accounts { - let account_id = wallet_rust.create_new_account_private(None).0; - account_ids.push(*account_id.value()); - } - account_ids - }; - - // First `n_accounts` private accounts created with wallet FFI + // Create `n_accounts` private accounts with wallet FFI let new_private_account_ids_ffi = unsafe { let mut account_ids = Vec::new(); @@ -293,7 +274,20 @@ fn wallet_ffi_create_private_accounts() -> Result<()> { account_ids }; - assert_eq!(new_private_account_ids_ffi, new_private_account_ids_rust); + // All returned IDs must be unique and non-zero + assert_eq!(new_private_account_ids_ffi.len(), n_accounts); + let unique: HashSet<_> = new_private_account_ids_ffi.iter().collect(); + assert_eq!( + unique.len(), + n_accounts, + "Duplicate private account IDs returned" + ); + assert!( + new_private_account_ids_ffi + .iter() + .all(|id| *id != [0_u8; 32]), + "Zero account ID returned" + ); Ok(()) } @@ -349,28 +343,23 @@ fn wallet_ffi_save_and_load_persistent_storage() -> Result<()> { fn test_wallet_ffi_list_accounts() -> Result<()> { let password = "password_for_tests"; - // Create the wallet FFI - let wallet_ffi_handle = unsafe { + 
// Create the wallet FFI and track which account IDs were created as public/private + let (wallet_ffi_handle, created_public_ids, created_private_ids) = unsafe { let handle = new_wallet_ffi_with_default_config(password)?; - // Create 5 public accounts and 5 private accounts + let mut public_ids: Vec<[u8; 32]> = Vec::new(); + let mut private_ids: Vec<[u8; 32]> = Vec::new(); + + // Create 5 public accounts and 5 private accounts, recording their IDs for _ in 0..5 { let mut out_account_id = FfiBytes32::from_bytes([0; 32]); wallet_ffi_create_account_public(handle, &raw mut out_account_id); + public_ids.push(out_account_id.data); + wallet_ffi_create_account_private(handle, &raw mut out_account_id); + private_ids.push(out_account_id.data); } - handle - }; - - // Create the wallet Rust - let wallet_rust = { - let mut wallet = new_wallet_rust_with_default_config(password)?; - // Create 5 public accounts and 5 private accounts - for _ in 0..5 { - wallet.create_new_account_public(None); - wallet.create_new_account_private(None); - } - wallet + (handle, public_ids, private_ids) }; // Get the account list with FFI method @@ -380,15 +369,6 @@ fn test_wallet_ffi_list_accounts() -> Result<()> { out_list }; - let wallet_rust_account_ids = wallet_rust - .storage() - .user_data - .account_ids() - .collect::>(); - - // Assert same number of elements between Rust and FFI result - assert_eq!(wallet_rust_account_ids.len(), wallet_ffi_account_list.count); - let wallet_ffi_account_list_slice = unsafe { core::slice::from_raw_parts( wallet_ffi_account_list.entries, @@ -396,37 +376,38 @@ fn test_wallet_ffi_list_accounts() -> Result<()> { ) }; - // Assert same account ids between Rust and FFI result - assert_eq!( - wallet_rust_account_ids - .iter() - .map(nssa::AccountId::value) - .collect::>(), - wallet_ffi_account_list_slice - .iter() - .map(|entry| &entry.account_id.data) - .collect::>() - ); + // All created accounts must appear in the list + let listed_public_ids: HashSet<[u8; 32]> = 
wallet_ffi_account_list_slice + .iter() + .filter(|e| e.is_public) + .map(|e| e.account_id.data) + .collect(); + let listed_private_ids: HashSet<[u8; 32]> = wallet_ffi_account_list_slice + .iter() + .filter(|e| !e.is_public) + .map(|e| e.account_id.data) + .collect(); - // Assert `is_pub` flag is correct in the FFI result - for entry in wallet_ffi_account_list_slice { - let account_id = AccountId::new(entry.account_id.data); - let is_pub_default_in_rust_wallet = wallet_rust - .storage() - .user_data - .default_pub_account_signing_keys - .contains_key(&account_id); - let is_pub_key_tree_wallet_rust = wallet_rust - .storage() - .user_data - .public_key_tree - .account_id_map - .contains_key(&account_id); - - let is_public_in_rust_wallet = is_pub_default_in_rust_wallet || is_pub_key_tree_wallet_rust; - - assert_eq!(entry.is_public, is_public_in_rust_wallet); + for id in &created_public_ids { + assert!( + listed_public_ids.contains(id), + "Created public account not found in list with is_public=true" + ); } + for id in &created_private_ids { + assert!( + listed_private_ids.contains(id), + "Created private account not found in list with is_public=false" + ); + } + + // Total listed accounts must be at least the number we created + assert!( + wallet_ffi_account_list.count >= created_public_ids.len() + created_private_ids.len(), + "Listed account count ({}) is less than the number of created accounts ({})", + wallet_ffi_account_list.count, + created_public_ids.len() + created_private_ids.len() + ); unsafe { wallet_ffi_free_account_list(&raw mut wallet_ffi_account_list); @@ -606,7 +587,7 @@ fn test_wallet_ffi_get_private_account_keys() -> Result<()> { .unwrap() .0; - let expected_npk = &key_chain.nullifer_public_key; + let expected_npk = &key_chain.nullifier_public_key; let expected_vpk = &key_chain.viewing_public_key; assert_eq!(&keys.npk(), expected_npk); @@ -924,7 +905,7 @@ fn test_wallet_ffi_transfer_deshielded() -> Result<()> { let home = tempfile::tempdir()?; let 
wallet_ffi_handle = new_wallet_ffi_with_test_context_config(&ctx, home.path())?; let from: FfiBytes32 = (&ctx.ctx().existing_private_accounts()[0]).into(); - let to = FfiBytes32::from_bytes([37; 32]); + let to: FfiBytes32 = (&ctx.ctx().existing_public_accounts()[0]).into(); let amount: [u8; 16] = 100_u128.to_le_bytes(); let mut transfer_result = FfiTransferResult::default(); @@ -967,7 +948,7 @@ fn test_wallet_ffi_transfer_deshielded() -> Result<()> { }; assert_eq!(from_balance, 9900); - assert_eq!(to_balance, 100); + assert_eq!(to_balance, 10100); unsafe { wallet_ffi_free_transfer_result(&raw mut transfer_result); diff --git a/key_protocol/Cargo.toml b/key_protocol/Cargo.toml index 7a16b627..022f3ccd 100644 --- a/key_protocol/Cargo.toml +++ b/key_protocol/Cargo.toml @@ -8,8 +8,6 @@ license = { workspace = true } workspace = true [dependencies] -secp256k1 = "0.31.1" - nssa.workspace = true nssa_core.workspace = true common.workspace = true @@ -19,10 +17,12 @@ serde.workspace = true k256.workspace = true sha2.workspace = true rand.workspace = true -base58.workspace = true hex.workspace = true aes-gcm.workspace = true bip39.workspace = true hmac-sha512.workspace = true thiserror.workspace = true itertools.workspace = true + +[dev-dependencies] +base58.workspace = true diff --git a/key_protocol/src/key_management/key_tree/keys_private.rs b/key_protocol/src/key_management/key_tree/keys_private.rs index 3fcbb0d2..42130b1f 100644 --- a/key_protocol/src/key_management/key_tree/keys_private.rs +++ b/key_protocol/src/key_management/key_tree/keys_private.rs @@ -39,7 +39,7 @@ impl KeyNode for ChildKeysPrivate { value: ( KeyChain { secret_spending_key: ssk, - nullifer_public_key: npk, + nullifier_public_key: npk, viewing_public_key: vpk, private_key_holder: PrivateKeyHolder { nullifier_secret_key: nsk, @@ -54,10 +54,7 @@ impl KeyNode for ChildKeysPrivate { } fn nth_child(&self, cci: u32) -> Self { - #[expect( - clippy::arithmetic_side_effects, - reason = "Multiplying finite 
field scalars gives no unexpected side effects" - )] + #[expect(clippy::arithmetic_side_effects, reason = "TODO: fix later")] let parent_pt = Scalar::from_repr(self.value.0.private_key_holder.nullifier_secret_key.into()) .expect("Key generated as scalar, must be valid representation") @@ -67,7 +64,8 @@ impl KeyNode for ChildKeysPrivate { input.extend_from_slice(b"LEE_seed_priv"); input.extend_from_slice(&parent_pt.to_bytes()); - input.extend_from_slice(&cci.to_le_bytes()); + #[expect(clippy::big_endian_bytes, reason = "BIP-032 uses big endian")] + input.extend_from_slice(&cci.to_be_bytes()); let hash_value = hmac_sha512::HMAC::mac(input, self.ccc); @@ -90,7 +88,7 @@ impl KeyNode for ChildKeysPrivate { value: ( KeyChain { secret_spending_key: ssk, - nullifer_public_key: npk, + nullifier_public_key: npk, viewing_public_key: vpk, private_key_holder: PrivateKeyHolder { nullifier_secret_key: nsk, @@ -113,29 +111,38 @@ impl KeyNode for ChildKeysPrivate { } fn account_id(&self) -> nssa::AccountId { - nssa::AccountId::from(&self.value.0.nullifer_public_key) + nssa::AccountId::from(&self.value.0.nullifier_public_key) } } -impl<'keys> From<&'keys ChildKeysPrivate> for &'keys (KeyChain, nssa::Account) { - fn from(value: &'keys ChildKeysPrivate) -> Self { +#[expect( + clippy::single_char_lifetime_names, + reason = "TODO add meaningful name" +)] +impl<'a> From<&'a ChildKeysPrivate> for &'a (KeyChain, nssa::Account) { + fn from(value: &'a ChildKeysPrivate) -> Self { &value.value } } -impl<'keys> From<&'keys mut ChildKeysPrivate> for &'keys mut (KeyChain, nssa::Account) { - fn from(value: &'keys mut ChildKeysPrivate) -> Self { +#[expect( + clippy::single_char_lifetime_names, + reason = "TODO add meaningful name" +)] +impl<'a> From<&'a mut ChildKeysPrivate> for &'a mut (KeyChain, nssa::Account) { + fn from(value: &'a mut ChildKeysPrivate) -> Self { &mut value.value } } #[cfg(test)] mod tests { - use nssa_core::NullifierSecretKey; + use nssa_core::{NullifierPublicKey, 
NullifierSecretKey}; use super::*; use crate::key_management::{self, secret_holders::ViewingSecretKey}; + #[expect(clippy::redundant_type_annotations, reason = "TODO: clippy requires")] #[test] fn master_key_generation() { let seed: [u8; 64] = [ @@ -147,7 +154,7 @@ mod tests { let keys = ChildKeysPrivate::root(seed); - let expected_ssk = key_management::secret_holders::SecretSpendingKey([ + let expected_ssk: SecretSpendingKey = key_management::secret_holders::SecretSpendingKey([ 246, 79, 26, 124, 135, 95, 52, 51, 201, 27, 48, 194, 2, 144, 51, 219, 245, 128, 139, 222, 42, 195, 105, 33, 115, 97, 186, 0, 97, 14, 218, 191, ]); @@ -162,11 +169,11 @@ mod tests { 34, 234, 19, 222, 2, 22, 12, 163, 252, 88, 11, 0, 163, ]; - let expected_npk = nssa_core::NullifierPublicKey([ + let expected_npk: NullifierPublicKey = nssa_core::NullifierPublicKey([ 7, 123, 125, 191, 233, 183, 201, 4, 20, 214, 155, 210, 45, 234, 27, 240, 194, 111, 97, 247, 155, 113, 122, 246, 192, 0, 70, 61, 76, 71, 70, 2, ]); - let expected_vsk: ViewingSecretKey = [ + let expected_vsk = [ 155, 90, 54, 75, 228, 130, 68, 201, 129, 251, 180, 195, 250, 64, 34, 230, 241, 204, 216, 50, 149, 156, 10, 67, 208, 74, 9, 10, 47, 59, 50, 202, ]; @@ -179,7 +186,7 @@ mod tests { assert!(expected_ssk == keys.value.0.secret_spending_key); assert!(expected_ccc == keys.ccc); assert!(expected_nsk == keys.value.0.private_key_holder.nullifier_secret_key); - assert!(expected_npk == keys.value.0.nullifer_public_key); + assert!(expected_npk == keys.value.0.nullifier_public_key); assert!(expected_vsk == keys.value.0.private_key_holder.viewing_secret_key); assert!(expected_vpk_as_bytes == keys.value.0.viewing_public_key.to_bytes()); } @@ -197,31 +204,31 @@ mod tests { let child_node = ChildKeysPrivate::nth_child(&root_node, 42_u32); let expected_ccc: [u8; 32] = [ - 145, 59, 225, 32, 54, 168, 14, 45, 60, 253, 57, 202, 31, 86, 142, 234, 51, 57, 154, 88, - 132, 200, 92, 191, 220, 144, 42, 184, 108, 35, 226, 146, + 27, 73, 133, 213, 214, 
63, 217, 184, 164, 17, 172, 140, 223, 95, 255, 157, 11, 0, 58, + 53, 82, 147, 121, 120, 199, 50, 30, 28, 103, 24, 121, 187, ]; let expected_nsk: NullifierSecretKey = [ - 19, 100, 119, 73, 191, 225, 234, 219, 129, 88, 40, 229, 63, 225, 189, 136, 69, 172, - 221, 186, 147, 83, 150, 207, 70, 17, 228, 70, 113, 87, 227, 31, + 124, 61, 40, 92, 33, 135, 3, 41, 200, 234, 3, 69, 102, 184, 57, 191, 106, 151, 194, + 192, 103, 132, 141, 112, 249, 108, 192, 117, 24, 48, 70, 216, ]; let expected_npk = nssa_core::NullifierPublicKey([ - 133, 235, 223, 151, 12, 69, 26, 222, 60, 125, 235, 125, 167, 212, 201, 168, 101, 242, - 111, 239, 1, 228, 12, 252, 146, 53, 75, 17, 187, 255, 122, 181, + 116, 231, 246, 189, 145, 240, 37, 59, 219, 223, 216, 246, 116, 171, 223, 55, 197, 200, + 134, 192, 221, 40, 218, 167, 239, 5, 11, 95, 147, 247, 162, 226, ]); let expected_vsk: ViewingSecretKey = [ - 218, 219, 193, 132, 160, 6, 178, 194, 139, 248, 199, 81, 17, 133, 37, 201, 58, 104, 49, - 222, 187, 46, 156, 93, 14, 118, 209, 243, 38, 101, 77, 45, + 33, 155, 68, 60, 102, 70, 47, 105, 194, 129, 44, 26, 143, 198, 44, 244, 185, 31, 236, + 252, 205, 89, 138, 107, 39, 38, 154, 73, 109, 166, 41, 114, ]; let expected_vpk_as_bytes: [u8; 33] = [ - 3, 164, 65, 167, 88, 167, 179, 51, 159, 27, 241, 174, 77, 174, 142, 106, 128, 96, 69, - 74, 117, 231, 42, 193, 235, 153, 206, 116, 102, 7, 101, 192, 45, + 2, 78, 213, 113, 117, 105, 162, 248, 175, 68, 128, 232, 106, 204, 208, 159, 11, 78, 48, + 244, 127, 112, 46, 0, 93, 184, 1, 77, 132, 160, 75, 152, 88, ]; assert!(expected_ccc == child_node.ccc); assert!(expected_nsk == child_node.value.0.private_key_holder.nullifier_secret_key); - assert!(expected_npk == child_node.value.0.nullifer_public_key); + assert!(expected_npk == child_node.value.0.nullifier_public_key); assert!(expected_vsk == child_node.value.0.private_key_holder.viewing_secret_key); assert!(expected_vpk_as_bytes == child_node.value.0.viewing_public_key.to_bytes()); } diff --git 
a/key_protocol/src/key_management/key_tree/keys_public.rs b/key_protocol/src/key_management/key_tree/keys_public.rs index 470acaaa..d4c32b4a 100644 --- a/key_protocol/src/key_management/key_tree/keys_public.rs +++ b/key_protocol/src/key_management/key_tree/keys_public.rs @@ -1,4 +1,4 @@ -use secp256k1::Scalar; +use k256::elliptic_curve::{PrimeField as _, sec1::ToEncodedPoint as _}; use serde::{Deserialize, Serialize}; use crate::key_management::key_tree::traits::KeyNode; @@ -16,14 +16,22 @@ impl ChildKeysPublic { fn compute_hash_value(&self, cci: u32) -> [u8; 64] { let mut hash_input = vec![]; - if 2_u32.pow(31) > cci { - // Non-harden - hash_input.extend_from_slice(self.cpk.value()); + if ((2_u32).pow(31)).cmp(&cci) == std::cmp::Ordering::Greater { + // Non-harden. + // BIP-032 compatibility requires 1-byte header from the public_key; + // Not stored in `self.cpk.value()`. + let sk = k256::SecretKey::from_bytes(self.csk.value().into()) + .expect("32 bytes, within curve order"); + let pk = sk.public_key(); + hash_input.extend_from_slice(pk.to_encoded_point(true).as_bytes()); } else { - // Harden + // Harden. 
+ hash_input.extend_from_slice(&[0_u8]); hash_input.extend_from_slice(self.csk.value()); } - hash_input.extend_from_slice(&cci.to_le_bytes()); + + #[expect(clippy::big_endian_bytes, reason = "BIP-032 uses big endian")] + hash_input.extend_from_slice(&cci.to_be_bytes()); hmac_sha512::HMAC::mac(hash_input, self.ccc) } @@ -33,7 +41,12 @@ impl KeyNode for ChildKeysPublic { fn root(seed: [u8; 64]) -> Self { let hash_value = hmac_sha512::HMAC::mac(seed, "LEE_master_pub"); - let csk = nssa::PrivateKey::try_new(*hash_value.first_chunk::<32>().unwrap()).unwrap(); + let csk = nssa::PrivateKey::try_new( + *hash_value + .first_chunk::<32>() + .expect("hash_value is 64 bytes, must be safe to get first 32"), + ) + .expect("Expect a valid Private Key"); let ccc = *hash_value.last_chunk::<32>().unwrap(); let cpk = nssa::PublicKey::new_from_private_key(&csk); @@ -48,24 +61,20 @@ impl KeyNode for ChildKeysPublic { fn nth_child(&self, cci: u32) -> Self { let hash_value = self.compute_hash_value(cci); - let csk = secp256k1::SecretKey::from_byte_array( - *hash_value + let csk = nssa::PrivateKey::try_new({ + let hash_value = hash_value .first_chunk::<32>() - .expect("hash_value is 64 bytes, must be safe to get first 32"), - ) - .unwrap(); + .expect("hash_value is 64 bytes, must be safe to get first 32"); - let csk = nssa::PrivateKey::try_new( - csk.add_tweak(&Scalar::from_le_bytes(*self.csk.value()).unwrap()) - .expect("Expect a valid Scalar") - .secret_bytes(), - ) - .unwrap(); + let value_1 = + k256::Scalar::from_repr((*hash_value).into()).expect("Expect a valid k256 scalar"); + let value_2 = k256::Scalar::from_repr((*self.csk.value()).into()) + .expect("Expect a valid k256 scalar"); - assert!( - secp256k1::constants::CURVE_ORDER >= *csk.value(), - "Secret key cannot exceed curve order" - ); + let sum = value_1.add(&value_2); + sum.to_bytes().into() + }) + .expect("Expect a valid private key"); let ccc = *hash_value .last_chunk::<32>() @@ -94,8 +103,12 @@ impl KeyNode for 
ChildKeysPublic { } } -impl<'keys> From<&'keys ChildKeysPublic> for &'keys nssa::PrivateKey { - fn from(value: &'keys ChildKeysPublic) -> Self { +#[expect( + clippy::single_char_lifetime_names, + reason = "TODO add meaningful name" +)] +impl<'a> From<&'a ChildKeysPublic> for &'a nssa::PrivateKey { + fn from(value: &'a ChildKeysPublic) -> Self { &value.csk } } @@ -126,6 +139,7 @@ mod tests { 202, 148, 181, 228, 35, 222, 58, 84, 156, 24, 146, 86, ]) .unwrap(); + let expected_cpk: PublicKey = PublicKey::try_new([ 219, 141, 130, 105, 11, 203, 187, 124, 112, 75, 223, 22, 11, 164, 153, 127, 59, 247, 244, 166, 75, 66, 242, 224, 35, 156, 161, 75, 41, 51, 76, 245, @@ -149,26 +163,20 @@ mod tests { let cci = (2_u32).pow(31) + 13; let child_keys = ChildKeysPublic::nth_child(&root_keys, cci); - print!( - "{} {}", - child_keys.csk.value()[0], - child_keys.csk.value()[1] - ); - let expected_ccc = [ - 126, 175, 244, 41, 41, 173, 134, 103, 139, 140, 195, 86, 194, 147, 116, 48, 71, 107, - 253, 235, 114, 139, 60, 115, 226, 205, 215, 248, 240, 190, 196, 6, + 149, 226, 13, 4, 194, 12, 69, 29, 9, 234, 209, 119, 98, 4, 128, 91, 37, 103, 192, 31, + 130, 126, 123, 20, 90, 34, 173, 209, 101, 248, 155, 36, ]; let expected_csk: PrivateKey = PrivateKey::try_new([ - 128, 148, 53, 165, 222, 155, 163, 108, 186, 182, 124, 67, 90, 86, 59, 123, 95, 224, - 171, 4, 51, 131, 254, 57, 241, 178, 82, 161, 204, 206, 79, 107, + 9, 65, 33, 228, 25, 82, 219, 117, 91, 217, 11, 223, 144, 85, 246, 26, 123, 216, 107, + 213, 33, 52, 188, 22, 198, 246, 71, 46, 245, 174, 16, 47, ]) .unwrap(); let expected_cpk: PublicKey = PublicKey::try_new([ - 149, 240, 55, 15, 178, 67, 245, 254, 44, 141, 95, 223, 238, 62, 85, 11, 248, 9, 11, 40, - 69, 211, 116, 13, 189, 35, 8, 95, 233, 154, 129, 58, + 142, 143, 238, 159, 105, 165, 224, 252, 108, 62, 53, 209, 176, 219, 249, 38, 90, 241, + 201, 81, 194, 146, 236, 5, 83, 152, 238, 243, 138, 16, 229, 15, ]) .unwrap(); @@ -189,26 +197,20 @@ mod tests { let cci = 13; let child_keys = 
ChildKeysPublic::nth_child(&root_keys, cci); - print!( - "{} {}", - child_keys.csk.value()[0], - child_keys.csk.value()[1] - ); - let expected_ccc = [ - 50, 29, 113, 102, 49, 130, 64, 0, 247, 95, 135, 187, 118, 162, 65, 65, 194, 53, 189, - 242, 66, 178, 168, 2, 51, 193, 155, 72, 209, 2, 207, 251, + 79, 228, 242, 119, 211, 203, 198, 175, 95, 36, 4, 234, 139, 45, 137, 138, 54, 211, 187, + 16, 28, 79, 80, 232, 216, 101, 145, 19, 101, 220, 217, 141, ]; let expected_csk: PrivateKey = PrivateKey::try_new([ - 162, 32, 211, 190, 180, 74, 151, 246, 189, 93, 8, 57, 182, 239, 125, 245, 192, 255, 24, - 186, 251, 23, 194, 186, 252, 121, 190, 54, 147, 199, 1, 109, + 185, 147, 32, 242, 145, 91, 123, 77, 42, 33, 134, 84, 12, 165, 117, 70, 158, 201, 95, + 153, 14, 12, 92, 235, 128, 156, 194, 169, 68, 35, 165, 127, ]) .unwrap(); let expected_cpk: PublicKey = PublicKey::try_new([ - 183, 48, 207, 170, 221, 111, 118, 9, 40, 67, 123, 162, 159, 169, 34, 157, 23, 37, 232, - 102, 231, 187, 199, 191, 205, 146, 159, 22, 79, 100, 10, 223, + 119, 16, 145, 121, 97, 244, 186, 35, 136, 34, 140, 171, 206, 139, 11, 208, 207, 121, + 158, 45, 28, 22, 140, 98, 161, 179, 212, 173, 238, 220, 2, 34, ]) .unwrap(); @@ -230,19 +232,19 @@ mod tests { let child_keys = ChildKeysPublic::nth_child(&root_keys, cci); let expected_ccc = [ - 101, 15, 69, 152, 144, 22, 105, 89, 175, 21, 13, 50, 160, 167, 93, 80, 94, 99, 192, - 252, 1, 126, 196, 217, 149, 164, 60, 75, 237, 90, 104, 83, + 221, 208, 47, 189, 174, 152, 33, 25, 151, 114, 233, 191, 57, 15, 40, 140, 46, 87, 126, + 58, 215, 40, 246, 111, 166, 113, 183, 145, 173, 11, 27, 182, ]; let expected_csk: PrivateKey = PrivateKey::try_new([ - 46, 196, 131, 199, 190, 180, 250, 222, 41, 188, 221, 156, 255, 239, 251, 207, 239, 202, - 166, 216, 107, 236, 195, 48, 167, 69, 97, 13, 132, 117, 76, 89, + 223, 29, 87, 189, 126, 24, 117, 225, 190, 57, 0, 143, 207, 168, 231, 139, 170, 192, 81, + 254, 126, 10, 115, 42, 141, 157, 70, 171, 199, 231, 198, 132, ]) .unwrap(); let 
expected_cpk: PublicKey = PublicKey::try_new([ - 93, 151, 154, 238, 175, 198, 53, 146, 255, 43, 37, 52, 214, 165, 69, 161, 38, 20, 68, - 166, 143, 80, 149, 216, 124, 203, 240, 114, 168, 111, 33, 83, + 96, 123, 245, 51, 214, 216, 215, 205, 70, 145, 105, 221, 166, 169, 122, 27, 94, 112, + 228, 110, 249, 177, 85, 173, 180, 248, 185, 199, 112, 246, 83, 33, ]) .unwrap(); diff --git a/key_protocol/src/key_management/key_tree/mod.rs b/key_protocol/src/key_management/key_tree/mod.rs index a94e8291..08a576e5 100644 --- a/key_protocol/src/key_management/key_tree/mod.rs +++ b/key_protocol/src/key_management/key_tree/mod.rs @@ -1,7 +1,7 @@ -use std::{collections::BTreeMap, sync::Arc}; +use std::collections::BTreeMap; use anyhow::Result; -use common::sequencer_client::SequencerClient; +use nssa::{Account, AccountId}; use serde::{Deserialize, Serialize}; use crate::key_management::{ @@ -197,40 +197,6 @@ impl KeyTree { } impl KeyTree { - /// Cleanup of all non-initialized accounts in a private tree. - /// - /// For given `depth` checks children to a tree such that their `ChainIndex::depth(&self) < - /// depth`. - /// - /// If account is default, removes them. - /// - /// Chain must be parsed for accounts beforehand. - /// - /// Fast, leaves gaps between accounts. - pub fn cleanup_tree_remove_uninit_for_depth(&mut self, depth: u32) { - let mut id_stack = vec![ChainIndex::root()]; - - while let Some(curr_id) = id_stack.pop() { - if let Some(node) = self.key_map.get(&curr_id) - && node.value.1 == nssa::Account::default() - && curr_id != ChainIndex::root() - { - let addr = node.account_id(); - self.remove(addr); - } - - let mut next_id = curr_id.nth_child(0); - - while (next_id.depth()) < depth { - id_stack.push(next_id.clone()); - next_id = match next_id.next_in_line() { - Some(id) => id, - None => break, - }; - } - } - } - /// Cleanup of non-initialized accounts in a private tree. /// /// If account is default, removes them, stops at first non-default account. 
@@ -259,56 +225,17 @@ impl KeyTree { } impl KeyTree { - /// Cleanup of all non-initialized accounts in a public tree. - /// - /// For given `depth` checks children to a tree such that their `ChainIndex::depth(&self) < - /// depth`. - /// - /// If account is default, removes them. - /// - /// Fast, leaves gaps between accounts. - pub async fn cleanup_tree_remove_ininit_for_depth( - &mut self, - depth: u32, - client: Arc, - ) -> Result<()> { - let mut id_stack = vec![ChainIndex::root()]; - - while let Some(curr_id) = id_stack.pop() { - if let Some(node) = self.key_map.get(&curr_id) { - let address = node.account_id(); - let node_acc = client.get_account(address).await?.account; - - if node_acc == nssa::Account::default() && curr_id != ChainIndex::root() { - self.remove(address); - } - } - - let mut next_id = curr_id.nth_child(0); - - while (next_id.depth()) < depth { - id_stack.push(next_id.clone()); - next_id = match next_id.next_in_line() { - Some(id) => id, - None => break, - }; - } - } - - Ok(()) - } - /// Cleanup of non-initialized accounts in a public tree. /// /// If account is default, removes them, stops at first non-default account. /// - /// Walks through tree in lairs of same depth using `ChainIndex::chain_ids_at_depth()`. + /// Walks through tree in layers of same depth using `ChainIndex::chain_ids_at_depth()`. /// /// Slow, maintains tree consistency. 
- pub async fn cleanup_tree_remove_uninit_layered( + pub async fn cleanup_tree_remove_uninit_layered>>( &mut self, depth: u32, - client: Arc, + get_account: impl Fn(AccountId) -> F, ) -> Result<()> { let depth = usize::try_from(depth).expect("Depth is expected to fit in usize"); 'outer: for i in (1..depth).rev() { @@ -316,7 +243,7 @@ impl KeyTree { for id in ChainIndex::chain_ids_at_depth(i) { if let Some(node) = self.key_map.get(&id) { let address = node.account_id(); - let node_acc = client.get_account(address).await?.account; + let node_acc = get_account(address).await?; if node_acc == nssa::Account::default() { let addr = node.account_id(); diff --git a/key_protocol/src/key_management/mod.rs b/key_protocol/src/key_management/mod.rs index e29e5862..c038c415 100644 --- a/key_protocol/src/key_management/mod.rs +++ b/key_protocol/src/key_management/mod.rs @@ -16,7 +16,7 @@ pub type PublicAccountSigningKey = [u8; 32]; pub struct KeyChain { pub secret_spending_key: SecretSpendingKey, pub private_key_holder: PrivateKeyHolder, - pub nullifer_public_key: NullifierPublicKey, + pub nullifier_public_key: NullifierPublicKey, pub viewing_public_key: ViewingPublicKey, } @@ -30,35 +30,38 @@ impl KeyChain { let private_key_holder = secret_spending_key.produce_private_key_holder(None); - let nullifer_public_key = private_key_holder.generate_nullifier_public_key(); + let nullifier_public_key = private_key_holder.generate_nullifier_public_key(); let viewing_public_key = private_key_holder.generate_viewing_public_key(); Self { secret_spending_key, private_key_holder, - nullifer_public_key, + nullifier_public_key, viewing_public_key, } } #[must_use] - pub fn new_mnemonic(passphrase: String) -> Self { + pub fn new_mnemonic(passphrase: &str) -> (Self, bip39::Mnemonic) { // Currently dropping SeedHolder at the end of initialization. // Not entirely sure if we need it in the future. 
- let seed_holder = SeedHolder::new_mnemonic(passphrase); + let (seed_holder, mnemonic) = SeedHolder::new_mnemonic(passphrase); let secret_spending_key = seed_holder.produce_top_secret_key_holder(); let private_key_holder = secret_spending_key.produce_private_key_holder(None); - let nullifer_public_key = private_key_holder.generate_nullifier_public_key(); + let nullifier_public_key = private_key_holder.generate_nullifier_public_key(); let viewing_public_key = private_key_holder.generate_viewing_public_key(); - Self { - secret_spending_key, - private_key_holder, - nullifer_public_key, - viewing_public_key, - } + ( + Self { + secret_spending_key, + private_key_holder, + nullifier_public_key, + viewing_public_key, + }, + mnemonic, + ) } #[must_use] @@ -93,7 +96,7 @@ mod tests { // Check that key holder fields are initialized with expected types assert_ne!( - account_id_key_holder.nullifer_public_key.as_ref(), + account_id_key_holder.nullifier_public_key.as_ref(), &[0_u8; 32] ); } @@ -119,7 +122,7 @@ mod tests { let utxo_secret_key_holder = top_secret_key_holder.produce_private_key_holder(None); - let nullifer_public_key = utxo_secret_key_holder.generate_nullifier_public_key(); + let nullifier_public_key = utxo_secret_key_holder.generate_nullifier_public_key(); let viewing_public_key = utxo_secret_key_holder.generate_viewing_public_key(); let pub_account_signing_key = nssa::PrivateKey::new_os_random(); @@ -150,7 +153,7 @@ mod tests { println!("Account {:?}", account.value().to_base58()); println!( "Nulifier public key {:?}", - hex::encode(nullifer_public_key.to_byte_array()) + hex::encode(nullifier_public_key.to_byte_array()) ); println!( "Viewing public key {:?}", @@ -183,7 +186,7 @@ mod tests { fn non_trivial_chain_index() { let keys = account_with_chain_index_2_for_tests(); - let eph_key_holder = EphemeralKeyHolder::new(&keys.nullifer_public_key); + let eph_key_holder = EphemeralKeyHolder::new(&keys.nullifier_public_key); let key_sender = 
eph_key_holder.calculate_shared_secret_sender(&keys.viewing_public_key); let key_receiver = keys.calculate_shared_secret_receiver( diff --git a/key_protocol/src/key_management/secret_holders.rs b/key_protocol/src/key_management/secret_holders.rs index db39757e..9804ba39 100644 --- a/key_protocol/src/key_management/secret_holders.rs +++ b/key_protocol/src/key_management/secret_holders.rs @@ -8,29 +8,26 @@ use rand::{RngCore as _, rngs::OsRng}; use serde::{Deserialize, Serialize}; use sha2::{Digest as _, digest::FixedOutput as _}; -const NSSA_ENTROPY_BYTES: [u8; 32] = [0; 32]; - -#[derive(Debug)] /// Seed holder. Non-clonable to ensure that different holders use different seeds. /// Produces `TopSecretKeyHolder` objects. +#[derive(Debug)] pub struct SeedHolder { // ToDo: Needs to be vec as serde derives is not implemented for [u8; 64] pub(crate) seed: Vec, } -#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] /// Secret spending key object. Can produce `PrivateKeyHolder` objects. -pub struct SecretSpendingKey(pub(crate) [u8; 32]); +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +pub struct SecretSpendingKey(pub [u8; 32]); pub type ViewingSecretKey = Scalar; #[derive(Serialize, Deserialize, Debug, Clone)] /// Private key holder. Produces public keys. Can produce `account_id`. Can produce shared secret /// for recepient. 
-#[expect(clippy::partial_pub_fields, reason = "TODO: fix later")] pub struct PrivateKeyHolder { pub nullifier_secret_key: NullifierSecretKey, - pub(crate) viewing_secret_key: ViewingSecretKey, + pub viewing_secret_key: ViewingSecretKey, } impl SeedHolder { @@ -49,9 +46,24 @@ impl SeedHolder { } #[must_use] - pub fn new_mnemonic(passphrase: String) -> Self { - let mnemonic = Mnemonic::from_entropy(&NSSA_ENTROPY_BYTES) - .expect("Enthropy must be a multiple of 32 bytes"); + pub fn new_mnemonic(passphrase: &str) -> (Self, Mnemonic) { + let mut entropy_bytes: [u8; 32] = [0; 32]; + OsRng.fill_bytes(&mut entropy_bytes); + + let mnemonic = + Mnemonic::from_entropy(&entropy_bytes).expect("Entropy must be a multiple of 32 bytes"); + let seed_wide = mnemonic.to_seed(passphrase); + + ( + Self { + seed: seed_wide.to_vec(), + }, + mnemonic, + ) + } + + #[must_use] + pub fn from_mnemonic(mnemonic: &Mnemonic, passphrase: &str) -> Self { let seed_wide = mnemonic.to_seed(passphrase); Self { @@ -79,6 +91,7 @@ impl SeedHolder { impl SecretSpendingKey { #[must_use] + #[expect(clippy::big_endian_bytes, reason = "BIP-032 uses big endian")] pub fn generate_nullifier_secret_key(&self, index: Option) -> NullifierSecretKey { const PREFIX: &[u8; 8] = b"LEE/keys"; const SUFFIX_1: &[u8; 1] = &[1]; @@ -93,13 +106,14 @@ impl SecretSpendingKey { hasher.update(PREFIX); hasher.update(self.0); hasher.update(SUFFIX_1); - hasher.update(index.to_le_bytes()); + hasher.update(index.to_be_bytes()); hasher.update(SUFFIX_2); ::from(hasher.finalize_fixed()) } #[must_use] + #[expect(clippy::big_endian_bytes, reason = "BIP-032 uses big endian")] pub fn generate_viewing_secret_key(&self, index: Option) -> ViewingSecretKey { const PREFIX: &[u8; 8] = b"LEE/keys"; const SUFFIX_1: &[u8; 1] = &[2]; @@ -114,7 +128,7 @@ impl SecretSpendingKey { hasher.update(PREFIX); hasher.update(self.0); hasher.update(SUFFIX_1); - hasher.update(index.to_le_bytes()); + hasher.update(index.to_be_bytes()); hasher.update(SUFFIX_2); 
hasher.finalize_fixed().into() @@ -174,12 +188,63 @@ mod tests { } #[test] - fn two_seeds_generated_same_from_same_mnemonic() { - let mnemonic = "test_pass"; + fn two_seeds_recovered_same_from_same_mnemonic() { + let passphrase = "test_pass"; - let seed_holder1 = SeedHolder::new_mnemonic(mnemonic.to_owned()); - let seed_holder2 = SeedHolder::new_mnemonic(mnemonic.to_owned()); + // Generate a mnemonic with random entropy + let (original_seed_holder, mnemonic) = SeedHolder::new_mnemonic(passphrase); - assert_eq!(seed_holder1.seed, seed_holder2.seed); + // Recover from the same mnemonic + let recovered_seed_holder = SeedHolder::from_mnemonic(&mnemonic, passphrase); + + assert_eq!(original_seed_holder.seed, recovered_seed_holder.seed); + } + + #[test] + fn new_mnemonic_generates_different_seeds_each_time() { + let (seed_holder1, mnemonic1) = SeedHolder::new_mnemonic(""); + let (seed_holder2, mnemonic2) = SeedHolder::new_mnemonic(""); + + // Different entropy should produce different mnemonics and seeds + assert_ne!(mnemonic1.to_string(), mnemonic2.to_string()); + assert_ne!(seed_holder1.seed, seed_holder2.seed); + } + + #[test] + fn new_mnemonic_generates_24_word_phrase() { + let (_seed_holder, mnemonic) = SeedHolder::new_mnemonic(""); + + // 256 bits of entropy produces a 24-word mnemonic + let word_count = mnemonic.to_string().split_whitespace().count(); + assert_eq!(word_count, 24); + } + + #[test] + fn new_mnemonic_produces_valid_seed_length() { + let (seed_holder, _mnemonic) = SeedHolder::new_mnemonic(""); + + assert_eq!(seed_holder.seed.len(), 64); + } + + #[test] + fn different_passphrases_produce_different_seeds() { + let (_seed_holder, mnemonic) = SeedHolder::new_mnemonic(""); + + let seed_with_pass_a = SeedHolder::from_mnemonic(&mnemonic, "password_a"); + let seed_with_pass_b = SeedHolder::from_mnemonic(&mnemonic, "password_b"); + + // Same mnemonic but different passphrases should produce different seeds + assert_ne!(seed_with_pass_a.seed, 
seed_with_pass_b.seed); + } + + #[test] + fn empty_passphrase_is_deterministic() { + let (_seed_holder, mnemonic) = SeedHolder::new_mnemonic(""); + + let seed1 = SeedHolder::from_mnemonic(&mnemonic, ""); + let seed2 = SeedHolder::from_mnemonic(&mnemonic, ""); + + // Same mnemonic and passphrase should always produce the same seed + assert_eq!(seed1.seed, seed2.seed); } } diff --git a/key_protocol/src/key_protocol_core/mod.rs b/key_protocol/src/key_protocol_core/mod.rs index abc1135f..8186865f 100644 --- a/key_protocol/src/key_protocol_core/mod.rs +++ b/key_protocol/src/key_protocol_core/mod.rs @@ -46,7 +46,7 @@ impl NSSAUserData { ) -> bool { let mut check_res = true; for (account_id, (key, _)) in accounts_keys_map { - let expected_account_id = nssa::AccountId::from(&key.nullifer_public_key); + let expected_account_id = nssa::AccountId::from(&key.nullifier_public_key); if expected_account_id != *account_id { println!("{expected_account_id}, {account_id}"); check_res = false; @@ -66,13 +66,13 @@ impl NSSAUserData { ) -> Result { if !Self::valid_public_key_transaction_pairing_check(&default_accounts_keys) { anyhow::bail!( - "Key transaction pairing check not satisfied, there is account_ids, which is not derived from keys" + "Key transaction pairing check not satisfied, there are public account_ids, which are not derived from keys" ); } if !Self::valid_private_key_transaction_pairing_check(&default_accounts_key_chains) { anyhow::bail!( - "Key transaction pairing check not satisfied, there is account_ids, which is not derived from keys" + "Key transaction pairing check not satisfied, there are private account_ids, which are not derived from keys" ); } @@ -181,11 +181,12 @@ impl NSSAUserData { impl Default for NSSAUserData { fn default() -> Self { + let (seed_holder, _mnemonic) = SeedHolder::new_mnemonic(""); Self::new_with_accounts( BTreeMap::new(), BTreeMap::new(), - KeyTreePublic::new(&SeedHolder::new_mnemonic("default".to_owned())), - 
KeyTreePrivate::new(&SeedHolder::new_mnemonic("default".to_owned())), + KeyTreePublic::new(&seed_holder), + KeyTreePrivate::new(&seed_holder), ) .unwrap() } diff --git a/nssa/Cargo.toml b/nssa/Cargo.toml index b50f189b..d8f0807c 100644 --- a/nssa/Cargo.toml +++ b/nssa/Cargo.toml @@ -9,16 +9,18 @@ workspace = true [dependencies] nssa_core = { workspace = true, features = ["host"] } +clock_core.workspace = true anyhow.workspace = true thiserror.workspace = true risc0-zkvm.workspace = true serde.workspace = true +serde_with.workspace = true sha2.workspace = true rand.workspace = true borsh.workspace = true hex.workspace = true -secp256k1 = "0.31.1" +k256.workspace = true risc0-binfmt = "3.0.2" log.workspace = true diff --git a/nssa/core/src/circuit_io.rs b/nssa/core/src/circuit_io.rs index 56d63022..998f6d71 100644 --- a/nssa/core/src/circuit_io.rs +++ b/nssa/core/src/circuit_io.rs @@ -5,7 +5,7 @@ use crate::{ NullifierSecretKey, SharedSecretKey, account::{Account, AccountWithMetadata}, encryption::Ciphertext, - program::{ProgramId, ProgramOutput}, + program::{BlockValidityWindow, ProgramId, ProgramOutput, TimestampValidityWindow}, }; #[derive(Serialize, Deserialize)] @@ -36,6 +36,8 @@ pub struct PrivacyPreservingCircuitOutput { pub ciphertexts: Vec, pub new_commitments: Vec, pub new_nullifiers: Vec<(Nullifier, CommitmentSetDigest)>, + pub block_validity_window: BlockValidityWindow, + pub timestamp_validity_window: TimestampValidityWindow, } #[cfg(feature = "host")] @@ -101,6 +103,8 @@ mod tests { ), [0xab; 32], )], + block_validity_window: (1..).into(), + timestamp_validity_window: TimestampValidityWindow::new_unbounded(), }; let bytes = output.to_bytes(); let output_from_slice: PrivacyPreservingCircuitOutput = from_slice(&bytes).unwrap(); diff --git a/nssa/core/src/commitment.rs b/nssa/core/src/commitment.rs index 36730dd0..24d5de87 100644 --- a/nssa/core/src/commitment.rs +++ b/nssa/core/src/commitment.rs @@ -12,8 +12,8 @@ use crate::{NullifierPublicKey, 
account::Account}; /// DUMMY_COMMITMENT = hasher.digest() /// ``` pub const DUMMY_COMMITMENT: Commitment = Commitment([ - 130, 75, 48, 230, 171, 101, 121, 141, 159, 118, 21, 74, 135, 248, 16, 255, 238, 156, 61, 24, - 165, 33, 34, 172, 227, 30, 215, 20, 85, 47, 230, 29, + 55, 228, 215, 207, 112, 221, 239, 49, 238, 79, 71, 135, 155, 15, 184, 45, 104, 74, 51, 211, + 238, 42, 160, 243, 15, 124, 253, 62, 3, 229, 90, 27, ]); /// The hash of the dummy commitment. @@ -24,8 +24,8 @@ pub const DUMMY_COMMITMENT: Commitment = Commitment([ /// DUMMY_COMMITMENT_HASH = hasher.digest() /// ``` pub const DUMMY_COMMITMENT_HASH: [u8; 32] = [ - 170, 10, 217, 228, 20, 35, 189, 177, 238, 235, 97, 129, 132, 89, 96, 247, 86, 91, 222, 214, 38, - 194, 216, 67, 56, 251, 208, 226, 0, 117, 149, 39, + 250, 237, 192, 113, 155, 101, 119, 30, 235, 183, 20, 84, 26, 32, 196, 229, 154, 74, 254, 249, + 129, 241, 118, 39, 41, 253, 141, 171, 184, 71, 8, 41, ]; #[derive(Serialize, Deserialize, BorshSerialize, BorshDeserialize)] @@ -50,10 +50,14 @@ impl std::fmt::Debug for Commitment { impl Commitment { /// Generates the commitment to a private account owned by user for npk: - /// SHA256(npk || `program_owner` || balance || nonce || SHA256(data)). + /// SHA256( `Comm_DS` || npk || `program_owner` || balance || nonce || SHA256(data)). #[must_use] pub fn new(npk: &NullifierPublicKey, account: &Account) -> Self { + const COMMITMENT_PREFIX: &[u8; 32] = + b"/LEE/v0.3/Commitment/\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"; + let mut bytes = Vec::new(); + bytes.extend_from_slice(COMMITMENT_PREFIX); bytes.extend_from_slice(&npk.to_byte_array()); let account_bytes_with_hashed_data = { let mut this = Vec::new(); diff --git a/nssa/core/src/lib.rs b/nssa/core/src/lib.rs index 8014c7ca..a4fcdee1 100644 --- a/nssa/core/src/lib.rs +++ b/nssa/core/src/lib.rs @@ -21,3 +21,7 @@ pub mod program; #[cfg(feature = "host")] pub mod error; + +pub type BlockId = u64; +/// Unix timestamp in milliseconds. 
+pub type Timestamp = u64; diff --git a/nssa/core/src/nullifier.rs b/nssa/core/src/nullifier.rs index 6ba59860..0e15ec74 100644 --- a/nssa/core/src/nullifier.rs +++ b/nssa/core/src/nullifier.rs @@ -55,7 +55,7 @@ pub type NullifierSecretKey = [u8; 32]; #[derive(Serialize, Deserialize, BorshSerialize, BorshDeserialize)] #[cfg_attr( any(feature = "host", test), - derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash) + derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash) )] pub struct Nullifier(pub(super) [u8; 32]); @@ -76,7 +76,7 @@ impl Nullifier { /// Computes a nullifier for an account update. #[must_use] pub fn for_account_update(commitment: &Commitment, nsk: &NullifierSecretKey) -> Self { - const UPDATE_PREFIX: &[u8; 32] = b"/NSSA/v0.2/Nullifier/Update/\x00\x00\x00\x00"; + const UPDATE_PREFIX: &[u8; 32] = b"/LEE/v0.3/Nullifier/Update/\x00\x00\x00\x00\x00"; let mut bytes = UPDATE_PREFIX.to_vec(); bytes.extend_from_slice(&commitment.to_byte_array()); bytes.extend_from_slice(nsk); @@ -86,7 +86,7 @@ impl Nullifier { /// Computes a nullifier for an account initialization. 
#[must_use] pub fn for_account_initialization(npk: &NullifierPublicKey) -> Self { - const INIT_PREFIX: &[u8; 32] = b"/NSSA/v0.2/Nullifier/Initialize/"; + const INIT_PREFIX: &[u8; 32] = b"/LEE/v0.3/Nullifier/Initialize/\x00"; let mut bytes = INIT_PREFIX.to_vec(); bytes.extend_from_slice(&npk.to_byte_array()); Self(Impl::hash_bytes(&bytes).as_bytes().try_into().unwrap()) @@ -102,8 +102,8 @@ mod tests { let commitment = Commitment((0..32_u8).collect::>().try_into().unwrap()); let nsk = [0x42; 32]; let expected_nullifier = Nullifier([ - 148, 243, 116, 209, 140, 231, 211, 61, 35, 62, 114, 110, 143, 224, 82, 201, 221, 34, - 53, 80, 185, 48, 174, 28, 203, 43, 94, 187, 85, 199, 115, 81, + 70, 162, 122, 15, 33, 237, 244, 216, 89, 223, 90, 50, 94, 184, 210, 144, 174, 64, 189, + 254, 62, 255, 5, 1, 139, 227, 194, 185, 16, 30, 55, 48, ]); let nullifier = Nullifier::for_account_update(&commitment, &nsk); assert_eq!(nullifier, expected_nullifier); @@ -116,8 +116,8 @@ mod tests { 255, 29, 105, 42, 186, 43, 11, 157, 168, 132, 225, 17, 163, ]); let expected_nullifier = Nullifier([ - 1, 6, 59, 168, 16, 146, 65, 252, 255, 91, 48, 85, 116, 189, 110, 218, 110, 136, 163, - 193, 245, 103, 51, 27, 235, 170, 215, 115, 97, 144, 36, 238, + 149, 59, 95, 181, 2, 194, 20, 143, 72, 233, 104, 243, 59, 70, 67, 243, 110, 77, 109, + 132, 139, 111, 51, 125, 128, 92, 107, 46, 252, 4, 20, 149, ]); let nullifier = Nullifier::for_account_initialization(&npk); assert_eq!(nullifier, expected_nullifier); diff --git a/nssa/core/src/program.rs b/nssa/core/src/program.rs index 31b76b0f..a08fb2b4 100644 --- a/nssa/core/src/program.rs +++ b/nssa/core/src/program.rs @@ -1,9 +1,14 @@ use std::collections::HashSet; +#[cfg(any(feature = "host", test))] +use borsh::{BorshDeserialize, BorshSerialize}; use risc0_zkvm::{DeserializeOwned, guest::env, serde::Deserializer}; use serde::{Deserialize, Serialize}; -use crate::account::{Account, AccountId, AccountWithMetadata}; +use crate::{ + BlockId, Timestamp, + 
account::{Account, AccountId, AccountWithMetadata}, +}; pub const DEFAULT_PROGRAM_ID: ProgramId = [0; 8]; pub const MAX_NUMBER_CHAINED_CALLS: usize = 10; @@ -11,6 +16,8 @@ pub const MAX_NUMBER_CHAINED_CALLS: usize = 10; pub type ProgramId = [u32; 8]; pub type InstructionData = Vec; pub struct ProgramInput { + pub self_program_id: ProgramId, + pub caller_program_id: Option, pub pre_states: Vec, pub instruction: T, } @@ -20,7 +27,7 @@ pub struct ProgramInput { /// Each program can derive up to `2^256` unique account IDs by choosing different /// seeds. PDAs allow programs to control namespaced account identifiers without /// collisions between programs. -#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq)] +#[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize)] pub struct PdaSeed([u8; 32]); impl PdaSeed { @@ -89,11 +96,26 @@ impl ChainedCall { /// A post state may optionally request that the executing program /// becomes the owner of the account (a “claim”). This is used to signal /// that the program intends to take ownership of the account. -#[derive(Debug, Serialize, Deserialize, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] #[cfg_attr(any(feature = "host", test), derive(PartialEq, Eq))] pub struct AccountPostState { account: Account, - claim: bool, + claim: Option, +} + +/// A claim request for an account, indicating that the executing program intends to take ownership +/// of the account. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +pub enum Claim { + /// The program requests ownership of the account which was authorized by the signer. + /// + /// Note that it's possible to successfully execute program outputting [`AccountPostState`] with + /// `is_authorized == false` and `claim == Some(Claim::Authorized)`. + /// This will give no error if program had authorization in pre state and may be useful + /// if program decides to give up authorization for a chained call. 
+ Authorized, + /// The program requests ownership of the account through a PDA. + Pda(PdaSeed), } impl AccountPostState { @@ -103,7 +125,7 @@ impl AccountPostState { pub const fn new(account: Account) -> Self { Self { account, - claim: false, + claim: None, } } @@ -111,25 +133,27 @@ impl AccountPostState { /// This indicates that the executing program intends to claim the /// account as its own and is allowed to mutate it. #[must_use] - pub const fn new_claimed(account: Account) -> Self { + pub const fn new_claimed(account: Account, claim: Claim) -> Self { Self { account, - claim: true, + claim: Some(claim), } } /// Creates a post state that requests ownership of the account /// if the account's program owner is the default program ID. #[must_use] - pub fn new_claimed_if_default(account: Account) -> Self { - let claim = account.program_owner == DEFAULT_PROGRAM_ID; - Self { account, claim } + pub fn new_claimed_if_default(account: Account, claim: Claim) -> Self { + let is_default_owner = account.program_owner == DEFAULT_PROGRAM_ID; + Self { + account, + claim: is_default_owner.then_some(claim), + } } - /// Returns `true` if this post state requests that the account - /// be claimed (owned) by the executing program. + /// Returns whether this post state requires a claim. #[must_use] - pub const fn requires_claim(&self) -> bool { + pub const fn required_claim(&self) -> Option { self.claim } @@ -140,6 +164,7 @@ impl AccountPostState { } /// Returns the underlying account. 
+ #[must_use] pub const fn account_mut(&mut self) -> &mut Account { &mut self.account } @@ -151,15 +176,209 @@ impl AccountPostState { } } +pub type BlockValidityWindow = ValidityWindow; +pub type TimestampValidityWindow = ValidityWindow; + +#[derive(Clone, Copy, Serialize, Deserialize)] +#[cfg_attr( + any(feature = "host", test), + derive(Debug, PartialEq, Eq, BorshSerialize, BorshDeserialize) +)] +pub struct ValidityWindow { + from: Option, + to: Option, +} + +impl ValidityWindow { + /// Creates a window with no bounds. + #[must_use] + pub const fn new_unbounded() -> Self { + Self { + from: None, + to: None, + } + } +} + +impl ValidityWindow { + /// Valid for values in the range [from, to), where `from` is included and `to` is excluded. + #[must_use] + pub fn is_valid_for(&self, value: T) -> bool { + self.from.is_none_or(|start| value >= start) && self.to.is_none_or(|end| value < end) + } + + /// Returns `Err(InvalidWindow)` if both bounds are set and `from >= to`. + fn check_window(&self) -> Result<(), InvalidWindow> { + if let (Some(from), Some(to)) = (self.from, self.to) + && from >= to + { + return Err(InvalidWindow); + } + Ok(()) + } + + /// Inclusive lower bound. `None` means no lower bound. + #[must_use] + pub const fn start(&self) -> Option { + self.from + } + + /// Exclusive upper bound. `None` means no upper bound. 
+ #[must_use] + pub const fn end(&self) -> Option { + self.to + } +} + +impl TryFrom<(Option, Option)> for ValidityWindow { + type Error = InvalidWindow; + + fn try_from(value: (Option, Option)) -> Result { + let this = Self { + from: value.0, + to: value.1, + }; + this.check_window()?; + Ok(this) + } +} + +impl TryFrom> for ValidityWindow { + type Error = InvalidWindow; + + fn try_from(value: std::ops::Range) -> Result { + (Some(value.start), Some(value.end)).try_into() + } +} + +impl From> for ValidityWindow { + fn from(value: std::ops::RangeFrom) -> Self { + Self { + from: Some(value.start), + to: None, + } + } +} + +impl From> for ValidityWindow { + fn from(value: std::ops::RangeTo) -> Self { + Self { + from: None, + to: Some(value.end), + } + } +} + +impl From for ValidityWindow { + fn from(_: std::ops::RangeFull) -> Self { + Self::new_unbounded() + } +} + +#[derive(Debug, thiserror::Error, Clone, Copy, PartialEq, Eq)] +#[error("Invalid window")] +pub struct InvalidWindow; + #[derive(Serialize, Deserialize, Clone)] #[cfg_attr(any(feature = "host", test), derive(Debug, PartialEq, Eq))] +#[must_use = "ProgramOutput does nothing unless written"] pub struct ProgramOutput { + /// The program ID of the program that produced this output. + pub self_program_id: ProgramId, + /// The program ID of the caller that invoked this program via a chained call, + /// or `None` if this is a top-level call. + pub caller_program_id: Option, /// The instruction data the program received to produce this output. pub instruction_data: InstructionData, /// The account pre states the program received to produce this output. pub pre_states: Vec, + /// The account post states the program execution produced. pub post_states: Vec, + /// The list of chained calls to other programs. pub chained_calls: Vec, + /// The block ID window where the program output is valid. + pub block_validity_window: BlockValidityWindow, + /// The timestamp window where the program output is valid. 
+ pub timestamp_validity_window: TimestampValidityWindow, +} + +impl ProgramOutput { + pub const fn new( + self_program_id: ProgramId, + caller_program_id: Option, + instruction_data: InstructionData, + pre_states: Vec, + post_states: Vec, + ) -> Self { + Self { + self_program_id, + caller_program_id, + instruction_data, + pre_states, + post_states, + chained_calls: Vec::new(), + block_validity_window: ValidityWindow::new_unbounded(), + timestamp_validity_window: ValidityWindow::new_unbounded(), + } + } + + pub fn write(self) { + env::commit(&self); + } + + pub fn with_chained_calls(mut self, chained_calls: Vec) -> Self { + self.chained_calls = chained_calls; + self + } + + /// Sets the block ID validity window from an infallible range conversion (`1..`, `..5`, `..`). + pub fn with_block_validity_window>(mut self, window: W) -> Self { + self.block_validity_window = window.into(); + self + } + + /// Sets the block ID validity window from a fallible range conversion (`1..5`). + /// Returns `Err` if the range is empty. + pub fn try_with_block_validity_window< + W: TryInto, + >( + mut self, + window: W, + ) -> Result { + self.block_validity_window = window.try_into()?; + Ok(self) + } + + /// Sets the timestamp validity window from an infallible range conversion. + pub fn with_timestamp_validity_window>( + mut self, + window: W, + ) -> Self { + self.timestamp_validity_window = window.into(); + self + } + + /// Sets the timestamp validity window from a fallible range conversion. + /// Returns `Err` if the range is empty. 
+ pub fn try_with_timestamp_validity_window< + W: TryInto, + >( + mut self, + window: W, + ) -> Result { + self.timestamp_validity_window = window.try_into()?; + Ok(self) + } + + pub fn valid_from_timestamp(mut self, ts: Option) -> Result { + self.timestamp_validity_window = (ts, self.timestamp_validity_window.end()).try_into()?; + Ok(self) + } + + pub fn valid_until_timestamp(mut self, ts: Option) -> Result { + self.timestamp_validity_window = (self.timestamp_validity_window.start(), ts).try_into()?; + Ok(self) + } } /// Representation of a number as `lo + hi * 2^128`. @@ -207,11 +426,15 @@ pub fn compute_authorized_pdas( /// Reads the NSSA inputs from the guest environment. #[must_use] pub fn read_nssa_inputs() -> (ProgramInput, InstructionData) { + let self_program_id: ProgramId = env::read(); + let caller_program_id: Option = env::read(); let pre_states: Vec = env::read(); let instruction_words: InstructionData = env::read(); let instruction = T::deserialize(&mut Deserializer::new(instruction_words.as_ref())).unwrap(); ( ProgramInput { + self_program_id, + caller_program_id, pre_states, instruction, }, @@ -219,35 +442,6 @@ pub fn read_nssa_inputs() -> (ProgramInput, InstructionD ) } -pub fn write_nssa_outputs( - instruction_data: InstructionData, - pre_states: Vec, - post_states: Vec, -) { - let output = ProgramOutput { - instruction_data, - pre_states, - post_states, - chained_calls: Vec::new(), - }; - env::commit(&output); -} - -pub fn write_nssa_outputs_with_chained_call( - instruction_data: InstructionData, - pre_states: Vec, - post_states: Vec, - chained_calls: Vec, -) { - let output = ProgramOutput { - instruction_data, - pre_states, - post_states, - chained_calls, - }; - env::commit(&output); -} - /// Validates well-behaved program execution. 
/// /// # Parameters @@ -342,6 +536,135 @@ fn validate_uniqueness_of_account_ids(pre_states: &[AccountWithMetadata]) -> boo mod tests { use super::*; + #[test] + fn validity_window_unbounded_accepts_any_value() { + let w: ValidityWindow = ValidityWindow::new_unbounded(); + assert!(w.is_valid_for(0)); + assert!(w.is_valid_for(u64::MAX)); + } + + #[test] + fn validity_window_bounded_range_includes_from_excludes_to() { + let w: ValidityWindow = (Some(5), Some(10)).try_into().unwrap(); + assert!(!w.is_valid_for(4)); + assert!(w.is_valid_for(5)); + assert!(w.is_valid_for(9)); + assert!(!w.is_valid_for(10)); + } + + #[test] + fn validity_window_only_from_bound() { + let w: ValidityWindow = (Some(5), None).try_into().unwrap(); + assert!(!w.is_valid_for(4)); + assert!(w.is_valid_for(5)); + assert!(w.is_valid_for(u64::MAX)); + } + + #[test] + fn validity_window_only_to_bound() { + let w: ValidityWindow = (None, Some(5)).try_into().unwrap(); + assert!(w.is_valid_for(0)); + assert!(w.is_valid_for(4)); + assert!(!w.is_valid_for(5)); + } + + #[test] + fn validity_window_adjacent_bounds_are_invalid() { + // [5, 5) is an empty range — from == to + assert!(ValidityWindow::::try_from((Some(5), Some(5))).is_err()); + } + + #[test] + fn validity_window_inverted_bounds_are_invalid() { + assert!(ValidityWindow::::try_from((Some(10), Some(5))).is_err()); + } + + #[test] + fn validity_window_getters_match_construction() { + let w: ValidityWindow = (Some(3), Some(7)).try_into().unwrap(); + assert_eq!(w.start(), Some(3)); + assert_eq!(w.end(), Some(7)); + } + + #[test] + fn validity_window_getters_for_unbounded() { + let w: ValidityWindow = ValidityWindow::new_unbounded(); + assert_eq!(w.start(), None); + assert_eq!(w.end(), None); + } + + #[test] + fn validity_window_from_range() { + let w: ValidityWindow = ValidityWindow::try_from(5_u64..10).unwrap(); + assert_eq!(w.start(), Some(5)); + assert_eq!(w.end(), Some(10)); + } + + #[test] + fn validity_window_from_range_empty_is_invalid() { + 
assert!(ValidityWindow::::try_from(5_u64..5).is_err()); + } + + #[test] + fn validity_window_from_range_inverted_is_invalid() { + let from = 10_u64; + let to = 5_u64; + assert!(ValidityWindow::::try_from(from..to).is_err()); + } + + #[test] + fn validity_window_from_range_from() { + let w: ValidityWindow = (5_u64..).into(); + assert_eq!(w.start(), Some(5)); + assert_eq!(w.end(), None); + } + + #[test] + fn validity_window_from_range_to() { + let w: ValidityWindow = (..10_u64).into(); + assert_eq!(w.start(), None); + assert_eq!(w.end(), Some(10)); + } + + #[test] + fn validity_window_from_range_full() { + let w: ValidityWindow = (..).into(); + assert_eq!(w.start(), None); + assert_eq!(w.end(), None); + } + + #[test] + fn program_output_try_with_block_validity_window_range() { + let output = ProgramOutput::new(DEFAULT_PROGRAM_ID, None, vec![], vec![], vec![]) + .try_with_block_validity_window(10_u64..100) + .unwrap(); + assert_eq!(output.block_validity_window.start(), Some(10)); + assert_eq!(output.block_validity_window.end(), Some(100)); + } + + #[test] + fn program_output_with_block_validity_window_range_from() { + let output = ProgramOutput::new(DEFAULT_PROGRAM_ID, None, vec![], vec![], vec![]) + .with_block_validity_window(10_u64..); + assert_eq!(output.block_validity_window.start(), Some(10)); + assert_eq!(output.block_validity_window.end(), None); + } + + #[test] + fn program_output_with_block_validity_window_range_to() { + let output = ProgramOutput::new(DEFAULT_PROGRAM_ID, None, vec![], vec![], vec![]) + .with_block_validity_window(..100_u64); + assert_eq!(output.block_validity_window.start(), None); + assert_eq!(output.block_validity_window.end(), Some(100)); + } + + #[test] + fn program_output_try_with_block_validity_window_empty_range_fails() { + let result = ProgramOutput::new(DEFAULT_PROGRAM_ID, None, vec![], vec![], vec![]) + .try_with_block_validity_window(5_u64..5); + assert!(result.is_err()); + } + #[test] fn post_state_new_with_claim_constructor() { 
let account = Account { @@ -351,10 +674,10 @@ mod tests { nonce: 10_u128.into(), }; - let account_post_state = AccountPostState::new_claimed(account.clone()); + let account_post_state = AccountPostState::new_claimed(account.clone(), Claim::Authorized); assert_eq!(account, account_post_state.account); - assert!(account_post_state.requires_claim()); + assert_eq!(account_post_state.required_claim(), Some(Claim::Authorized)); } #[test] @@ -369,7 +692,7 @@ mod tests { let account_post_state = AccountPostState::new(account.clone()); assert_eq!(account, account_post_state.account); - assert!(!account_post_state.requires_claim()); + assert!(account_post_state.required_claim().is_none()); } #[test] diff --git a/nssa/src/error.rs b/nssa/src/error.rs index 3576b366..61966515 100644 --- a/nssa/src/error.rs +++ b/nssa/src/error.rs @@ -29,7 +29,10 @@ pub enum NssaError { Io(#[from] io::Error), #[error("Invalid Public Key")] - InvalidPublicKey(#[source] secp256k1::Error), + InvalidPublicKey(#[source] k256::schnorr::Error), + + #[error("Invalid hex for public key")] + InvalidHexPublicKey(hex::FromHexError), #[error("Risc0 error: {0}")] ProgramWriteInputFailed(String), @@ -69,6 +72,9 @@ pub enum NssaError { #[error("Max account nonce reached")] MaxAccountNonceReached, + + #[error("Execution outside of the validity window")] + OutOfValidityWindow, } #[cfg(test)] diff --git a/nssa/src/lib.rs b/nssa/src/lib.rs index bc7cf121..f4c3be9d 100644 --- a/nssa/src/lib.rs +++ b/nssa/src/lib.rs @@ -16,7 +16,11 @@ pub use program_deployment_transaction::ProgramDeploymentTransaction; pub use program_methods::PRIVACY_PRESERVING_CIRCUIT_ID; pub use public_transaction::PublicTransaction; pub use signature::{PrivateKey, PublicKey, Signature}; -pub use state::V02State; +pub use state::{ + CLOCK_01_PROGRAM_ACCOUNT_ID, CLOCK_10_PROGRAM_ACCOUNT_ID, CLOCK_50_PROGRAM_ACCOUNT_ID, + CLOCK_PROGRAM_ACCOUNT_IDS, V03State, +}; +pub use validated_state_diff::ValidatedStateDiff; pub mod encoding; pub mod error; @@ 
-27,6 +31,7 @@ pub mod program_deployment_transaction; pub mod public_transaction; mod signature; mod state; +mod validated_state_diff; pub mod program_methods { include!(concat!(env!("OUT_DIR"), "/program_methods/mod.rs")); diff --git a/nssa/src/privacy_preserving_transaction/circuit.rs b/nssa/src/privacy_preserving_transaction/circuit.rs index 2ab141a3..6c174450 100644 --- a/nssa/src/privacy_preserving_transaction/circuit.rs +++ b/nssa/src/privacy_preserving_transaction/circuit.rs @@ -87,15 +87,16 @@ pub fn execute_and_prove( pda_seeds: vec![], }; - let mut chained_calls = VecDeque::from_iter([(initial_call, initial_program)]); + let mut chained_calls = VecDeque::from_iter([(initial_call, initial_program, None)]); let mut chain_calls_counter = 0; - while let Some((chained_call, program)) = chained_calls.pop_front() { + while let Some((chained_call, program, caller_program_id)) = chained_calls.pop_front() { if chain_calls_counter >= MAX_NUMBER_CHAINED_CALLS { return Err(NssaError::MaxChainedCallsDepthExceeded); } let inner_receipt = execute_and_prove_program( program, + caller_program_id, &chained_call.pre_states, &chained_call.instruction_data, )?; @@ -115,7 +116,7 @@ pub fn execute_and_prove( let next_program = dependencies .get(&new_call.program_id) .ok_or(NssaError::InvalidProgramBehavior)?; - chained_calls.push_front((new_call, next_program)); + chained_calls.push_front((new_call, next_program, Some(chained_call.program_id))); } chain_calls_counter = chain_calls_counter @@ -153,12 +154,19 @@ pub fn execute_and_prove( fn execute_and_prove_program( program: &Program, + caller_program_id: Option, pre_states: &[AccountWithMetadata], instruction_data: &InstructionData, ) -> Result { // Write inputs to the program let mut env_builder = ExecutorEnv::builder(); - Program::write_inputs(pre_states, instruction_data, &mut env_builder)?; + Program::write_inputs( + program.id(), + caller_program_id, + pre_states, + instruction_data, + &mut env_builder, + )?; let env = 
env_builder.build().unwrap(); // Prove the program @@ -174,12 +182,13 @@ mod tests { #![expect(clippy::shadow_unrelated, reason = "We don't care about it in tests")] use nssa_core::{ - Commitment, DUMMY_COMMITMENT_HASH, EncryptionScheme, Nullifier, + Commitment, DUMMY_COMMITMENT_HASH, EncryptionScheme, Nullifier, SharedSecretKey, account::{Account, AccountId, AccountWithMetadata, Nonce, data::Data}, }; use super::*; use crate::{ + error::NssaError, privacy_preserving_transaction::circuit::execute_and_prove, program::Program, state::{ @@ -364,4 +373,46 @@ mod tests { .unwrap(); assert_eq!(recipient_post, expected_private_account_2); } + + #[test] + fn circuit_fails_when_chained_validity_windows_have_empty_intersection() { + let account_keys = test_private_account_keys_1(); + let pre = AccountWithMetadata::new( + Account::default(), + false, + AccountId::from(&account_keys.npk()), + ); + + let validity_window_chain_caller = Program::validity_window_chain_caller(); + let validity_window = Program::validity_window(); + + let instruction = Program::serialize_instruction(( + Some(1_u64), + Some(4_u64), + validity_window.id(), + Some(4_u64), + Some(7_u64), + )) + .unwrap(); + + let esk = [3; 32]; + let shared_secret = SharedSecretKey::new(&esk, &account_keys.vpk()); + + let program_with_deps = ProgramWithDependencies::new( + validity_window_chain_caller, + [(validity_window.id(), validity_window)].into(), + ); + + let result = execute_and_prove( + vec![pre], + instruction, + vec![2], + vec![(account_keys.npk(), shared_secret)], + vec![], + vec![None], + &program_with_deps, + ); + + assert!(matches!(result, Err(NssaError::CircuitProvingError(_)))); + } } diff --git a/nssa/src/privacy_preserving_transaction/message.rs b/nssa/src/privacy_preserving_transaction/message.rs index 4b93e820..85f4a202 100644 --- a/nssa/src/privacy_preserving_transaction/message.rs +++ b/nssa/src/privacy_preserving_transaction/message.rs @@ -3,6 +3,7 @@ use nssa_core::{ Commitment, 
CommitmentSetDigest, Nullifier, NullifierPublicKey, PrivacyPreservingCircuitOutput, account::{Account, Nonce}, encryption::{Ciphertext, EphemeralPublicKey, ViewingPublicKey}, + program::{BlockValidityWindow, TimestampValidityWindow}, }; use sha2::{Digest as _, Sha256}; @@ -32,11 +33,11 @@ impl EncryptedAccountData { } } - /// Computes the tag as the first byte of SHA256("/NSSA/v0.2/ViewTag/" || Npk || vpk). + /// Computes the tag as the first byte of SHA256("/LEE/v0.3/ViewTag/" || Npk || vpk). #[must_use] pub fn compute_view_tag(npk: &NullifierPublicKey, vpk: &ViewingPublicKey) -> ViewTag { let mut hasher = Sha256::new(); - hasher.update(b"/NSSA/v0.2/ViewTag/"); + hasher.update(b"/LEE/v0.3/ViewTag/"); hasher.update(npk.to_byte_array()); hasher.update(vpk.to_bytes()); let digest: [u8; 32] = hasher.finalize().into(); @@ -52,6 +53,8 @@ pub struct Message { pub encrypted_private_post_states: Vec, pub new_commitments: Vec, pub new_nullifiers: Vec<(Nullifier, CommitmentSetDigest)>, + pub block_validity_window: BlockValidityWindow, + pub timestamp_validity_window: TimestampValidityWindow, } impl std::fmt::Debug for Message { @@ -77,6 +80,8 @@ impl std::fmt::Debug for Message { ) .field("new_commitments", &self.new_commitments) .field("new_nullifiers", &nullifiers) + .field("block_validity_window", &self.block_validity_window) + .field("timestamp_validity_window", &self.timestamp_validity_window) .finish() } } @@ -109,6 +114,8 @@ impl Message { encrypted_private_post_states, new_commitments: output.new_commitments, new_nullifiers: output.new_nullifiers, + block_validity_window: output.block_validity_window, + timestamp_validity_window: output.timestamp_validity_window, }) } } @@ -119,6 +126,7 @@ pub mod tests { Commitment, EncryptionScheme, Nullifier, NullifierPublicKey, SharedSecretKey, account::Account, encryption::{EphemeralPublicKey, ViewingPublicKey}, + program::{BlockValidityWindow, TimestampValidityWindow}, }; use sha2::{Digest as _, Sha256}; @@ -161,6 +169,8 @@ pub 
mod tests { encrypted_private_post_states, new_commitments, new_nullifiers, + block_validity_window: BlockValidityWindow::new_unbounded(), + timestamp_validity_window: TimestampValidityWindow::new_unbounded(), } } @@ -179,7 +189,7 @@ pub mod tests { let expected_view_tag = { let mut hasher = Sha256::new(); - hasher.update(b"/NSSA/v0.2/ViewTag/"); + hasher.update(b"/LEE/v0.3/ViewTag/"); hasher.update(npk.to_byte_array()); hasher.update(vpk.to_bytes()); let digest: [u8; 32] = hasher.finalize().into(); diff --git a/nssa/src/privacy_preserving_transaction/transaction.rs b/nssa/src/privacy_preserving_transaction/transaction.rs index 2b268c07..2e46f628 100644 --- a/nssa/src/privacy_preserving_transaction/transaction.rs +++ b/nssa/src/privacy_preserving_transaction/transaction.rs @@ -1,21 +1,10 @@ -use std::{ - collections::{HashMap, HashSet}, - hash::Hash, -}; +use std::collections::HashSet; use borsh::{BorshDeserialize, BorshSerialize}; -use nssa_core::{ - Commitment, CommitmentSetDigest, Nullifier, PrivacyPreservingCircuitOutput, - account::{Account, AccountWithMetadata}, -}; +use nssa_core::account::AccountId; use sha2::{Digest as _, digest::FixedOutput as _}; use super::{message::Message, witness_set::WitnessSet}; -use crate::{ - AccountId, V02State, - error::NssaError, - privacy_preserving_transaction::{circuit::Proof, message::EncryptedAccountData}, -}; #[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)] pub struct PrivacyPreservingTransaction { @@ -32,102 +21,6 @@ impl PrivacyPreservingTransaction { } } - pub(crate) fn validate_and_produce_public_state_diff( - &self, - state: &V02State, - ) -> Result, NssaError> { - let message = &self.message; - let witness_set = &self.witness_set; - - // 1. Commitments or nullifiers are non empty - if message.new_commitments.is_empty() && message.new_nullifiers.is_empty() { - return Err(NssaError::InvalidInput( - "Empty commitments and empty nullifiers found in message".into(), - )); - } - - // 2. 
Check there are no duplicate account_ids in the public_account_ids list. - if n_unique(&message.public_account_ids) != message.public_account_ids.len() { - return Err(NssaError::InvalidInput( - "Duplicate account_ids found in message".into(), - )); - } - - // Check there are no duplicate nullifiers in the new_nullifiers list - if n_unique(&message.new_nullifiers) != message.new_nullifiers.len() { - return Err(NssaError::InvalidInput( - "Duplicate nullifiers found in message".into(), - )); - } - - // Check there are no duplicate commitments in the new_commitments list - if n_unique(&message.new_commitments) != message.new_commitments.len() { - return Err(NssaError::InvalidInput( - "Duplicate commitments found in message".into(), - )); - } - - // 3. Nonce checks and Valid signatures - // Check exactly one nonce is provided for each signature - if message.nonces.len() != witness_set.signatures_and_public_keys.len() { - return Err(NssaError::InvalidInput( - "Mismatch between number of nonces and signatures/public keys".into(), - )); - } - - // Check the signatures are valid - if !witness_set.signatures_are_valid_for(message) { - return Err(NssaError::InvalidInput( - "Invalid signature for given message and public key".into(), - )); - } - - let signer_account_ids = self.signer_account_ids(); - // Check nonces corresponds to the current nonces on the public state. - for (account_id, nonce) in signer_account_ids.iter().zip(&message.nonces) { - let current_nonce = state.get_account_by_id(*account_id).nonce; - if current_nonce != *nonce { - return Err(NssaError::InvalidInput("Nonce mismatch".into())); - } - } - - // Build pre_states for proof verification - let public_pre_states: Vec<_> = message - .public_account_ids - .iter() - .map(|account_id| { - AccountWithMetadata::new( - state.get_account_by_id(*account_id), - signer_account_ids.contains(account_id), - *account_id, - ) - }) - .collect(); - - // 4. 
Proof verification - check_privacy_preserving_circuit_proof_is_valid( - &witness_set.proof, - &public_pre_states, - &message.public_post_states, - &message.encrypted_private_post_states, - &message.new_commitments, - &message.new_nullifiers, - )?; - - // 5. Commitment freshness - state.check_commitments_are_new(&message.new_commitments)?; - - // 6. Nullifier uniqueness - state.check_nullifiers_are_valid(&message.new_nullifiers)?; - - Ok(message - .public_account_ids - .iter() - .copied() - .zip(message.public_post_states.clone()) - .collect()) - } - #[must_use] pub const fn message(&self) -> &Message { &self.message @@ -166,36 +59,6 @@ impl PrivacyPreservingTransaction { } } -fn check_privacy_preserving_circuit_proof_is_valid( - proof: &Proof, - public_pre_states: &[AccountWithMetadata], - public_post_states: &[Account], - encrypted_private_post_states: &[EncryptedAccountData], - new_commitments: &[Commitment], - new_nullifiers: &[(Nullifier, CommitmentSetDigest)], -) -> Result<(), NssaError> { - let output = PrivacyPreservingCircuitOutput { - public_pre_states: public_pre_states.to_vec(), - public_post_states: public_post_states.to_vec(), - ciphertexts: encrypted_private_post_states - .iter() - .cloned() - .map(|value| value.ciphertext) - .collect(), - new_commitments: new_commitments.to_vec(), - new_nullifiers: new_nullifiers.to_vec(), - }; - proof - .is_valid_for(&output) - .then_some(()) - .ok_or(NssaError::InvalidPrivacyPreservingProof) -} - -fn n_unique(data: &[T]) -> usize { - let set: HashSet<&T> = data.iter().collect(); - set.len() -} - #[cfg(test)] mod tests { use crate::{ diff --git a/nssa/src/program.rs b/nssa/src/program.rs index 3b372a22..698032e2 100644 --- a/nssa/src/program.rs +++ b/nssa/src/program.rs @@ -8,7 +8,11 @@ use serde::Serialize; use crate::{ error::NssaError, - program_methods::{AMM_ELF, AUTHENTICATED_TRANSFER_ELF, PINATA_ELF, TOKEN_ELF}, + program_methods::{ + AMM_ELF, AMM_ID, ASSOCIATED_TOKEN_ACCOUNT_ELF, ASSOCIATED_TOKEN_ACCOUNT_ID, 
+ AUTHENTICATED_TRANSFER_ELF, AUTHENTICATED_TRANSFER_ID, CLOCK_ELF, CLOCK_ID, PINATA_ELF, + PINATA_ID, TOKEN_ELF, TOKEN_ID, + }, }; /// Maximum number of cycles for a public execution. @@ -50,13 +54,20 @@ impl Program { pub(crate) fn execute( &self, + caller_program_id: Option, pre_states: &[AccountWithMetadata], instruction_data: &InstructionData, ) -> Result { // Write inputs to the program let mut env_builder = ExecutorEnv::builder(); env_builder.session_limit(Some(MAX_NUM_CYCLES_PUBLIC_EXECUTION)); - Self::write_inputs(pre_states, instruction_data, &mut env_builder)?; + Self::write_inputs( + self.id, + caller_program_id, + pre_states, + instruction_data, + &mut env_builder, + )?; let env = env_builder.build().unwrap(); // Execute the program (without proving) @@ -76,34 +87,66 @@ impl Program { /// Writes inputs to `env_builder` in the order expected by the programs. pub(crate) fn write_inputs( + program_id: ProgramId, + caller_program_id: Option, pre_states: &[AccountWithMetadata], instruction_data: &[u32], env_builder: &mut ExecutorEnvBuilder, ) -> Result<(), NssaError> { + env_builder + .write(&program_id) + .map_err(|e| NssaError::ProgramWriteInputFailed(e.to_string()))?; + env_builder + .write(&caller_program_id) + .map_err(|e| NssaError::ProgramWriteInputFailed(e.to_string()))?; let pre_states = pre_states.to_vec(); env_builder - .write(&(pre_states, instruction_data)) + .write(&pre_states) + .map_err(|e| NssaError::ProgramWriteInputFailed(e.to_string()))?; + env_builder + .write(&instruction_data) .map_err(|e| NssaError::ProgramWriteInputFailed(e.to_string()))?; Ok(()) } #[must_use] pub fn authenticated_transfer_program() -> Self { - // This unwrap won't panic since the `AUTHENTICATED_TRANSFER_ELF` comes from risc0 build of - // `program_methods` - Self::new(AUTHENTICATED_TRANSFER_ELF.to_vec()).unwrap() + Self { + id: AUTHENTICATED_TRANSFER_ID, + elf: AUTHENTICATED_TRANSFER_ELF.to_vec(), + } } #[must_use] pub fn token() -> Self { - // This unwrap won't 
panic since the `TOKEN_ELF` comes from risc0 build of - // `program_methods` - Self::new(TOKEN_ELF.to_vec()).unwrap() + Self { + id: TOKEN_ID, + elf: TOKEN_ELF.to_vec(), + } } #[must_use] pub fn amm() -> Self { - Self::new(AMM_ELF.to_vec()).expect("The AMM program must be a valid Risc0 program") + Self { + id: AMM_ID, + elf: AMM_ELF.to_vec(), + } + } + + #[must_use] + pub fn clock() -> Self { + Self { + id: CLOCK_ID, + elf: CLOCK_ELF.to_vec(), + } + } + + #[must_use] + pub fn ata() -> Self { + Self { + id: ASSOCIATED_TOKEN_ACCOUNT_ID, + elf: ASSOCIATED_TOKEN_ACCOUNT_ELF.to_vec(), + } } } @@ -111,16 +154,19 @@ impl Program { impl Program { #[must_use] pub fn pinata() -> Self { - // This unwrap won't panic since the `PINATA_ELF` comes from risc0 build of - // `program_methods` - Self::new(PINATA_ELF.to_vec()).unwrap() + Self { + id: PINATA_ID, + elf: PINATA_ELF.to_vec(), + } } #[must_use] - #[expect(clippy::non_ascii_literal, reason = "More readable")] pub fn pinata_token() -> Self { - use crate::program_methods::PINATA_TOKEN_ELF; - Self::new(PINATA_TOKEN_ELF.to_vec()).expect("Piñata program must be a valid R0BF file") + use crate::program_methods::{PINATA_TOKEN_ELF, PINATA_TOKEN_ID}; + Self { + id: PINATA_TOKEN_ID, + elf: PINATA_TOKEN_ELF.to_vec(), + } } } @@ -131,8 +177,9 @@ mod tests { use crate::{ program::Program, program_methods::{ - AUTHENTICATED_TRANSFER_ELF, AUTHENTICATED_TRANSFER_ID, PINATA_ELF, PINATA_ID, - TOKEN_ELF, TOKEN_ID, + AMM_ELF, AMM_ID, ASSOCIATED_TOKEN_ACCOUNT_ELF, ASSOCIATED_TOKEN_ACCOUNT_ID, + AUTHENTICATED_TRANSFER_ELF, AUTHENTICATED_TRANSFER_ID, CLOCK_ELF, CLOCK_ID, PINATA_ELF, + PINATA_ID, PINATA_TOKEN_ELF, PINATA_TOKEN_ID, TOKEN_ELF, TOKEN_ID, }, }; @@ -279,10 +326,71 @@ mod tests { #[must_use] pub fn modified_transfer_program() -> Self { - use test_program_methods::MODIFIED_TRANSFER_ELF; - // This unwrap won't panic since the `MODIFIED_TRANSFER_ELF` comes from risc0 build of - // `program_methods` - 
Self::new(MODIFIED_TRANSFER_ELF.to_vec()).unwrap() + use test_program_methods::{MODIFIED_TRANSFER_ELF, MODIFIED_TRANSFER_ID}; + Self { + id: MODIFIED_TRANSFER_ID, + elf: MODIFIED_TRANSFER_ELF.to_vec(), + } + } + + #[must_use] + pub fn validity_window() -> Self { + use test_program_methods::{VALIDITY_WINDOW_ELF, VALIDITY_WINDOW_ID}; + Self { + id: VALIDITY_WINDOW_ID, + elf: VALIDITY_WINDOW_ELF.to_vec(), + } + } + + #[must_use] + pub fn validity_window_chain_caller() -> Self { + use test_program_methods::{ + VALIDITY_WINDOW_CHAIN_CALLER_ELF, VALIDITY_WINDOW_CHAIN_CALLER_ID, + }; + Self { + id: VALIDITY_WINDOW_CHAIN_CALLER_ID, + elf: VALIDITY_WINDOW_CHAIN_CALLER_ELF.to_vec(), + } + } + + #[must_use] + pub fn flash_swap_initiator() -> Self { + use test_program_methods::FLASH_SWAP_INITIATOR_ELF; + Self::new(FLASH_SWAP_INITIATOR_ELF.to_vec()) + .expect("flash_swap_initiator must be a valid Risc0 program") + } + + #[must_use] + pub fn flash_swap_callback() -> Self { + use test_program_methods::FLASH_SWAP_CALLBACK_ELF; + Self::new(FLASH_SWAP_CALLBACK_ELF.to_vec()) + .expect("flash_swap_callback must be a valid Risc0 program") + } + + #[must_use] + pub fn malicious_self_program_id() -> Self { + use test_program_methods::MALICIOUS_SELF_PROGRAM_ID_ELF; + Self::new(MALICIOUS_SELF_PROGRAM_ID_ELF.to_vec()) + .expect("malicious_self_program_id must be a valid Risc0 program") + } + + #[must_use] + pub fn malicious_caller_program_id() -> Self { + use test_program_methods::MALICIOUS_CALLER_PROGRAM_ID_ELF; + Self::new(MALICIOUS_CALLER_PROGRAM_ID_ELF.to_vec()) + .expect("malicious_caller_program_id must be a valid Risc0 program") + } + + #[must_use] + pub fn time_locked_transfer() -> Self { + use test_program_methods::TIME_LOCKED_TRANSFER_ELF; + Self::new(TIME_LOCKED_TRANSFER_ELF.to_vec()).unwrap() + } + + #[must_use] + pub fn pinata_cooldown() -> Self { + use test_program_methods::PINATA_COOLDOWN_ELF; + Self::new(PINATA_COOLDOWN_ELF.to_vec()).unwrap() } } @@ -311,7 +419,7 @@ mod 
tests { ..Account::default() }; let program_output = program - .execute(&[sender, recipient], &instruction_data) + .execute(None, &[sender, recipient], &instruction_data) .unwrap(); let [sender_post, recipient_post] = program_output.post_states.try_into().unwrap(); @@ -333,4 +441,21 @@ mod tests { assert_eq!(pinata_program.id, PINATA_ID); assert_eq!(pinata_program.elf, PINATA_ELF); } + + #[test] + fn builtin_program_ids_match_elfs() { + let cases: &[(&[u8], [u32; 8])] = &[ + (AMM_ELF, AMM_ID), + (AUTHENTICATED_TRANSFER_ELF, AUTHENTICATED_TRANSFER_ID), + (ASSOCIATED_TOKEN_ACCOUNT_ELF, ASSOCIATED_TOKEN_ACCOUNT_ID), + (CLOCK_ELF, CLOCK_ID), + (PINATA_ELF, PINATA_ID), + (PINATA_TOKEN_ELF, PINATA_TOKEN_ID), + (TOKEN_ELF, TOKEN_ID), + ]; + for (elf, expected_id) in cases { + let program = Program::new(elf.to_vec()).unwrap(); + assert_eq!(program.id(), *expected_id); + } + } } diff --git a/nssa/src/program_deployment_transaction/transaction.rs b/nssa/src/program_deployment_transaction/transaction.rs index 1e53388d..3fa775a8 100644 --- a/nssa/src/program_deployment_transaction/transaction.rs +++ b/nssa/src/program_deployment_transaction/transaction.rs @@ -2,9 +2,7 @@ use borsh::{BorshDeserialize, BorshSerialize}; use nssa_core::account::AccountId; use sha2::{Digest as _, digest::FixedOutput as _}; -use crate::{ - V02State, error::NssaError, program::Program, program_deployment_transaction::message::Message, -}; +use crate::program_deployment_transaction::message::Message; #[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)] pub struct ProgramDeploymentTransaction { @@ -22,19 +20,6 @@ impl ProgramDeploymentTransaction { self.message } - pub(crate) fn validate_and_produce_public_state_diff( - &self, - state: &V02State, - ) -> Result { - // TODO: remove clone - let program = Program::new(self.message.bytecode.clone())?; - if state.programs().contains_key(&program.id()) { - Err(NssaError::ProgramAlreadyExists) - } else { - Ok(program) - } - } - #[must_use] 
pub fn hash(&self) -> [u8; 32] { let bytes = self.to_bytes(); diff --git a/nssa/src/public_transaction/transaction.rs b/nssa/src/public_transaction/transaction.rs index 8c84d83c..5ab87fa1 100644 --- a/nssa/src/public_transaction/transaction.rs +++ b/nssa/src/public_transaction/transaction.rs @@ -1,19 +1,10 @@ -use std::collections::{HashMap, HashSet, VecDeque}; +use std::collections::HashSet; use borsh::{BorshDeserialize, BorshSerialize}; -use log::debug; -use nssa_core::{ - account::{Account, AccountId, AccountWithMetadata}, - program::{ChainedCall, DEFAULT_PROGRAM_ID, validate_execution}, -}; +use nssa_core::account::AccountId; use sha2::{Digest as _, digest::FixedOutput as _}; -use crate::{ - V02State, ensure, - error::NssaError, - public_transaction::{Message, WitnessSet}, - state::MAX_NUMBER_CHAINED_CALLS, -}; +use crate::public_transaction::{Message, WitnessSet}; #[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)] pub struct PublicTransaction { @@ -66,180 +57,6 @@ impl PublicTransaction { hasher.update(&bytes); hasher.finalize_fixed().into() } - - pub(crate) fn validate_and_produce_public_state_diff( - &self, - state: &V02State, - ) -> Result, NssaError> { - let message = self.message(); - let witness_set = self.witness_set(); - - // All account_ids must be different - ensure!( - message.account_ids.iter().collect::>().len() == message.account_ids.len(), - NssaError::InvalidInput("Duplicate account_ids found in message".into(),) - ); - - // Check exactly one nonce is provided for each signature - ensure!( - message.nonces.len() == witness_set.signatures_and_public_keys.len(), - NssaError::InvalidInput( - "Mismatch between number of nonces and signatures/public keys".into(), - ) - ); - - // Check the signatures are valid - ensure!( - witness_set.is_valid_for(message), - NssaError::InvalidInput("Invalid signature for given message and public key".into()) - ); - - let signer_account_ids = self.signer_account_ids(); - // Check nonces 
corresponds to the current nonces on the public state. - for (account_id, nonce) in signer_account_ids.iter().zip(&message.nonces) { - let current_nonce = state.get_account_by_id(*account_id).nonce; - ensure!( - current_nonce == *nonce, - NssaError::InvalidInput("Nonce mismatch".into()) - ); - } - - // Build pre_states for execution - let input_pre_states: Vec<_> = message - .account_ids - .iter() - .map(|account_id| { - AccountWithMetadata::new( - state.get_account_by_id(*account_id), - signer_account_ids.contains(account_id), - *account_id, - ) - }) - .collect(); - - let mut state_diff: HashMap = HashMap::new(); - - let initial_call = ChainedCall { - program_id: message.program_id, - instruction_data: message.instruction_data.clone(), - pre_states: input_pre_states, - pda_seeds: vec![], - }; - - let mut chained_calls = VecDeque::from_iter([(initial_call, None)]); - let mut chain_calls_counter = 0; - - while let Some((chained_call, caller_program_id)) = chained_calls.pop_front() { - ensure!( - chain_calls_counter <= MAX_NUMBER_CHAINED_CALLS, - NssaError::MaxChainedCallsDepthExceeded - ); - - // Check that the `program_id` corresponds to a deployed program - let Some(program) = state.programs().get(&chained_call.program_id) else { - return Err(NssaError::InvalidInput("Unknown program".into())); - }; - - debug!( - "Program {:?} pre_states: {:?}, instruction_data: {:?}", - chained_call.program_id, chained_call.pre_states, chained_call.instruction_data - ); - let mut program_output = - program.execute(&chained_call.pre_states, &chained_call.instruction_data)?; - debug!( - "Program {:?} output: {:?}", - chained_call.program_id, program_output - ); - - let authorized_pdas = nssa_core::program::compute_authorized_pdas( - caller_program_id, - &chained_call.pda_seeds, - ); - - for pre in &program_output.pre_states { - let account_id = pre.account_id; - // Check that the program output pre_states coincide with the values in the public - // state or with any modifications to 
those values during the chain of calls. - let expected_pre = state_diff - .get(&account_id) - .cloned() - .unwrap_or_else(|| state.get_account_by_id(account_id)); - ensure!( - pre.account == expected_pre, - NssaError::InvalidProgramBehavior - ); - - // Check that authorization flags are consistent with the provided ones or - // authorized by program through the PDA mechanism - let is_authorized = signer_account_ids.contains(&account_id) - || authorized_pdas.contains(&account_id); - ensure!( - pre.is_authorized == is_authorized, - NssaError::InvalidProgramBehavior - ); - } - - // Verify execution corresponds to a well-behaved program. - // See the # Programs section for the definition of the `validate_execution` method. - ensure!( - validate_execution( - &program_output.pre_states, - &program_output.post_states, - chained_call.program_id, - ), - NssaError::InvalidProgramBehavior - ); - - for post in program_output - .post_states - .iter_mut() - .filter(|post| post.requires_claim()) - { - // The invoked program can only claim accounts with default program id. 
- if post.account().program_owner == DEFAULT_PROGRAM_ID { - post.account_mut().program_owner = chained_call.program_id; - } else { - return Err(NssaError::InvalidProgramBehavior); - } - } - - // Update the state diff - for (pre, post) in program_output - .pre_states - .iter() - .zip(program_output.post_states.iter()) - { - state_diff.insert(pre.account_id, post.account().clone()); - } - - for new_call in program_output.chained_calls.into_iter().rev() { - chained_calls.push_front((new_call, Some(chained_call.program_id))); - } - - chain_calls_counter = chain_calls_counter - .checked_add(1) - .expect("we check the max depth at the beginning of the loop"); - } - - // Check that all modified uninitialized accounts where claimed - for post in state_diff.iter().filter_map(|(account_id, post)| { - let pre = state.get_account_by_id(*account_id); - if pre.program_owner != DEFAULT_PROGRAM_ID { - return None; - } - if pre == *post { - return None; - } - Some(post) - }) { - ensure!( - post.program_owner != DEFAULT_PROGRAM_ID, - NssaError::InvalidProgramBehavior - ); - } - - Ok(state_diff) - } } #[cfg(test)] @@ -247,10 +64,11 @@ pub mod tests { use sha2::{Digest as _, digest::FixedOutput as _}; use crate::{ - AccountId, PrivateKey, PublicKey, PublicTransaction, Signature, V02State, + AccountId, PrivateKey, PublicKey, PublicTransaction, Signature, V03State, error::NssaError, program::Program, public_transaction::{Message, WitnessSet}, + validated_state_diff::ValidatedStateDiff, }; fn keys_for_tests() -> (PrivateKey, PrivateKey, AccountId, AccountId) { @@ -261,10 +79,10 @@ pub mod tests { (key1, key2, addr1, addr2) } - fn state_for_tests() -> V02State { + fn state_for_tests() -> V03State { let (_, _, addr1, addr2) = keys_for_tests(); let initial_data = [(addr1, 10000), (addr2, 20000)]; - V02State::new_with_genesis_accounts(&initial_data, &[]) + V03State::new_with_genesis_accounts(&initial_data, &[], 0) } fn transaction_for_tests() -> PublicTransaction { @@ -359,7 +177,7 @@ pub 
mod tests { let witness_set = WitnessSet::for_message(&message, &[&key1, &key1]); let tx = PublicTransaction::new(message, witness_set); - let result = tx.validate_and_produce_public_state_diff(&state); + let result = ValidatedStateDiff::from_public_transaction(&tx, &state, 1, 0); assert!(matches!(result, Err(NssaError::InvalidInput(_)))); } @@ -379,7 +197,7 @@ pub mod tests { let witness_set = WitnessSet::for_message(&message, &[&key1, &key2]); let tx = PublicTransaction::new(message, witness_set); - let result = tx.validate_and_produce_public_state_diff(&state); + let result = ValidatedStateDiff::from_public_transaction(&tx, &state, 1, 0); assert!(matches!(result, Err(NssaError::InvalidInput(_)))); } @@ -400,7 +218,7 @@ pub mod tests { let mut witness_set = WitnessSet::for_message(&message, &[&key1, &key2]); witness_set.signatures_and_public_keys[0].0 = Signature::new_for_tests([1; 64]); let tx = PublicTransaction::new(message, witness_set); - let result = tx.validate_and_produce_public_state_diff(&state); + let result = ValidatedStateDiff::from_public_transaction(&tx, &state, 1, 0); assert!(matches!(result, Err(NssaError::InvalidInput(_)))); } @@ -420,7 +238,7 @@ pub mod tests { let witness_set = WitnessSet::for_message(&message, &[&key1, &key2]); let tx = PublicTransaction::new(message, witness_set); - let result = tx.validate_and_produce_public_state_diff(&state); + let result = ValidatedStateDiff::from_public_transaction(&tx, &state, 1, 0); assert!(matches!(result, Err(NssaError::InvalidInput(_)))); } @@ -436,7 +254,7 @@ pub mod tests { let witness_set = WitnessSet::for_message(&message, &[&key1, &key2]); let tx = PublicTransaction::new(message, witness_set); - let result = tx.validate_and_produce_public_state_diff(&state); + let result = ValidatedStateDiff::from_public_transaction(&tx, &state, 1, 0); assert!(matches!(result, Err(NssaError::InvalidInput(_)))); } } diff --git a/nssa/src/signature/mod.rs b/nssa/src/signature/mod.rs index 63377f15..3a594da6 
100644 --- a/nssa/src/signature/mod.rs +++ b/nssa/src/signature/mod.rs @@ -1,3 +1,5 @@ +use std::str::FromStr; + use borsh::{BorshDeserialize, BorshSerialize}; pub use private_key::PrivateKey; pub use public_key::PublicKey; @@ -12,11 +14,27 @@ pub struct Signature { } impl std::fmt::Debug for Signature { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + std::fmt::Display::fmt(self, f) + } +} + +impl std::fmt::Display for Signature { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", hex::encode(self.value)) } } +impl FromStr for Signature { + type Err = hex::FromHexError; + + fn from_str(s: &str) -> Result { + let mut bytes = [0_u8; 64]; + hex::decode_to_slice(s, &mut bytes)?; + Ok(Self { value: bytes }) + } +} + impl Signature { #[must_use] pub fn new(key: &PrivateKey, message: &[u8]) -> Self { @@ -31,21 +49,28 @@ impl Signature { aux_random: [u8; 32], ) -> Self { let value = { - let secp = secp256k1::Secp256k1::new(); - let secret_key = secp256k1::SecretKey::from_byte_array(*key.value()).unwrap(); - let keypair = secp256k1::Keypair::from_secret_key(&secp, &secret_key); - let signature = secp.sign_schnorr_with_aux_rand(message, &keypair, &aux_random); - signature.to_byte_array() + let signing_key = k256::schnorr::SigningKey::from_bytes(key.value()) + .expect("Expect valid signing key"); + signing_key + .sign_raw(message, &aux_random) + .expect("Expect to produce a valid signature") + .to_bytes() }; + Self { value } } #[must_use] pub fn is_valid_for(&self, bytes: &[u8], public_key: &PublicKey) -> bool { - let pk = secp256k1::XOnlyPublicKey::from_byte_array(*public_key.value()).unwrap(); - let secp = secp256k1::Secp256k1::new(); - let sig = secp256k1::schnorr::Signature::from_byte_array(self.value); - secp.verify_schnorr(&sig, bytes, &pk).is_ok() + let Ok(pk) = k256::schnorr::VerifyingKey::from_bytes(public_key.value()) else { + return false; + }; + + let Ok(sig) = 
k256::schnorr::Signature::try_from(self.value.as_slice()) else { + return false; + }; + + pk.verify_raw(bytes, &sig).is_ok() } } diff --git a/nssa/src/signature/private_key.rs b/nssa/src/signature/private_key.rs index d8ece0e0..1bfecf80 100644 --- a/nssa/src/signature/private_key.rs +++ b/nssa/src/signature/private_key.rs @@ -1,13 +1,37 @@ +use std::str::FromStr; + use rand::{Rng as _, rngs::OsRng}; -use serde::{Deserialize, Serialize}; +use serde_with::{DeserializeFromStr, SerializeDisplay}; use crate::error::NssaError; // TODO: Remove Debug, Clone, Serialize, Deserialize, PartialEq and Eq for security reasons // TODO: Implement Zeroize -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +#[derive(Clone, SerializeDisplay, DeserializeFromStr, PartialEq, Eq)] pub struct PrivateKey([u8; 32]); +impl std::fmt::Debug for PrivateKey { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + std::fmt::Display::fmt(self, f) + } +} + +impl std::fmt::Display for PrivateKey { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", hex::encode(self.0)) + } +} + +impl FromStr for PrivateKey { + type Err = NssaError; + + fn from_str(s: &str) -> Result { + let mut bytes = [0_u8; 32]; + hex::decode_to_slice(s, &mut bytes).map_err(|_err| NssaError::InvalidPrivateKey)?; + Self::try_new(bytes) + } +} + impl PrivateKey { #[must_use] pub fn new_os_random() -> Self { @@ -21,7 +45,7 @@ impl PrivateKey { } fn is_valid_key(value: [u8; 32]) -> bool { - secp256k1::SecretKey::from_byte_array(value).is_ok() + k256::SecretKey::from_bytes(&value.into()).is_ok() } pub fn try_new(value: [u8; 32]) -> Result { diff --git a/nssa/src/signature/public_key.rs b/nssa/src/signature/public_key.rs index 9cdac761..ebec6b62 100644 --- a/nssa/src/signature/public_key.rs +++ b/nssa/src/signature/public_key.rs @@ -1,19 +1,38 @@ +use std::str::FromStr; + use borsh::{BorshDeserialize, BorshSerialize}; +use k256::elliptic_curve::sec1::ToEncodedPoint as _; 
use nssa_core::account::AccountId; -use serde::{Deserialize, Serialize}; +use serde_with::{DeserializeFromStr, SerializeDisplay}; use sha2::{Digest as _, Sha256}; use crate::{PrivateKey, error::NssaError}; -#[derive(Clone, PartialEq, Eq, BorshSerialize, Serialize, Deserialize)] +#[derive(Clone, PartialEq, Eq, BorshSerialize, SerializeDisplay, DeserializeFromStr)] pub struct PublicKey([u8; 32]); impl std::fmt::Debug for PublicKey { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + std::fmt::Display::fmt(self, f) + } +} + +impl std::fmt::Display for PublicKey { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", hex::encode(self.0)) } } +impl FromStr for PublicKey { + type Err = NssaError; + + fn from_str(s: &str) -> Result { + let mut bytes = [0_u8; 32]; + hex::decode_to_slice(s, &mut bytes).map_err(NssaError::InvalidHexPublicKey)?; + Self::try_new(bytes) + } +} + impl BorshDeserialize for PublicKey { fn deserialize_reader(reader: &mut R) -> std::io::Result { let mut buf = [0_u8; 32]; @@ -27,19 +46,24 @@ impl PublicKey { #[must_use] pub fn new_from_private_key(key: &PrivateKey) -> Self { let value = { - let secret_key = secp256k1::SecretKey::from_byte_array(*key.value()).unwrap(); - let public_key = - secp256k1::PublicKey::from_secret_key(&secp256k1::Secp256k1::new(), &secret_key); - let (x_only, _) = public_key.x_only_public_key(); - x_only.serialize() + let secret_key = k256::SecretKey::from_bytes(&(*key.value()).into()) + .expect("Expect a valid private key"); + + let encoded = secret_key.public_key().to_encoded_point(false); + let x_only = encoded + .x() + .expect("Expect k256 point to have a x-coordinate"); + + *x_only.first_chunk().expect("x_only is exactly 32 bytes") }; Self(value) } pub fn try_new(value: [u8; 32]) -> Result { - // Check point is valid - let _ = secp256k1::XOnlyPublicKey::from_byte_array(value) - .map_err(NssaError::InvalidPublicKey)?; + // Check point is a valid x-only public key + let 
_ = + k256::schnorr::VerifyingKey::from_bytes(&value).map_err(NssaError::InvalidPublicKey)?; + Ok(Self(value)) } diff --git a/nssa/src/state.rs b/nssa/src/state.rs index 8ae26e74..7753e1a3 100644 --- a/nssa/src/state.rs +++ b/nssa/src/state.rs @@ -1,17 +1,26 @@ use std::collections::{BTreeSet, HashMap, HashSet}; use borsh::{BorshDeserialize, BorshSerialize}; +use clock_core::ClockAccountData; +pub use clock_core::{ + CLOCK_01_PROGRAM_ACCOUNT_ID, CLOCK_10_PROGRAM_ACCOUNT_ID, CLOCK_50_PROGRAM_ACCOUNT_ID, + CLOCK_PROGRAM_ACCOUNT_IDS, +}; use nssa_core::{ - Commitment, CommitmentSetDigest, DUMMY_COMMITMENT, MembershipProof, Nullifier, + BlockId, Commitment, CommitmentSetDigest, DUMMY_COMMITMENT, MembershipProof, Nullifier, + Timestamp, account::{Account, AccountId, Nonce}, program::ProgramId, }; use crate::{ - error::NssaError, merkle_tree::MerkleTree, - privacy_preserving_transaction::PrivacyPreservingTransaction, program::Program, + error::NssaError, + merkle_tree::MerkleTree, + privacy_preserving_transaction::PrivacyPreservingTransaction, + program::Program, program_deployment_transaction::ProgramDeploymentTransaction, public_transaction::PublicTransaction, + validated_state_diff::{StateDiff, ValidatedStateDiff}, }; pub const MAX_NUMBER_CHAINED_CALLS: usize = 10; @@ -72,7 +81,7 @@ impl NullifierSet { Self(BTreeSet::new()) } - fn extend(&mut self, new_nullifiers: Vec) { + fn extend(&mut self, new_nullifiers: &[Nullifier]) { self.0.extend(new_nullifiers); } @@ -107,17 +116,18 @@ impl BorshDeserialize for NullifierSet { #[derive(Clone, BorshSerialize, BorshDeserialize)] #[cfg_attr(test, derive(Debug, PartialEq, Eq))] -pub struct V02State { +pub struct V03State { public_state: HashMap, private_state: (CommitmentSet, NullifierSet), programs: HashMap, } -impl V02State { +impl V03State { #[must_use] pub fn new_with_genesis_accounts( initial_data: &[(AccountId, u128)], initial_commitments: &[nssa_core::Commitment], + genesis_timestamp: nssa_core::Timestamp, ) -> Self { let 
authenticated_transfer_program = Program::authenticated_transfer_program(); let public_state = initial_data @@ -143,78 +153,90 @@ impl V02State { programs: HashMap::new(), }; + this.insert_program(Program::clock()); + this.insert_clock_accounts(genesis_timestamp); + this.insert_program(Program::authenticated_transfer_program()); this.insert_program(Program::token()); this.insert_program(Program::amm()); + this.insert_program(Program::ata()); this } + fn insert_clock_accounts(&mut self, genesis_timestamp: nssa_core::Timestamp) { + let data = ClockAccountData { + block_id: 0, + timestamp: genesis_timestamp, + } + .to_bytes(); + let clock_program_id = Program::clock().id(); + for account_id in CLOCK_PROGRAM_ACCOUNT_IDS { + self.public_state.insert( + account_id, + Account { + program_owner: clock_program_id, + data: data + .clone() + .try_into() + .expect("Clock account data should fit within accounts data"), + ..Account::default() + }, + ); + } + } + pub(crate) fn insert_program(&mut self, program: Program) { self.programs.insert(program.id(), program); } - pub fn transition_from_public_transaction( - &mut self, - tx: &PublicTransaction, - ) -> Result<(), NssaError> { - let state_diff = tx.validate_and_produce_public_state_diff(self)?; - + pub fn apply_state_diff(&mut self, diff: ValidatedStateDiff) { + let StateDiff { + signer_account_ids, + public_diff, + new_commitments, + new_nullifiers, + program, + } = diff.into_state_diff(); #[expect( clippy::iter_over_hash_type, reason = "Iteration order doesn't matter here" )] - for (account_id, post) in state_diff { - let current_account = self.get_account_by_id_mut(account_id); - - *current_account = post; + for (account_id, account) in public_diff { + *self.get_account_by_id_mut(account_id) = account; } - - for account_id in tx.signer_account_ids() { - let current_account = self.get_account_by_id_mut(account_id); - current_account.nonce.public_account_nonce_increment(); + for account_id in signer_account_ids { + 
self.get_account_by_id_mut(account_id) + .nonce + .public_account_nonce_increment(); } + self.private_state.0.extend(&new_commitments); + self.private_state.1.extend(&new_nullifiers); + if let Some(program) = program { + self.insert_program(program); + } + } + pub fn transition_from_public_transaction( + &mut self, + tx: &PublicTransaction, + block_id: BlockId, + timestamp: Timestamp, + ) -> Result<(), NssaError> { + let diff = ValidatedStateDiff::from_public_transaction(tx, self, block_id, timestamp)?; + self.apply_state_diff(diff); Ok(()) } pub fn transition_from_privacy_preserving_transaction( &mut self, tx: &PrivacyPreservingTransaction, + block_id: BlockId, + timestamp: Timestamp, ) -> Result<(), NssaError> { - // 1. Verify the transaction satisfies acceptance criteria - let public_state_diff = tx.validate_and_produce_public_state_diff(self)?; - - let message = tx.message(); - - // 2. Add new commitments - self.private_state.0.extend(&message.new_commitments); - - // 3. Add new nullifiers - let new_nullifiers = message - .new_nullifiers - .iter() - .cloned() - .map(|(nullifier, _)| nullifier) - .collect::>(); - self.private_state.1.extend(new_nullifiers); - - // 4. Update public accounts - #[expect( - clippy::iter_over_hash_type, - reason = "Iteration order doesn't matter here" - )] - for (account_id, post) in public_state_diff { - let current_account = self.get_account_by_id_mut(account_id); - *current_account = post; - } - - // 5. 
Increment nonces for public signers - for account_id in tx.signer_account_ids() { - let current_account = self.get_account_by_id_mut(account_id); - current_account.nonce.public_account_nonce_increment(); - } - + let diff = + ValidatedStateDiff::from_privacy_preserving_transaction(tx, self, block_id, timestamp)?; + self.apply_state_diff(diff); Ok(()) } @@ -222,8 +244,8 @@ impl V02State { &mut self, tx: &ProgramDeploymentTransaction, ) -> Result<(), NssaError> { - let program = tx.validate_and_produce_public_state_diff(self)?; - self.insert_program(program); + let diff = ValidatedStateDiff::from_program_deployment_transaction(tx, self)?; + self.apply_state_diff(diff); Ok(()) } @@ -286,7 +308,7 @@ impl V02State { } // TODO: Testnet only. Refactor to prevent compilation on mainnet. -impl V02State { +impl V03State { pub fn add_pinata_program(&mut self, account_id: AccountId) { self.insert_program(Program::pinata()); @@ -318,7 +340,7 @@ impl V02State { } #[cfg(any(test, feature = "test-utils"))] -impl V02State { +impl V03State { pub fn force_insert_account(&mut self, account_id: AccountId, account: Account) { self.public_state.insert(account_id, account); } @@ -335,14 +357,15 @@ pub mod tests { use std::collections::HashMap; use nssa_core::{ - Commitment, Nullifier, NullifierPublicKey, NullifierSecretKey, SharedSecretKey, + BlockId, Commitment, Nullifier, NullifierPublicKey, NullifierSecretKey, SharedSecretKey, + Timestamp, account::{Account, AccountId, AccountWithMetadata, Nonce, data::Data}, encryption::{EphemeralPublicKey, Scalar, ViewingPublicKey}, - program::{PdaSeed, ProgramId}, + program::{BlockValidityWindow, PdaSeed, ProgramId, TimestampValidityWindow}, }; use crate::{ - PublicKey, PublicTransaction, V02State, + PublicKey, PublicTransaction, V03State, error::NssaError, execute_and_prove, privacy_preserving_transaction::{ @@ -354,10 +377,13 @@ pub mod tests { program::Program, public_transaction, signature::PrivateKey, - state::MAX_NUMBER_CHAINED_CALLS, + 
state::{ + CLOCK_01_PROGRAM_ACCOUNT_ID, CLOCK_10_PROGRAM_ACCOUNT_ID, CLOCK_50_PROGRAM_ACCOUNT_ID, + CLOCK_PROGRAM_ACCOUNT_IDS, MAX_NUMBER_CHAINED_CALLS, + }, }; - impl V02State { + impl V03State { /// Include test programs in the builtin programs map. #[must_use] pub fn with_test_programs(mut self) -> Self { @@ -373,6 +399,13 @@ pub mod tests { self.insert_program(Program::amm()); self.insert_program(Program::claimer()); self.insert_program(Program::changer_claimer()); + self.insert_program(Program::validity_window()); + self.insert_program(Program::flash_swap_initiator()); + self.insert_program(Program::flash_swap_callback()); + self.insert_program(Program::malicious_self_program_id()); + self.insert_program(Program::malicious_caller_program_id()); + self.insert_program(Program::time_locked_transfer()); + self.insert_program(Program::pinata_cooldown()); self } @@ -449,19 +482,61 @@ pub mod tests { } } + // ── Flash Swap types (mirrors of guest types for host-side serialisation) ── + + #[derive(serde::Serialize, serde::Deserialize)] + struct CallbackInstruction { + return_funds: bool, + token_program_id: ProgramId, + amount: u128, + } + + #[derive(serde::Serialize, serde::Deserialize)] + enum FlashSwapInstruction { + Initiate { + token_program_id: ProgramId, + callback_program_id: ProgramId, + amount_out: u128, + callback_instruction_data: Vec, + }, + InvariantCheck { + min_vault_balance: u128, + }, + } + fn transfer_transaction( from: AccountId, from_key: &PrivateKey, - nonce: u128, + from_nonce: u128, to: AccountId, + to_key: &PrivateKey, + to_nonce: u128, balance: u128, ) -> PublicTransaction { let account_ids = vec![from, to]; - let nonces = vec![Nonce(nonce)]; + let nonces = vec![Nonce(from_nonce), Nonce(to_nonce)]; let program_id = Program::authenticated_transfer_program().id(); let message = public_transaction::Message::try_new(program_id, account_ids, nonces, balance).unwrap(); - let witness_set = public_transaction::WitnessSet::for_message(&message, 
&[from_key]); + let witness_set = + public_transaction::WitnessSet::for_message(&message, &[from_key, to_key]); + PublicTransaction::new(message, witness_set) + } + + fn build_flash_swap_tx( + initiator: &Program, + vault_id: AccountId, + receiver_id: AccountId, + instruction: FlashSwapInstruction, + ) -> PublicTransaction { + let message = public_transaction::Message::try_new( + initiator.id(), + vec![vault_id, receiver_id], + vec![], // no signers — vault is PDA-authorised + instruction, + ) + .unwrap(); + let witness_set = public_transaction::WitnessSet::for_message(&message, &[]); PublicTransaction::new(message, witness_set) } @@ -473,6 +548,7 @@ pub mod tests { let addr2 = AccountId::from(&PublicKey::new_from_private_key(&key2)); let initial_data = [(addr1, 100_u128), (addr2, 151_u128)]; let authenticated_transfers_program = Program::authenticated_transfer_program(); + let clock_program = Program::clock(); let expected_public_state = { let mut this = HashMap::new(); this.insert( @@ -491,6 +567,16 @@ pub mod tests { ..Account::default() }, ); + for account_id in CLOCK_PROGRAM_ACCOUNT_IDS { + this.insert( + account_id, + Account { + program_owner: clock_program.id(), + data: [0_u8; 16].to_vec().try_into().unwrap(), + ..Account::default() + }, + ); + } this }; let expected_builtin_programs = { @@ -499,12 +585,14 @@ pub mod tests { authenticated_transfers_program.id(), authenticated_transfers_program, ); + this.insert(clock_program.id(), clock_program); this.insert(Program::token().id(), Program::token()); this.insert(Program::amm().id(), Program::amm()); + this.insert(Program::ata().id(), Program::ata()); this }; - let state = V02State::new_with_genesis_accounts(&initial_data, &[]); + let state = V03State::new_with_genesis_accounts(&initial_data, &[], 0); assert_eq!(state.public_state, expected_public_state); assert_eq!(state.programs, expected_builtin_programs); @@ -512,7 +600,7 @@ pub mod tests { #[test] fn insert_program() { - let mut state = 
V02State::new_with_genesis_accounts(&[], &[]); + let mut state = V03State::new_with_genesis_accounts(&[], &[], 0); let program_to_insert = Program::simple_balance_transfer(); let program_id = program_to_insert.id(); assert!(!state.programs.contains_key(&program_id)); @@ -527,7 +615,7 @@ pub mod tests { let key = PrivateKey::try_new([1; 32]).unwrap(); let account_id = AccountId::from(&PublicKey::new_from_private_key(&key)); let initial_data = [(account_id, 100_u128)]; - let state = V02State::new_with_genesis_accounts(&initial_data, &[]); + let state = V03State::new_with_genesis_accounts(&initial_data, &[], 0); let expected_account = &state.public_state[&account_id]; let account = state.get_account_by_id(account_id); @@ -538,7 +626,7 @@ pub mod tests { #[test] fn get_account_by_account_id_default_account() { let addr2 = AccountId::new([0; 32]); - let state = V02State::new_with_genesis_accounts(&[], &[]); + let state = V03State::new_with_genesis_accounts(&[], &[], 0); let expected_account = Account::default(); let account = state.get_account_by_id(addr2); @@ -548,7 +636,7 @@ pub mod tests { #[test] fn builtin_programs_getter() { - let state = V02State::new_with_genesis_accounts(&[], &[]); + let state = V03State::new_with_genesis_accounts(&[], &[], 0); let builtin_programs = state.programs(); @@ -560,19 +648,20 @@ pub mod tests { let key = PrivateKey::try_new([1; 32]).unwrap(); let account_id = AccountId::from(&PublicKey::new_from_private_key(&key)); let initial_data = [(account_id, 100)]; - let mut state = V02State::new_with_genesis_accounts(&initial_data, &[]); + let mut state = V03State::new_with_genesis_accounts(&initial_data, &[], 0); let from = account_id; - let to = AccountId::new([2; 32]); + let to_key = PrivateKey::try_new([2; 32]).unwrap(); + let to = AccountId::from(&PublicKey::new_from_private_key(&to_key)); assert_eq!(state.get_account_by_id(to), Account::default()); let balance_to_move = 5; - let tx = transfer_transaction(from, &key, 0, to, 
balance_to_move); - state.transition_from_public_transaction(&tx).unwrap(); + let tx = transfer_transaction(from, &key, 0, to, &to_key, 0, balance_to_move); + state.transition_from_public_transaction(&tx, 1, 0).unwrap(); assert_eq!(state.get_account_by_id(from).balance, 95); assert_eq!(state.get_account_by_id(to).balance, 5); assert_eq!(state.get_account_by_id(from).nonce, Nonce(1)); - assert_eq!(state.get_account_by_id(to).nonce, Nonce(0)); + assert_eq!(state.get_account_by_id(to).nonce, Nonce(1)); } #[test] @@ -580,15 +669,16 @@ pub mod tests { let key = PrivateKey::try_new([1; 32]).unwrap(); let account_id = AccountId::from(&PublicKey::new_from_private_key(&key)); let initial_data = [(account_id, 100)]; - let mut state = V02State::new_with_genesis_accounts(&initial_data, &[]); + let mut state = V03State::new_with_genesis_accounts(&initial_data, &[], 0); let from = account_id; let from_key = key; - let to = AccountId::new([2; 32]); + let to_key = PrivateKey::try_new([2; 32]).unwrap(); + let to = AccountId::from(&PublicKey::new_from_private_key(&to_key)); let balance_to_move = 101; assert!(state.get_account_by_id(from).balance < balance_to_move); - let tx = transfer_transaction(from, &from_key, 0, to, balance_to_move); - let result = state.transition_from_public_transaction(&tx); + let tx = transfer_transaction(from, &from_key, 0, to, &to_key, 0, balance_to_move); + let result = state.transition_from_public_transaction(&tx, 1, 0); assert!(matches!(result, Err(NssaError::ProgramExecutionFailed(_)))); assert_eq!(state.get_account_by_id(from).balance, 100); @@ -604,20 +694,21 @@ pub mod tests { let account_id1 = AccountId::from(&PublicKey::new_from_private_key(&key1)); let account_id2 = AccountId::from(&PublicKey::new_from_private_key(&key2)); let initial_data = [(account_id1, 100), (account_id2, 200)]; - let mut state = V02State::new_with_genesis_accounts(&initial_data, &[]); + let mut state = V03State::new_with_genesis_accounts(&initial_data, &[], 0); let from = 
account_id2; let from_key = key2; let to = account_id1; + let to_key = key1; assert_ne!(state.get_account_by_id(to), Account::default()); let balance_to_move = 8; - let tx = transfer_transaction(from, &from_key, 0, to, balance_to_move); - state.transition_from_public_transaction(&tx).unwrap(); + let tx = transfer_transaction(from, &from_key, 0, to, &to_key, 0, balance_to_move); + state.transition_from_public_transaction(&tx, 1, 0).unwrap(); assert_eq!(state.get_account_by_id(from).balance, 192); assert_eq!(state.get_account_by_id(to).balance, 108); assert_eq!(state.get_account_by_id(from).nonce, Nonce(1)); - assert_eq!(state.get_account_by_id(to).nonce, Nonce(0)); + assert_eq!(state.get_account_by_id(to).nonce, Nonce(1)); } #[test] @@ -627,29 +718,189 @@ pub mod tests { let key2 = PrivateKey::try_new([2; 32]).unwrap(); let account_id2 = AccountId::from(&PublicKey::new_from_private_key(&key2)); let initial_data = [(account_id1, 100)]; - let mut state = V02State::new_with_genesis_accounts(&initial_data, &[]); - let account_id3 = AccountId::new([3; 32]); + let mut state = V03State::new_with_genesis_accounts(&initial_data, &[], 0); + let key3 = PrivateKey::try_new([3; 32]).unwrap(); + let account_id3 = AccountId::from(&PublicKey::new_from_private_key(&key3)); let balance_to_move = 5; - let tx = transfer_transaction(account_id1, &key1, 0, account_id2, balance_to_move); - state.transition_from_public_transaction(&tx).unwrap(); + let tx = transfer_transaction( + account_id1, + &key1, + 0, + account_id2, + &key2, + 0, + balance_to_move, + ); + state.transition_from_public_transaction(&tx, 1, 0).unwrap(); let balance_to_move = 3; - let tx = transfer_transaction(account_id2, &key2, 0, account_id3, balance_to_move); - state.transition_from_public_transaction(&tx).unwrap(); + let tx = transfer_transaction( + account_id2, + &key2, + 1, + account_id3, + &key3, + 0, + balance_to_move, + ); + state.transition_from_public_transaction(&tx, 1, 0).unwrap(); 
assert_eq!(state.get_account_by_id(account_id1).balance, 95); assert_eq!(state.get_account_by_id(account_id2).balance, 2); assert_eq!(state.get_account_by_id(account_id3).balance, 3); assert_eq!(state.get_account_by_id(account_id1).nonce, Nonce(1)); - assert_eq!(state.get_account_by_id(account_id2).nonce, Nonce(1)); - assert_eq!(state.get_account_by_id(account_id3).nonce, Nonce(0)); + assert_eq!(state.get_account_by_id(account_id2).nonce, Nonce(2)); + assert_eq!(state.get_account_by_id(account_id3).nonce, Nonce(1)); + } + + fn clock_transaction(timestamp: nssa_core::Timestamp) -> PublicTransaction { + let message = public_transaction::Message::try_new( + Program::clock().id(), + CLOCK_PROGRAM_ACCOUNT_IDS.to_vec(), + vec![], + timestamp, + ) + .unwrap(); + PublicTransaction::new( + message, + public_transaction::WitnessSet::from_raw_parts(vec![]), + ) + } + + fn clock_account_data(state: &V03State, account_id: AccountId) -> (u64, nssa_core::Timestamp) { + let data = state.get_account_by_id(account_id).data.into_inner(); + let parsed = clock_core::ClockAccountData::from_bytes(&data); + (parsed.block_id, parsed.timestamp) + } + + #[test] + fn clock_genesis_state_has_zero_block_id_and_genesis_timestamp() { + let genesis_timestamp = 1_000_000_u64; + let state = V03State::new_with_genesis_accounts(&[], &[], genesis_timestamp); + + let (block_id, timestamp) = clock_account_data(&state, CLOCK_01_PROGRAM_ACCOUNT_ID); + + assert_eq!(block_id, 0); + assert_eq!(timestamp, genesis_timestamp); + } + + #[test] + fn clock_invocation_increments_block_id() { + let mut state = V03State::new_with_genesis_accounts(&[], &[], 0); + + let tx = clock_transaction(1234); + state.transition_from_public_transaction(&tx, 0, 0).unwrap(); + + let (block_id, _) = clock_account_data(&state, CLOCK_01_PROGRAM_ACCOUNT_ID); + assert_eq!(block_id, 1); + } + + #[test] + fn clock_invocation_stores_timestamp_from_instruction() { + let mut state = V03State::new_with_genesis_accounts(&[], &[], 0); + let 
block_timestamp = 1_700_000_000_000_u64; + + let tx = clock_transaction(block_timestamp); + state.transition_from_public_transaction(&tx, 0, 0).unwrap(); + + let (_, timestamp) = clock_account_data(&state, CLOCK_01_PROGRAM_ACCOUNT_ID); + assert_eq!(timestamp, block_timestamp); + } + + #[test] + fn clock_invocation_sequence_correctly_increments_block_id() { + let mut state = V03State::new_with_genesis_accounts(&[], &[], 0); + + for expected_block_id in 1_u64..=5 { + let tx = clock_transaction(expected_block_id * 1000); + state.transition_from_public_transaction(&tx, 0, 0).unwrap(); + + let (block_id, timestamp) = clock_account_data(&state, CLOCK_01_PROGRAM_ACCOUNT_ID); + assert_eq!(block_id, expected_block_id); + assert_eq!(timestamp, expected_block_id * 1000); + } + } + + #[test] + fn clock_10_account_not_updated_when_block_id_not_multiple_of_10() { + let genesis_timestamp = 0_u64; + let mut state = V03State::new_with_genesis_accounts(&[], &[], genesis_timestamp); + + // Run 9 clock ticks (block_ids 1..=9), none of which are multiples of 10. + for tick in 1_u64..=9 { + let tx = clock_transaction(tick * 1000); + state.transition_from_public_transaction(&tx, 0, 0).unwrap(); + } + + let (block_id_10, timestamp_10) = clock_account_data(&state, CLOCK_10_PROGRAM_ACCOUNT_ID); + // The 10-block account should still reflect genesis state. + assert_eq!(block_id_10, 0); + assert_eq!(timestamp_10, genesis_timestamp); + } + + #[test] + fn clock_10_account_updated_when_block_id_is_multiple_of_10() { + let mut state = V03State::new_with_genesis_accounts(&[], &[], 0); + + // Run 10 clock ticks so block_id reaches 10. 
+ for tick in 1_u64..=10 { + let tx = clock_transaction(tick * 1000); + state.transition_from_public_transaction(&tx, 0, 0).unwrap(); + } + + let (block_id_1, timestamp_1) = clock_account_data(&state, CLOCK_01_PROGRAM_ACCOUNT_ID); + let (block_id_10, timestamp_10) = clock_account_data(&state, CLOCK_10_PROGRAM_ACCOUNT_ID); + assert_eq!(block_id_1, 10); + assert_eq!(block_id_10, 10); + assert_eq!(timestamp_10, timestamp_1); + } + + #[test] + fn clock_50_account_only_updated_at_multiples_of_50() { + let mut state = V03State::new_with_genesis_accounts(&[], &[], 0); + + // After 49 ticks the 50-block account should be unchanged. + for tick in 1_u64..=49 { + let tx = clock_transaction(tick * 1000); + state.transition_from_public_transaction(&tx, 0, 0).unwrap(); + } + let (block_id_50, _) = clock_account_data(&state, CLOCK_50_PROGRAM_ACCOUNT_ID); + assert_eq!(block_id_50, 0); + + // Tick 50 — now the 50-block account should update. + let tx = clock_transaction(50 * 1000); + state.transition_from_public_transaction(&tx, 0, 0).unwrap(); + let (block_id_50, timestamp_50) = clock_account_data(&state, CLOCK_50_PROGRAM_ACCOUNT_ID); + assert_eq!(block_id_50, 50); + assert_eq!(timestamp_50, 50 * 1000); + } + + #[test] + fn all_three_clock_accounts_updated_at_multiple_of_50() { + let mut state = V03State::new_with_genesis_accounts(&[], &[], 0); + + // Advance to block 50 (a multiple of both 10 and 50). 
+ for tick in 1_u64..=50 { + let tx = clock_transaction(tick * 1000); + state.transition_from_public_transaction(&tx, 0, 0).unwrap(); + } + + let (block_id_1, ts_1) = clock_account_data(&state, CLOCK_01_PROGRAM_ACCOUNT_ID); + let (block_id_10, ts_10) = clock_account_data(&state, CLOCK_10_PROGRAM_ACCOUNT_ID); + let (block_id_50, ts_50) = clock_account_data(&state, CLOCK_50_PROGRAM_ACCOUNT_ID); + + assert_eq!(block_id_1, 50); + assert_eq!(block_id_10, 50); + assert_eq!(block_id_50, 50); + assert_eq!(ts_1, ts_10); + assert_eq!(ts_1, ts_50); } #[test] fn program_should_fail_if_modifies_nonces() { let initial_data = [(AccountId::new([1; 32]), 100)]; let mut state = - V02State::new_with_genesis_accounts(&initial_data, &[]).with_test_programs(); + V03State::new_with_genesis_accounts(&initial_data, &[], 0).with_test_programs(); let account_ids = vec![AccountId::new([1; 32])]; let program_id = Program::nonce_changer_program().id(); let message = @@ -657,7 +908,7 @@ pub mod tests { let witness_set = public_transaction::WitnessSet::for_message(&message, &[]); let tx = PublicTransaction::new(message, witness_set); - let result = state.transition_from_public_transaction(&tx); + let result = state.transition_from_public_transaction(&tx, 1, 0); assert!(matches!(result, Err(NssaError::InvalidProgramBehavior))); } @@ -666,7 +917,7 @@ pub mod tests { fn program_should_fail_if_output_accounts_exceed_inputs() { let initial_data = [(AccountId::new([1; 32]), 100)]; let mut state = - V02State::new_with_genesis_accounts(&initial_data, &[]).with_test_programs(); + V03State::new_with_genesis_accounts(&initial_data, &[], 0).with_test_programs(); let account_ids = vec![AccountId::new([1; 32])]; let program_id = Program::extra_output_program().id(); let message = @@ -674,7 +925,7 @@ pub mod tests { let witness_set = public_transaction::WitnessSet::for_message(&message, &[]); let tx = PublicTransaction::new(message, witness_set); - let result = state.transition_from_public_transaction(&tx); + 
let result = state.transition_from_public_transaction(&tx, 1, 0); assert!(matches!(result, Err(NssaError::InvalidProgramBehavior))); } @@ -683,7 +934,7 @@ pub mod tests { fn program_should_fail_with_missing_output_accounts() { let initial_data = [(AccountId::new([1; 32]), 100)]; let mut state = - V02State::new_with_genesis_accounts(&initial_data, &[]).with_test_programs(); + V03State::new_with_genesis_accounts(&initial_data, &[], 0).with_test_programs(); let account_ids = vec![AccountId::new([1; 32]), AccountId::new([2; 32])]; let program_id = Program::missing_output_program().id(); let message = @@ -691,7 +942,7 @@ pub mod tests { let witness_set = public_transaction::WitnessSet::for_message(&message, &[]); let tx = PublicTransaction::new(message, witness_set); - let result = state.transition_from_public_transaction(&tx); + let result = state.transition_from_public_transaction(&tx, 1, 0); assert!(matches!(result, Err(NssaError::InvalidProgramBehavior))); } @@ -700,7 +951,7 @@ pub mod tests { fn program_should_fail_if_modifies_program_owner_with_only_non_default_program_owner() { let initial_data = [(AccountId::new([1; 32]), 0)]; let mut state = - V02State::new_with_genesis_accounts(&initial_data, &[]).with_test_programs(); + V03State::new_with_genesis_accounts(&initial_data, &[], 0).with_test_programs(); let account_id = AccountId::new([1; 32]); let account = state.get_account_by_id(account_id); // Assert the target account only differs from the default account in the program owner @@ -715,7 +966,7 @@ pub mod tests { let witness_set = public_transaction::WitnessSet::for_message(&message, &[]); let tx = PublicTransaction::new(message, witness_set); - let result = state.transition_from_public_transaction(&tx); + let result = state.transition_from_public_transaction(&tx, 1, 0); assert!(matches!(result, Err(NssaError::InvalidProgramBehavior))); } @@ -723,7 +974,7 @@ pub mod tests { #[test] fn 
program_should_fail_if_modifies_program_owner_with_only_non_default_balance() { let initial_data = []; - let mut state = V02State::new_with_genesis_accounts(&initial_data, &[]) + let mut state = V03State::new_with_genesis_accounts(&initial_data, &[], 0) .with_test_programs() .with_non_default_accounts_but_default_program_owners(); let account_id = AccountId::new([255; 32]); @@ -739,7 +990,7 @@ pub mod tests { let witness_set = public_transaction::WitnessSet::for_message(&message, &[]); let tx = PublicTransaction::new(message, witness_set); - let result = state.transition_from_public_transaction(&tx); + let result = state.transition_from_public_transaction(&tx, 1, 0); assert!(matches!(result, Err(NssaError::InvalidProgramBehavior))); } @@ -747,7 +998,7 @@ pub mod tests { #[test] fn program_should_fail_if_modifies_program_owner_with_only_non_default_nonce() { let initial_data = []; - let mut state = V02State::new_with_genesis_accounts(&initial_data, &[]) + let mut state = V03State::new_with_genesis_accounts(&initial_data, &[], 0) .with_test_programs() .with_non_default_accounts_but_default_program_owners(); let account_id = AccountId::new([254; 32]); @@ -763,7 +1014,7 @@ pub mod tests { let witness_set = public_transaction::WitnessSet::for_message(&message, &[]); let tx = PublicTransaction::new(message, witness_set); - let result = state.transition_from_public_transaction(&tx); + let result = state.transition_from_public_transaction(&tx, 1, 0); assert!(matches!(result, Err(NssaError::InvalidProgramBehavior))); } @@ -771,7 +1022,7 @@ pub mod tests { #[test] fn program_should_fail_if_modifies_program_owner_with_only_non_default_data() { let initial_data = []; - let mut state = V02State::new_with_genesis_accounts(&initial_data, &[]) + let mut state = V03State::new_with_genesis_accounts(&initial_data, &[], 0) .with_test_programs() .with_non_default_accounts_but_default_program_owners(); let account_id = AccountId::new([253; 32]); @@ -787,7 +1038,7 @@ pub mod tests { let 
witness_set = public_transaction::WitnessSet::for_message(&message, &[]); let tx = PublicTransaction::new(message, witness_set); - let result = state.transition_from_public_transaction(&tx); + let result = state.transition_from_public_transaction(&tx, 1, 0); assert!(matches!(result, Err(NssaError::InvalidProgramBehavior))); } @@ -796,7 +1047,7 @@ pub mod tests { fn program_should_fail_if_transfers_balance_from_non_owned_account() { let initial_data = [(AccountId::new([1; 32]), 100)]; let mut state = - V02State::new_with_genesis_accounts(&initial_data, &[]).with_test_programs(); + V03State::new_with_genesis_accounts(&initial_data, &[], 0).with_test_programs(); let sender_account_id = AccountId::new([1; 32]); let receiver_account_id = AccountId::new([2; 32]); let balance_to_move: u128 = 1; @@ -815,7 +1066,7 @@ pub mod tests { let witness_set = public_transaction::WitnessSet::for_message(&message, &[]); let tx = PublicTransaction::new(message, witness_set); - let result = state.transition_from_public_transaction(&tx); + let result = state.transition_from_public_transaction(&tx, 1, 0); assert!(matches!(result, Err(NssaError::InvalidProgramBehavior))); } @@ -823,7 +1074,7 @@ pub mod tests { #[test] fn program_should_fail_if_modifies_data_of_non_owned_account() { let initial_data = []; - let mut state = V02State::new_with_genesis_accounts(&initial_data, &[]) + let mut state = V03State::new_with_genesis_accounts(&initial_data, &[], 0) .with_test_programs() .with_non_default_accounts_but_default_program_owners(); let account_id = AccountId::new([255; 32]); @@ -840,7 +1091,7 @@ pub mod tests { let witness_set = public_transaction::WitnessSet::for_message(&message, &[]); let tx = PublicTransaction::new(message, witness_set); - let result = state.transition_from_public_transaction(&tx); + let result = state.transition_from_public_transaction(&tx, 1, 0); assert!(matches!(result, Err(NssaError::InvalidProgramBehavior))); } @@ -849,7 +1100,7 @@ pub mod tests { fn 
program_should_fail_if_does_not_preserve_total_balance_by_minting() { let initial_data = []; let mut state = - V02State::new_with_genesis_accounts(&initial_data, &[]).with_test_programs(); + V03State::new_with_genesis_accounts(&initial_data, &[], 0).with_test_programs(); let account_id = AccountId::new([1; 32]); let program_id = Program::minter().id(); @@ -858,7 +1109,7 @@ pub mod tests { let witness_set = public_transaction::WitnessSet::for_message(&message, &[]); let tx = PublicTransaction::new(message, witness_set); - let result = state.transition_from_public_transaction(&tx); + let result = state.transition_from_public_transaction(&tx, 1, 0); assert!(matches!(result, Err(NssaError::InvalidProgramBehavior))); } @@ -866,7 +1117,7 @@ pub mod tests { #[test] fn program_should_fail_if_does_not_preserve_total_balance_by_burning() { let initial_data = []; - let mut state = V02State::new_with_genesis_accounts(&initial_data, &[]) + let mut state = V03State::new_with_genesis_accounts(&initial_data, &[], 0) .with_test_programs() .with_account_owned_by_burner_program(); let program_id = Program::burner().id(); @@ -887,7 +1138,7 @@ pub mod tests { .unwrap(); let witness_set = public_transaction::WitnessSet::for_message(&message, &[]); let tx = PublicTransaction::new(message, witness_set); - let result = state.transition_from_public_transaction(&tx); + let result = state.transition_from_public_transaction(&tx, 1, 0); assert!(matches!(result, Err(NssaError::InvalidProgramBehavior))); } @@ -916,7 +1167,7 @@ pub mod tests { sender_keys: &TestPublicKeys, recipient_keys: &TestPrivateKeys, balance_to_move: u128, - state: &V02State, + state: &V03State, ) -> PrivacyPreservingTransaction { let sender = AccountWithMetadata::new( state.get_account_by_id(sender_keys.account_id()), @@ -960,7 +1211,7 @@ pub mod tests { sender_private_account: &Account, recipient_keys: &TestPrivateKeys, balance_to_move: u128, - state: &V02State, + state: &V03State, ) -> PrivacyPreservingTransaction { let 
program = Program::authenticated_transfer_program(); let sender_commitment = Commitment::new(&sender_keys.npk(), sender_private_account); @@ -1012,7 +1263,7 @@ pub mod tests { sender_private_account: &Account, recipient_account_id: &AccountId, balance_to_move: u128, - state: &V02State, + state: &V03State, ) -> PrivacyPreservingTransaction { let program = Program::authenticated_transfer_program(); let sender_commitment = Commitment::new(&sender_keys.npk(), sender_private_account); @@ -1058,7 +1309,7 @@ pub mod tests { let recipient_keys = test_private_account_keys_1(); let mut state = - V02State::new_with_genesis_accounts(&[(sender_keys.account_id(), 200)], &[]); + V03State::new_with_genesis_accounts(&[(sender_keys.account_id(), 200)], &[], 0); let balance_to_move = 37; @@ -1080,7 +1331,7 @@ pub mod tests { assert!(!state.private_state.0.contains(&expected_new_commitment)); state - .transition_from_privacy_preserving_transaction(&tx) + .transition_from_privacy_preserving_transaction(&tx, 1, 0) .unwrap(); let sender_post = state.get_account_by_id(sender_keys.account_id()); @@ -1106,7 +1357,7 @@ pub mod tests { }; let recipient_keys = test_private_account_keys_2(); - let mut state = V02State::new_with_genesis_accounts(&[], &[]) + let mut state = V03State::new_with_genesis_accounts(&[], &[], 0) .with_private_account(&sender_keys, &sender_private_account); let balance_to_move = 37; @@ -1150,7 +1401,7 @@ pub mod tests { assert!(!state.private_state.1.contains(&expected_new_nullifier)); state - .transition_from_privacy_preserving_transaction(&tx) + .transition_from_privacy_preserving_transaction(&tx, 1, 0) .unwrap(); assert_eq!(state.public_state, previous_public_state); @@ -1173,9 +1424,10 @@ pub mod tests { }; let recipient_keys = test_public_account_keys_1(); let recipient_initial_balance = 400; - let mut state = V02State::new_with_genesis_accounts( + let mut state = V03State::new_with_genesis_accounts( &[(recipient_keys.account_id(), recipient_initial_balance)], &[], 
+ 0, ) .with_private_account(&sender_keys, &sender_private_account); @@ -1214,7 +1466,7 @@ pub mod tests { assert!(!state.private_state.1.contains(&expected_new_nullifier)); state - .transition_from_privacy_preserving_transaction(&tx) + .transition_from_privacy_preserving_transaction(&tx, 1, 0) .unwrap(); let recipient_post = state.get_account_by_id(recipient_keys.account_id()); @@ -2127,7 +2379,7 @@ pub mod tests { }; let recipient_keys = test_private_account_keys_2(); - let mut state = V02State::new_with_genesis_accounts(&[], &[]) + let mut state = V03State::new_with_genesis_accounts(&[], &[], 0) .with_private_account(&sender_keys, &sender_private_account); let balance_to_move = 37; @@ -2142,7 +2394,7 @@ pub mod tests { ); state - .transition_from_privacy_preserving_transaction(&tx) + .transition_from_privacy_preserving_transaction(&tx, 1, 0) .unwrap(); let sender_private_account = Account { @@ -2160,7 +2412,7 @@ pub mod tests { &state, ); - let result = state.transition_from_privacy_preserving_transaction(&tx); + let result = state.transition_from_privacy_preserving_transaction(&tx, 1, 0); assert!(matches!(result, Err(NssaError::InvalidInput(_)))); let NssaError::InvalidInput(error_message) = result.err().unwrap() else { @@ -2207,15 +2459,14 @@ pub mod tests { #[test] fn claiming_mechanism() { let program = Program::authenticated_transfer_program(); - let key = PrivateKey::try_new([1; 32]).unwrap(); - let account_id = AccountId::from(&PublicKey::new_from_private_key(&key)); + let from_key = PrivateKey::try_new([1; 32]).unwrap(); + let from = AccountId::from(&PublicKey::new_from_private_key(&from_key)); let initial_balance = 100; - let initial_data = [(account_id, initial_balance)]; + let initial_data = [(from, initial_balance)]; let mut state = - V02State::new_with_genesis_accounts(&initial_data, &[]).with_test_programs(); - let from = account_id; - let from_key = key; - let to = AccountId::new([2; 32]); + V03State::new_with_genesis_accounts(&initial_data, &[], 
0).with_test_programs(); + let to_key = PrivateKey::try_new([2; 32]).unwrap(); + let to = AccountId::from(&PublicKey::new_from_private_key(&to_key)); let amount: u128 = 37; // Check the recipient is an uninitialized account @@ -2224,26 +2475,80 @@ pub mod tests { let expected_recipient_post = Account { program_owner: program.id(), balance: amount, + nonce: Nonce(1), ..Account::default() }; let message = public_transaction::Message::try_new( program.id(), vec![from, to], - vec![Nonce(0)], + vec![Nonce(0), Nonce(0)], amount, ) .unwrap(); - let witness_set = public_transaction::WitnessSet::for_message(&message, &[&from_key]); + let witness_set = + public_transaction::WitnessSet::for_message(&message, &[&from_key, &to_key]); let tx = PublicTransaction::new(message, witness_set); - state.transition_from_public_transaction(&tx).unwrap(); + state.transition_from_public_transaction(&tx, 1, 0).unwrap(); let recipient_post = state.get_account_by_id(to); assert_eq!(recipient_post, expected_recipient_post); } + #[test] + fn unauthorized_public_account_claiming_fails() { + let program = Program::authenticated_transfer_program(); + let account_key = PrivateKey::try_new([9; 32]).unwrap(); + let account_id = AccountId::from(&PublicKey::new_from_private_key(&account_key)); + let mut state = V03State::new_with_genesis_accounts(&[], &[], 0); + + assert_eq!(state.get_account_by_id(account_id), Account::default()); + + let message = + public_transaction::Message::try_new(program.id(), vec![account_id], vec![], 0_u128) + .unwrap(); + let witness_set = public_transaction::WitnessSet::for_message(&message, &[]); + let tx = PublicTransaction::new(message, witness_set); + + let result = state.transition_from_public_transaction(&tx, 1, 0); + + assert!(matches!(result, Err(NssaError::ProgramExecutionFailed(_)))); + assert_eq!(state.get_account_by_id(account_id), Account::default()); + } + + #[test] + fn authorized_public_account_claiming_succeeds() { + let program = 
Program::authenticated_transfer_program(); + let account_key = PrivateKey::try_new([10; 32]).unwrap(); + let account_id = AccountId::from(&PublicKey::new_from_private_key(&account_key)); + let mut state = V03State::new_with_genesis_accounts(&[], &[], 0); + + assert_eq!(state.get_account_by_id(account_id), Account::default()); + + let message = public_transaction::Message::try_new( + program.id(), + vec![account_id], + vec![Nonce(0)], + 0_u128, + ) + .unwrap(); + let witness_set = public_transaction::WitnessSet::for_message(&message, &[&account_key]); + let tx = PublicTransaction::new(message, witness_set); + + state.transition_from_public_transaction(&tx, 1, 0).unwrap(); + + assert_eq!( + state.get_account_by_id(account_id), + Account { + program_owner: program.id(), + nonce: Nonce(1), + ..Account::default() + } + ); + } + #[test] fn public_chained_call() { let program = Program::chain_caller(); @@ -2253,7 +2558,7 @@ pub mod tests { let initial_balance = 1000; let initial_data = [(from, initial_balance), (to, 0)]; let mut state = - V02State::new_with_genesis_accounts(&initial_data, &[]).with_test_programs(); + V03State::new_with_genesis_accounts(&initial_data, &[], 0).with_test_programs(); let from_key = key; let amount: u128 = 37; let instruction: (u128, ProgramId, u32, Option) = ( @@ -2280,7 +2585,7 @@ pub mod tests { let witness_set = public_transaction::WitnessSet::for_message(&message, &[&from_key]); let tx = PublicTransaction::new(message, witness_set); - state.transition_from_public_transaction(&tx).unwrap(); + state.transition_from_public_transaction(&tx, 1, 0).unwrap(); let from_post = state.get_account_by_id(from); let to_post = state.get_account_by_id(to); @@ -2298,7 +2603,7 @@ pub mod tests { let initial_balance = 100; let initial_data = [(from, initial_balance), (to, 0)]; let mut state = - V02State::new_with_genesis_accounts(&initial_data, &[]).with_test_programs(); + V03State::new_with_genesis_accounts(&initial_data, &[], 0).with_test_programs(); let 
from_key = key; let amount: u128 = 0; let instruction: (u128, ProgramId, u32, Option) = ( @@ -2320,7 +2625,7 @@ pub mod tests { let witness_set = public_transaction::WitnessSet::for_message(&message, &[&from_key]); let tx = PublicTransaction::new(message, witness_set); - let result = state.transition_from_public_transaction(&tx); + let result = state.transition_from_public_transaction(&tx, 1, 0); assert!(matches!( result, Err(NssaError::MaxChainedCallsDepthExceeded) @@ -2336,7 +2641,7 @@ pub mod tests { let initial_balance = 1000; let initial_data = [(from, initial_balance), (to, 0)]; let mut state = - V02State::new_with_genesis_accounts(&initial_data, &[]).with_test_programs(); + V03State::new_with_genesis_accounts(&initial_data, &[], 0).with_test_programs(); let amount: u128 = 58; let instruction: (u128, ProgramId, u32, Option) = ( amount, @@ -2361,7 +2666,7 @@ pub mod tests { let witness_set = public_transaction::WitnessSet::for_message(&message, &[]); let tx = PublicTransaction::new(message, witness_set); - state.transition_from_public_transaction(&tx).unwrap(); + state.transition_from_public_transaction(&tx, 1, 0).unwrap(); let from_post = state.get_account_by_id(from); let to_post = state.get_account_by_id(to); @@ -2377,15 +2682,14 @@ pub mod tests { // program and not the chained_caller program. 
let chain_caller = Program::chain_caller(); let auth_transfer = Program::authenticated_transfer_program(); - let key = PrivateKey::try_new([1; 32]).unwrap(); - let account_id = AccountId::from(&PublicKey::new_from_private_key(&key)); + let from_key = PrivateKey::try_new([1; 32]).unwrap(); + let from = AccountId::from(&PublicKey::new_from_private_key(&from_key)); let initial_balance = 100; - let initial_data = [(account_id, initial_balance)]; + let initial_data = [(from, initial_balance)]; let mut state = - V02State::new_with_genesis_accounts(&initial_data, &[]).with_test_programs(); - let from = account_id; - let from_key = key; - let to = AccountId::new([2; 32]); + V03State::new_with_genesis_accounts(&initial_data, &[], 0).with_test_programs(); + let to_key = PrivateKey::try_new([2; 32]).unwrap(); + let to = AccountId::from(&PublicKey::new_from_private_key(&to_key)); let amount: u128 = 37; // Check the recipient is an uninitialized account @@ -2395,6 +2699,7 @@ pub mod tests { // The expected program owner is the authenticated transfer program program_owner: auth_transfer.id(), balance: amount, + nonce: Nonce(1), ..Account::default() }; @@ -2410,14 +2715,15 @@ pub mod tests { chain_caller.id(), vec![to, from], // The chain_caller program permutes the account order in the chain // call - vec![Nonce(0)], + vec![Nonce(0), Nonce(0)], instruction, ) .unwrap(); - let witness_set = public_transaction::WitnessSet::for_message(&message, &[&from_key]); + let witness_set = + public_transaction::WitnessSet::for_message(&message, &[&from_key, &to_key]); let tx = PublicTransaction::new(message, witness_set); - state.transition_from_public_transaction(&tx).unwrap(); + state.transition_from_public_transaction(&tx, 1, 0).unwrap(); let from_post = state.get_account_by_id(from); let to_post = state.get_account_by_id(to); @@ -2425,6 +2731,88 @@ pub mod tests { assert_eq!(to_post, expected_to_post); } + #[test] + fn unauthorized_public_account_claiming_fails_when_executed_privately() 
{ + let program = Program::authenticated_transfer_program(); + let account_id = AccountId::new([11; 32]); + let public_account = AccountWithMetadata::new(Account::default(), false, account_id); + + let result = execute_and_prove( + vec![public_account], + Program::serialize_instruction(0_u128).unwrap(), + vec![0], + vec![], + vec![], + vec![], + &program.into(), + ); + + assert!(matches!(result, Err(NssaError::ProgramProveFailed(_)))); + } + + #[test] + fn authorized_public_account_claiming_succeeds_when_executed_privately() { + let program = Program::authenticated_transfer_program(); + let program_id = program.id(); + let sender_keys = test_private_account_keys_1(); + let sender_private_account = Account { + program_owner: program_id, + balance: 100, + ..Account::default() + }; + let sender_commitment = Commitment::new(&sender_keys.npk(), &sender_private_account); + let mut state = + V03State::new_with_genesis_accounts(&[], std::slice::from_ref(&sender_commitment), 0); + let sender_pre = AccountWithMetadata::new(sender_private_account, true, &sender_keys.npk()); + let recipient_private_key = PrivateKey::try_new([2; 32]).unwrap(); + let recipient_account_id = + AccountId::from(&PublicKey::new_from_private_key(&recipient_private_key)); + let recipient_pre = + AccountWithMetadata::new(Account::default(), true, recipient_account_id); + let esk = [5; 32]; + let shared_secret = SharedSecretKey::new(&esk, &sender_keys.vpk()); + let epk = EphemeralPublicKey::from_scalar(esk); + + let (output, proof) = execute_and_prove( + vec![sender_pre, recipient_pre], + Program::serialize_instruction(37_u128).unwrap(), + vec![1, 0], + vec![(sender_keys.npk(), shared_secret)], + vec![sender_keys.nsk], + vec![state.get_proof_for_commitment(&sender_commitment)], + &program.into(), + ) + .unwrap(); + + let message = Message::try_from_circuit_output( + vec![recipient_account_id], + vec![Nonce(0)], + vec![(sender_keys.npk(), sender_keys.vpk(), epk)], + output, + ) + .unwrap(); + + let 
witness_set = WitnessSet::for_message(&message, proof, &[&recipient_private_key]); + let tx = PrivacyPreservingTransaction::new(message, witness_set); + + state + .transition_from_privacy_preserving_transaction(&tx, 1, 0) + .unwrap(); + + let nullifier = Nullifier::for_account_update(&sender_commitment, &sender_keys.nsk); + assert!(state.private_state.1.contains(&nullifier)); + + assert_eq!( + state.get_account_by_id(recipient_account_id), + Account { + program_owner: program_id, + balance: 37, + nonce: Nonce(1), + ..Account::default() + } + ); + } + #[test_case::test_case(1; "single call")] #[test_case::test_case(2; "two calls")] fn private_chained_call(number_of_calls: u32) { @@ -2454,9 +2842,10 @@ pub mod tests { let from_commitment = Commitment::new(&from_keys.npk(), &from_account.account); let to_commitment = Commitment::new(&to_keys.npk(), &to_account.account); - let mut state = V02State::new_with_genesis_accounts( + let mut state = V03State::new_with_genesis_accounts( &[], &[from_commitment.clone(), to_commitment.clone()], + 0, ) .with_test_programs(); let amount: u128 = 37; @@ -2526,7 +2915,7 @@ pub mod tests { let transaction = PrivacyPreservingTransaction::new(message, witness_set); state - .transition_from_privacy_preserving_transaction(&transaction) + .transition_from_privacy_preserving_transaction(&transaction, 1, 0) .unwrap(); // Assert @@ -2563,39 +2952,50 @@ pub mod tests { ..Account::default() }; - let mut state = V02State::new_with_genesis_accounts(&[], &[]); + let mut state = V03State::new_with_genesis_accounts(&[], &[], 0); state.add_pinata_token_program(pinata_definition_id); - // Execution of the token program to create new token for the pinata token - // definition and supply accounts + // Set up the token accounts directly (bypassing public transactions which + // would require signers for Claim::Authorized). The focus of this test is + // the PDA mechanism in the pinata program's chained call, not token creation. 
let total_supply: u128 = 10_000_000; - let instruction = token_core::Instruction::NewFungibleDefinition { + let token_definition = token_core::TokenDefinition::Fungible { name: String::from("PINATA"), total_supply, + metadata_id: None, }; - let message = public_transaction::Message::try_new( - token.id(), - vec![pinata_token_definition_id, pinata_token_holding_id], - vec![], - instruction, - ) - .unwrap(); - let witness_set = public_transaction::WitnessSet::for_message(&message, &[]); - let tx = PublicTransaction::new(message, witness_set); - state.transition_from_public_transaction(&tx).unwrap(); - - // Execution of winner's token holding account initialization - let instruction = token_core::Instruction::InitializeAccount; - let message = public_transaction::Message::try_new( - token.id(), - vec![pinata_token_definition_id, winner_token_holding_id], - vec![], - instruction, - ) - .unwrap(); - let witness_set = public_transaction::WitnessSet::for_message(&message, &[]); - let tx = PublicTransaction::new(message, witness_set); - state.transition_from_public_transaction(&tx).unwrap(); + let token_holding = token_core::TokenHolding::Fungible { + definition_id: pinata_token_definition_id, + balance: total_supply, + }; + let winner_holding = token_core::TokenHolding::Fungible { + definition_id: pinata_token_definition_id, + balance: 0, + }; + state.force_insert_account( + pinata_token_definition_id, + Account { + program_owner: token.id(), + data: Data::from(&token_definition), + ..Account::default() + }, + ); + state.force_insert_account( + pinata_token_holding_id, + Account { + program_owner: token.id(), + data: Data::from(&token_holding), + ..Account::default() + }, + ); + state.force_insert_account( + winner_token_holding_id, + Account { + program_owner: token.id(), + data: Data::from(&winner_holding), + ..Account::default() + }, + ); // Submit a solution to the pinata program to claim the prize let solution: u128 = 989_106; @@ -2612,7 +3012,7 @@ pub mod tests { 
.unwrap(); let witness_set = public_transaction::WitnessSet::for_message(&message, &[]); let tx = PublicTransaction::new(message, witness_set); - state.transition_from_public_transaction(&tx).unwrap(); + state.transition_from_public_transaction(&tx, 1, 0).unwrap(); let winner_token_holding_post = state.get_account_by_id(winner_token_holding_id); assert_eq!( @@ -2624,7 +3024,7 @@ pub mod tests { #[test] fn claiming_mechanism_cannot_claim_initialied_accounts() { let claimer = Program::claimer(); - let mut state = V02State::new_with_genesis_accounts(&[], &[]).with_test_programs(); + let mut state = V03State::new_with_genesis_accounts(&[], &[], 0).with_test_programs(); let account_id = AccountId::new([2; 32]); // Insert an account with non-default program owner @@ -2642,7 +3042,7 @@ pub mod tests { let witness_set = public_transaction::WitnessSet::for_message(&message, &[]); let tx = PublicTransaction::new(message, witness_set); - let result = state.transition_from_public_transaction(&tx); + let result = state.transition_from_public_transaction(&tx, 1, 0); assert!(matches!(result, Err(NssaError::InvalidProgramBehavior))); } @@ -2659,12 +3059,13 @@ pub mod tests { let recipient_id = AccountId::from(&PublicKey::new_from_private_key(&recipient_key)); let recipient_init_balance: u128 = 10; - let mut state = V02State::new_with_genesis_accounts( + let mut state = V03State::new_with_genesis_accounts( &[ (sender_id, sender_init_balance), (recipient_id, recipient_init_balance), ], &[], + 0, ); state.insert_program(Program::modified_transfer_program()); @@ -2688,7 +3089,7 @@ pub mod tests { let witness_set = public_transaction::WitnessSet::for_message(&message, &[&sender_key]); let tx = PublicTransaction::new(message, witness_set); - let res = state.transition_from_public_transaction(&tx); + let res = state.transition_from_public_transaction(&tx, 1, 0); assert!(matches!(res, Err(NssaError::InvalidProgramBehavior))); let sender_post = state.get_account_by_id(sender_id); @@ 
-2714,7 +3115,7 @@ pub mod tests { #[test] fn private_authorized_uninitialized_account() { - let mut state = V02State::new_with_genesis_accounts(&[], &[]); + let mut state = V03State::new_with_genesis_accounts(&[], &[], 0); // Set up keys for the authorized private account let private_keys = test_private_account_keys_1(); @@ -2757,16 +3158,63 @@ pub mod tests { let witness_set = WitnessSet::for_message(&message, proof, &[]); let tx = PrivacyPreservingTransaction::new(message, witness_set); - let result = state.transition_from_privacy_preserving_transaction(&tx); + let result = state.transition_from_privacy_preserving_transaction(&tx, 1, 0); assert!(result.is_ok()); let nullifier = Nullifier::for_account_initialization(&private_keys.npk()); assert!(state.private_state.1.contains(&nullifier)); } + #[test] + fn private_unauthorized_uninitialized_account_can_still_be_claimed() { + let mut state = V03State::new_with_genesis_accounts(&[], &[], 0).with_test_programs(); + + let private_keys = test_private_account_keys_1(); + // This is intentional: claim authorization was introduced to protect public accounts, + // especially PDAs. Private PDAs are not useful in practice because there is no way to + // operate them without the corresponding private keys, so unauthorized private claiming + // remains allowed. 
+ let unauthorized_account = + AccountWithMetadata::new(Account::default(), false, &private_keys.npk()); + + let program = Program::claimer(); + let esk = [5; 32]; + let shared_secret = SharedSecretKey::new(&esk, &private_keys.vpk()); + let epk = EphemeralPublicKey::from_scalar(esk); + + let (output, proof) = execute_and_prove( + vec![unauthorized_account], + Program::serialize_instruction(0_u128).unwrap(), + vec![2], + vec![(private_keys.npk(), shared_secret)], + vec![], + vec![None], + &program.into(), + ) + .unwrap(); + + let message = Message::try_from_circuit_output( + vec![], + vec![], + vec![(private_keys.npk(), private_keys.vpk(), epk)], + output, + ) + .unwrap(); + + let witness_set = WitnessSet::for_message(&message, proof, &[]); + let tx = PrivacyPreservingTransaction::new(message, witness_set); + + state + .transition_from_privacy_preserving_transaction(&tx, 1, 0) + .unwrap(); + + let nullifier = Nullifier::for_account_initialization(&private_keys.npk()); + assert!(state.private_state.1.contains(&nullifier)); + } + #[test] fn private_account_claimed_then_used_without_init_flag_should_fail() { - let mut state = V02State::new_with_genesis_accounts(&[], &[]).with_test_programs(); + let mut state = V03State::new_with_genesis_accounts(&[], &[], 0).with_test_programs(); // Set up keys for the private account let private_keys = test_private_account_keys_1(); @@ -2810,7 +3258,7 @@ pub mod tests { // Claim should succeed assert!( state - .transition_from_privacy_preserving_transaction(&tx) + .transition_from_privacy_preserving_transaction(&tx, 1, 0) .is_ok() ); @@ -2847,7 +3295,7 @@ pub mod tests { fn public_changer_claimer_no_data_change_no_claim_succeeds() { let initial_data = []; let mut state = - V02State::new_with_genesis_accounts(&initial_data, &[]).with_test_programs(); + V03State::new_with_genesis_accounts(&initial_data, &[], 0).with_test_programs(); let account_id = AccountId::new([1; 32]); let program_id = Program::changer_claimer().id(); // Don't 
change data (None) and don't claim (false) @@ -2859,7 +3307,7 @@ pub mod tests { let witness_set = public_transaction::WitnessSet::for_message(&message, &[]); let tx = PublicTransaction::new(message, witness_set); - let result = state.transition_from_public_transaction(&tx); + let result = state.transition_from_public_transaction(&tx, 1, 0); // Should succeed - no changes made, no claim needed assert!(result.is_ok()); @@ -2871,7 +3319,7 @@ pub mod tests { fn public_changer_claimer_data_change_no_claim_fails() { let initial_data = []; let mut state = - V02State::new_with_genesis_accounts(&initial_data, &[]).with_test_programs(); + V03State::new_with_genesis_accounts(&initial_data, &[], 0).with_test_programs(); let account_id = AccountId::new([1; 32]); let program_id = Program::changer_claimer().id(); // Change data but don't claim (false) - should fail @@ -2884,7 +3332,7 @@ pub mod tests { let witness_set = public_transaction::WitnessSet::for_message(&message, &[]); let tx = PublicTransaction::new(message, witness_set); - let result = state.transition_from_public_transaction(&tx); + let result = state.transition_from_public_transaction(&tx, 1, 0); // Should fail - cannot modify data without claiming the account assert!(matches!(result, Err(NssaError::InvalidProgramBehavior))); @@ -2965,9 +3413,10 @@ pub mod tests { let recipient_commitment = Commitment::new(&recipient_keys.npk(), &recipient_account.account); - let state = V02State::new_with_genesis_accounts( + let state = V03State::new_with_genesis_accounts( &[(sender_account.account_id, sender_account.account.balance)], std::slice::from_ref(&recipient_commitment), + 0, ) .with_test_programs(); @@ -2996,14 +3445,717 @@ pub mod tests { assert!(matches!(result, Err(NssaError::CircuitProvingError(_)))); } + #[test_case::test_case((Some(1), Some(3)), 3; "at upper bound")] + #[test_case::test_case((Some(1), Some(3)), 2; "inside range")] + #[test_case::test_case((Some(1), Some(3)), 0; "below range")] + 
#[test_case::test_case((Some(1), Some(3)), 1; "at lower bound")] + #[test_case::test_case((Some(1), Some(3)), 4; "above range")] + #[test_case::test_case((Some(1), None), 1; "lower bound only - at bound")] + #[test_case::test_case((Some(1), None), 10; "lower bound only - above")] + #[test_case::test_case((Some(1), None), 0; "lower bound only - below")] + #[test_case::test_case((None, Some(3)), 3; "upper bound only - at bound")] + #[test_case::test_case((None, Some(3)), 0; "upper bound only - below")] + #[test_case::test_case((None, Some(3)), 4; "upper bound only - above")] + #[test_case::test_case((None, None), 0; "no bounds - always valid")] + #[test_case::test_case((None, None), 100; "no bounds - always valid 2")] + fn validity_window_works_in_public_transactions( + validity_window: (Option, Option), + block_id: BlockId, + ) { + let block_validity_window: BlockValidityWindow = validity_window.try_into().unwrap(); + let validity_window_program = Program::validity_window(); + let account_keys = test_public_account_keys_1(); + let pre = AccountWithMetadata::new(Account::default(), false, account_keys.account_id()); + let mut state = V03State::new_with_genesis_accounts(&[], &[], 0).with_test_programs(); + let tx = { + let account_ids = vec![pre.account_id]; + let nonces = vec![]; + let program_id = validity_window_program.id(); + let instruction = ( + block_validity_window, + TimestampValidityWindow::new_unbounded(), + ); + let message = + public_transaction::Message::try_new(program_id, account_ids, nonces, instruction) + .unwrap(); + let witness_set = public_transaction::WitnessSet::for_message(&message, &[]); + PublicTransaction::new(message, witness_set) + }; + let result = state.transition_from_public_transaction(&tx, block_id, 0); + let is_inside_validity_window = + match (block_validity_window.start(), block_validity_window.end()) { + (Some(s), Some(e)) => s <= block_id && block_id < e, + (Some(s), None) => s <= block_id, + (None, Some(e)) => block_id < e, + 
(None, None) => true, + }; + if is_inside_validity_window { + assert!(result.is_ok()); + } else { + assert!(matches!(result, Err(NssaError::OutOfValidityWindow))); + } + } + + #[test_case::test_case((Some(1), Some(3)), 3; "at upper bound")] + #[test_case::test_case((Some(1), Some(3)), 2; "inside range")] + #[test_case::test_case((Some(1), Some(3)), 0; "below range")] + #[test_case::test_case((Some(1), Some(3)), 1; "at lower bound")] + #[test_case::test_case((Some(1), Some(3)), 4; "above range")] + #[test_case::test_case((Some(1), None), 1; "lower bound only - at bound")] + #[test_case::test_case((Some(1), None), 10; "lower bound only - above")] + #[test_case::test_case((Some(1), None), 0; "lower bound only - below")] + #[test_case::test_case((None, Some(3)), 3; "upper bound only - at bound")] + #[test_case::test_case((None, Some(3)), 0; "upper bound only - below")] + #[test_case::test_case((None, Some(3)), 4; "upper bound only - above")] + #[test_case::test_case((None, None), 0; "no bounds - always valid")] + #[test_case::test_case((None, None), 100; "no bounds - always valid 2")] + fn timestamp_validity_window_works_in_public_transactions( + validity_window: (Option, Option), + timestamp: Timestamp, + ) { + let timestamp_validity_window: TimestampValidityWindow = + validity_window.try_into().unwrap(); + let validity_window_program = Program::validity_window(); + let account_keys = test_public_account_keys_1(); + let pre = AccountWithMetadata::new(Account::default(), false, account_keys.account_id()); + let mut state = V03State::new_with_genesis_accounts(&[], &[], 0).with_test_programs(); + let tx = { + let account_ids = vec![pre.account_id]; + let nonces = vec![]; + let program_id = validity_window_program.id(); + let instruction = ( + BlockValidityWindow::new_unbounded(), + timestamp_validity_window, + ); + let message = + public_transaction::Message::try_new(program_id, account_ids, nonces, instruction) + .unwrap(); + let witness_set = 
public_transaction::WitnessSet::for_message(&message, &[]); + PublicTransaction::new(message, witness_set) + }; + let result = state.transition_from_public_transaction(&tx, 1, timestamp); + let is_inside_validity_window = match ( + timestamp_validity_window.start(), + timestamp_validity_window.end(), + ) { + (Some(s), Some(e)) => s <= timestamp && timestamp < e, + (Some(s), None) => s <= timestamp, + (None, Some(e)) => timestamp < e, + (None, None) => true, + }; + if is_inside_validity_window { + assert!(result.is_ok()); + } else { + assert!(matches!(result, Err(NssaError::OutOfValidityWindow))); + } + } + + #[test_case::test_case((Some(1), Some(3)), 3; "at upper bound")] + #[test_case::test_case((Some(1), Some(3)), 2; "inside range")] + #[test_case::test_case((Some(1), Some(3)), 0; "below range")] + #[test_case::test_case((Some(1), Some(3)), 1; "at lower bound")] + #[test_case::test_case((Some(1), Some(3)), 4; "above range")] + #[test_case::test_case((Some(1), None), 1; "lower bound only - at bound")] + #[test_case::test_case((Some(1), None), 10; "lower bound only - above")] + #[test_case::test_case((Some(1), None), 0; "lower bound only - below")] + #[test_case::test_case((None, Some(3)), 3; "upper bound only - at bound")] + #[test_case::test_case((None, Some(3)), 0; "upper bound only - below")] + #[test_case::test_case((None, Some(3)), 4; "upper bound only - above")] + #[test_case::test_case((None, None), 0; "no bounds - always valid")] + #[test_case::test_case((None, None), 100; "no bounds - always valid 2")] + fn validity_window_works_in_privacy_preserving_transactions( + validity_window: (Option, Option), + block_id: BlockId, + ) { + let block_validity_window: BlockValidityWindow = validity_window.try_into().unwrap(); + let validity_window_program = Program::validity_window(); + let account_keys = test_private_account_keys_1(); + let pre = AccountWithMetadata::new(Account::default(), false, &account_keys.npk()); + let mut state = 
V03State::new_with_genesis_accounts(&[], &[], 0).with_test_programs(); + let tx = { + let esk = [3; 32]; + let shared_secret = SharedSecretKey::new(&esk, &account_keys.vpk()); + let epk = EphemeralPublicKey::from_scalar(esk); + + let instruction = ( + block_validity_window, + TimestampValidityWindow::new_unbounded(), + ); + let (output, proof) = circuit::execute_and_prove( + vec![pre], + Program::serialize_instruction(instruction).unwrap(), + vec![2], + vec![(account_keys.npk(), shared_secret)], + vec![], + vec![None], + &validity_window_program.into(), + ) + .unwrap(); + + let message = Message::try_from_circuit_output( + vec![], + vec![], + vec![(account_keys.npk(), account_keys.vpk(), epk)], + output, + ) + .unwrap(); + + let witness_set = WitnessSet::for_message(&message, proof, &[]); + PrivacyPreservingTransaction::new(message, witness_set) + }; + let result = state.transition_from_privacy_preserving_transaction(&tx, block_id, 0); + let is_inside_validity_window = + match (block_validity_window.start(), block_validity_window.end()) { + (Some(s), Some(e)) => s <= block_id && block_id < e, + (Some(s), None) => s <= block_id, + (None, Some(e)) => block_id < e, + (None, None) => true, + }; + if is_inside_validity_window { + assert!(result.is_ok()); + } else { + assert!(matches!(result, Err(NssaError::OutOfValidityWindow))); + } + } + + #[test_case::test_case((Some(1), Some(3)), 3; "at upper bound")] + #[test_case::test_case((Some(1), Some(3)), 2; "inside range")] + #[test_case::test_case((Some(1), Some(3)), 0; "below range")] + #[test_case::test_case((Some(1), Some(3)), 1; "at lower bound")] + #[test_case::test_case((Some(1), Some(3)), 4; "above range")] + #[test_case::test_case((Some(1), None), 1; "lower bound only - at bound")] + #[test_case::test_case((Some(1), None), 10; "lower bound only - above")] + #[test_case::test_case((Some(1), None), 0; "lower bound only - below")] + #[test_case::test_case((None, Some(3)), 3; "upper bound only - at bound")] + 
#[test_case::test_case((None, Some(3)), 0; "upper bound only - below")] + #[test_case::test_case((None, Some(3)), 4; "upper bound only - above")] + #[test_case::test_case((None, None), 0; "no bounds - always valid")] + #[test_case::test_case((None, None), 100; "no bounds - always valid 2")] + fn timestamp_validity_window_works_in_privacy_preserving_transactions( + validity_window: (Option, Option), + timestamp: Timestamp, + ) { + let timestamp_validity_window: TimestampValidityWindow = + validity_window.try_into().unwrap(); + let validity_window_program = Program::validity_window(); + let account_keys = test_private_account_keys_1(); + let pre = AccountWithMetadata::new(Account::default(), false, &account_keys.npk()); + let mut state = V03State::new_with_genesis_accounts(&[], &[], 0).with_test_programs(); + let tx = { + let esk = [3; 32]; + let shared_secret = SharedSecretKey::new(&esk, &account_keys.vpk()); + let epk = EphemeralPublicKey::from_scalar(esk); + + let instruction = ( + BlockValidityWindow::new_unbounded(), + timestamp_validity_window, + ); + let (output, proof) = circuit::execute_and_prove( + vec![pre], + Program::serialize_instruction(instruction).unwrap(), + vec![2], + vec![(account_keys.npk(), shared_secret)], + vec![], + vec![None], + &validity_window_program.into(), + ) + .unwrap(); + + let message = Message::try_from_circuit_output( + vec![], + vec![], + vec![(account_keys.npk(), account_keys.vpk(), epk)], + output, + ) + .unwrap(); + + let witness_set = WitnessSet::for_message(&message, proof, &[]); + PrivacyPreservingTransaction::new(message, witness_set) + }; + let result = state.transition_from_privacy_preserving_transaction(&tx, 1, timestamp); + let is_inside_validity_window = match ( + timestamp_validity_window.start(), + timestamp_validity_window.end(), + ) { + (Some(s), Some(e)) => s <= timestamp && timestamp < e, + (Some(s), None) => s <= timestamp, + (None, Some(e)) => timestamp < e, + (None, None) => true, + }; + if 
is_inside_validity_window { + assert!(result.is_ok()); + } else { + assert!(matches!(result, Err(NssaError::OutOfValidityWindow))); + } + } + + fn time_locked_transfer_transaction( + from: AccountId, + from_key: &PrivateKey, + from_nonce: u128, + to: AccountId, + clock_account_id: AccountId, + amount: u128, + deadline: u64, + ) -> PublicTransaction { + let program_id = Program::time_locked_transfer().id(); + let message = public_transaction::Message::try_new( + program_id, + vec![from, to, clock_account_id], + vec![Nonce(from_nonce)], + (amount, deadline), + ) + .unwrap(); + let witness_set = public_transaction::WitnessSet::for_message(&message, &[from_key]); + PublicTransaction::new(message, witness_set) + } + + #[test] + fn time_locked_transfer_succeeds_when_deadline_has_passed() { + let recipient_id = AccountId::new([42; 32]); + let genesis_timestamp = 500_u64; + let mut state = + V03State::new_with_genesis_accounts(&[(recipient_id, 0)], &[], genesis_timestamp) + .with_test_programs(); + let key1 = PrivateKey::try_new([1; 32]).unwrap(); + let sender_id = AccountId::from(&PublicKey::new_from_private_key(&key1)); + state.force_insert_account( + sender_id, + Account { + program_owner: Program::time_locked_transfer().id(), + balance: 100, + ..Account::default() + }, + ); + + let amount = 100_u128; + // Deadline in the past: transfer should succeed. + let deadline = 0_u64; + + let tx = time_locked_transfer_transaction( + sender_id, + &key1, + 0, + recipient_id, + CLOCK_01_PROGRAM_ACCOUNT_ID, + amount, + deadline, + ); + + let block_id = 1; + let timestamp = genesis_timestamp + 100; + state + .transition_from_public_transaction(&tx, block_id, timestamp) + .unwrap(); + + // Balances changed. 
+ assert_eq!(state.get_account_by_id(sender_id).balance, 0); + assert_eq!(state.get_account_by_id(recipient_id).balance, 100); + } + + #[test] + fn time_locked_transfer_fails_when_deadline_is_in_the_future() { + let recipient_id = AccountId::new([42; 32]); + let genesis_timestamp = 500_u64; + let mut state = + V03State::new_with_genesis_accounts(&[(recipient_id, 0)], &[], genesis_timestamp) + .with_test_programs(); + let key1 = PrivateKey::try_new([1; 32]).unwrap(); + let sender_id = AccountId::from(&PublicKey::new_from_private_key(&key1)); + state.force_insert_account( + sender_id, + Account { + program_owner: Program::time_locked_transfer().id(), + balance: 100, + ..Account::default() + }, + ); + + let amount = 100_u128; + // Far-future deadline: program should panic. + let deadline = u64::MAX; + + let tx = time_locked_transfer_transaction( + sender_id, + &key1, + 0, + recipient_id, + CLOCK_01_PROGRAM_ACCOUNT_ID, + amount, + deadline, + ); + + let block_id = 1; + let timestamp = genesis_timestamp + 100; + let result = state.transition_from_public_transaction(&tx, block_id, timestamp); + + assert!( + result.is_err(), + "Transfer should fail when deadline is in the future" + ); + // Balances unchanged. 
+ assert_eq!(state.get_account_by_id(sender_id).balance, 100); + assert_eq!(state.get_account_by_id(recipient_id).balance, 0); + } + + fn pinata_cooldown_data(prize: u128, cooldown_ms: u64, last_claim_timestamp: u64) -> Vec { + let mut buf = Vec::with_capacity(32); + buf.extend_from_slice(&prize.to_le_bytes()); + buf.extend_from_slice(&cooldown_ms.to_le_bytes()); + buf.extend_from_slice(&last_claim_timestamp.to_le_bytes()); + buf + } + + fn pinata_cooldown_transaction( + pinata_id: AccountId, + winner_id: AccountId, + clock_account_id: AccountId, + ) -> PublicTransaction { + let program_id = Program::pinata_cooldown().id(); + let message = public_transaction::Message::try_new( + program_id, + vec![pinata_id, winner_id, clock_account_id], + vec![], + (), + ) + .unwrap(); + let witness_set = public_transaction::WitnessSet::for_message(&message, &[]); + PublicTransaction::new(message, witness_set) + } + + #[test] + fn pinata_cooldown_claim_succeeds_after_cooldown() { + let winner_id = AccountId::new([11; 32]); + let pinata_id = AccountId::new([99; 32]); + + let genesis_timestamp = 1000_u64; + let mut state = + V03State::new_with_genesis_accounts(&[(winner_id, 0)], &[], genesis_timestamp) + .with_test_programs(); + + let prize = 50_u128; + let cooldown_ms = 500_u64; + // Last claim was at genesis, so any timestamp >= genesis + cooldown should work. + let last_claim_timestamp = genesis_timestamp; + + state.force_insert_account( + pinata_id, + Account { + program_owner: Program::pinata_cooldown().id(), + balance: 1000, + data: pinata_cooldown_data(prize, cooldown_ms, last_claim_timestamp) + .try_into() + .unwrap(), + ..Account::default() + }, + ); + + let tx = pinata_cooldown_transaction(pinata_id, winner_id, CLOCK_01_PROGRAM_ACCOUNT_ID); + + let block_id = 1; + let block_timestamp = genesis_timestamp + cooldown_ms; + // Advance clock so the cooldown check reads an updated timestamp. 
+ let clock_tx = clock_transaction(block_timestamp); + state + .transition_from_public_transaction(&clock_tx, block_id, block_timestamp) + .unwrap(); + + state + .transition_from_public_transaction(&tx, block_id, block_timestamp) + .unwrap(); + + assert_eq!(state.get_account_by_id(pinata_id).balance, 1000 - prize); + assert_eq!(state.get_account_by_id(winner_id).balance, prize); + } + + #[test] + fn pinata_cooldown_claim_fails_during_cooldown() { + let winner_id = AccountId::new([11; 32]); + let pinata_id = AccountId::new([99; 32]); + + let genesis_timestamp = 1000_u64; + let mut state = + V03State::new_with_genesis_accounts(&[(winner_id, 0)], &[], genesis_timestamp) + .with_test_programs(); + + let prize = 50_u128; + let cooldown_ms = 500_u64; + let last_claim_timestamp = genesis_timestamp; + + state.force_insert_account( + pinata_id, + Account { + balance: 1000, + data: pinata_cooldown_data(prize, cooldown_ms, last_claim_timestamp) + .try_into() + .unwrap(), + ..Account::default() + }, + ); + + let tx = pinata_cooldown_transaction(pinata_id, winner_id, CLOCK_01_PROGRAM_ACCOUNT_ID); + + let block_id = 1; + // Timestamp is only 100ms after last claim, well within the 500ms cooldown. 
+ let block_timestamp = genesis_timestamp + 100; + let clock_tx = clock_transaction(block_timestamp); + state + .transition_from_public_transaction(&clock_tx, block_id, block_timestamp) + .unwrap(); + + let result = state.transition_from_public_transaction(&tx, block_id, block_timestamp); + + assert!(result.is_err(), "Claim should fail during cooldown period"); + assert_eq!(state.get_account_by_id(pinata_id).balance, 1000); + assert_eq!(state.get_account_by_id(winner_id).balance, 0); + } + #[test] fn state_serialization_roundtrip() { let account_id_1 = AccountId::new([1; 32]); let account_id_2 = AccountId::new([2; 32]); let initial_data = [(account_id_1, 100_u128), (account_id_2, 151_u128)]; - let state = V02State::new_with_genesis_accounts(&initial_data, &[]).with_test_programs(); + let state = V03State::new_with_genesis_accounts(&initial_data, &[], 0).with_test_programs(); let bytes = borsh::to_vec(&state).unwrap(); - let state_from_bytes: V02State = borsh::from_slice(&bytes).unwrap(); + let state_from_bytes: V03State = borsh::from_slice(&bytes).unwrap(); assert_eq!(state, state_from_bytes); } + + #[test] + fn flash_swap_successful() { + let initiator = Program::flash_swap_initiator(); + let callback = Program::flash_swap_callback(); + let token = Program::authenticated_transfer_program(); + + let vault_id = AccountId::from((&initiator.id(), &PdaSeed::new([0_u8; 32]))); + let receiver_id = AccountId::from((&callback.id(), &PdaSeed::new([1_u8; 32]))); + + let initial_balance: u128 = 1000; + let amount_out: u128 = 100; + + let vault_account = Account { + program_owner: token.id(), + balance: initial_balance, + ..Account::default() + }; + let receiver_account = Account { + program_owner: token.id(), + balance: 0, + ..Account::default() + }; + + let mut state = V03State::new_with_genesis_accounts(&[], &[], 0).with_test_programs(); + state.force_insert_account(vault_id, vault_account); + state.force_insert_account(receiver_id, receiver_account); + + // Callback 
instruction: return funds + let cb_instruction = CallbackInstruction { + return_funds: true, + token_program_id: token.id(), + amount: amount_out, + }; + let cb_data = Program::serialize_instruction(cb_instruction).unwrap(); + + let instruction = FlashSwapInstruction::Initiate { + token_program_id: token.id(), + callback_program_id: callback.id(), + amount_out, + callback_instruction_data: cb_data, + }; + + let tx = build_flash_swap_tx(&initiator, vault_id, receiver_id, instruction); + let result = state.transition_from_public_transaction(&tx, 1, 0); + assert!(result.is_ok(), "flash swap should succeed: {result:?}"); + + // Vault balance restored, receiver back to 0 + assert_eq!(state.get_account_by_id(vault_id).balance, initial_balance); + assert_eq!(state.get_account_by_id(receiver_id).balance, 0); + } + + #[test] + fn flash_swap_callback_keeps_funds_rollback() { + let initiator = Program::flash_swap_initiator(); + let callback = Program::flash_swap_callback(); + let token = Program::authenticated_transfer_program(); + + let vault_id = AccountId::from((&initiator.id(), &PdaSeed::new([0_u8; 32]))); + let receiver_id = AccountId::from((&callback.id(), &PdaSeed::new([1_u8; 32]))); + + let initial_balance: u128 = 1000; + let amount_out: u128 = 100; + + let vault_account = Account { + program_owner: token.id(), + balance: initial_balance, + ..Account::default() + }; + let receiver_account = Account { + program_owner: token.id(), + balance: 0, + ..Account::default() + }; + + let mut state = V03State::new_with_genesis_accounts(&[], &[], 0).with_test_programs(); + state.force_insert_account(vault_id, vault_account); + state.force_insert_account(receiver_id, receiver_account); + + // Callback instruction: do NOT return funds + let cb_instruction = CallbackInstruction { + return_funds: false, + token_program_id: token.id(), + amount: amount_out, + }; + let cb_data = Program::serialize_instruction(cb_instruction).unwrap(); + + let instruction = 
FlashSwapInstruction::Initiate { + token_program_id: token.id(), + callback_program_id: callback.id(), + amount_out, + callback_instruction_data: cb_data, + }; + + let tx = build_flash_swap_tx(&initiator, vault_id, receiver_id, instruction); + let result = state.transition_from_public_transaction(&tx, 1, 0); + + // Invariant check fails → entire tx rolls back + assert!( + result.is_err(), + "flash swap should fail when callback keeps funds" + ); + + // State unchanged (rollback) + assert_eq!(state.get_account_by_id(vault_id).balance, initial_balance); + assert_eq!(state.get_account_by_id(receiver_id).balance, 0); + } + + #[test] + fn flash_swap_self_call_targets_correct_program() { + // Zero-amount flash swap: the invariant self-call still runs and succeeds + // because vault balance doesn't decrease. + let initiator = Program::flash_swap_initiator(); + let callback = Program::flash_swap_callback(); + let token = Program::authenticated_transfer_program(); + + let vault_id = AccountId::from((&initiator.id(), &PdaSeed::new([0_u8; 32]))); + let receiver_id = AccountId::from((&callback.id(), &PdaSeed::new([1_u8; 32]))); + + let initial_balance: u128 = 1000; + + let vault_account = Account { + program_owner: token.id(), + balance: initial_balance, + ..Account::default() + }; + let receiver_account = Account { + program_owner: token.id(), + balance: 0, + ..Account::default() + }; + + let mut state = V03State::new_with_genesis_accounts(&[], &[], 0).with_test_programs(); + state.force_insert_account(vault_id, vault_account); + state.force_insert_account(receiver_id, receiver_account); + + let cb_instruction = CallbackInstruction { + return_funds: true, + token_program_id: token.id(), + amount: 0, + }; + let cb_data = Program::serialize_instruction(cb_instruction).unwrap(); + + let instruction = FlashSwapInstruction::Initiate { + token_program_id: token.id(), + callback_program_id: callback.id(), + amount_out: 0, + callback_instruction_data: cb_data, + }; + + let tx = 
build_flash_swap_tx(&initiator, vault_id, receiver_id, instruction); + let result = state.transition_from_public_transaction(&tx, 1, 0); + assert!( + result.is_ok(), + "zero-amount flash swap should succeed: {result:?}" + ); + } + + #[test] + fn flash_swap_standalone_invariant_check_rejected() { + // Calling InvariantCheck directly (not as a chained self-call) should fail + // because caller_program_id will be None. + let initiator = Program::flash_swap_initiator(); + let token = Program::authenticated_transfer_program(); + + let vault_id = AccountId::from((&initiator.id(), &PdaSeed::new([0_u8; 32]))); + + let vault_account = Account { + program_owner: token.id(), + balance: 1000, + ..Account::default() + }; + + let mut state = V03State::new_with_genesis_accounts(&[], &[], 0).with_test_programs(); + state.force_insert_account(vault_id, vault_account); + + let instruction = FlashSwapInstruction::InvariantCheck { + min_vault_balance: 1000, + }; + + let message = public_transaction::Message::try_new( + initiator.id(), + vec![vault_id], + vec![], + instruction, + ) + .unwrap(); + let witness_set = public_transaction::WitnessSet::for_message(&message, &[]); + let tx = PublicTransaction::new(message, witness_set); + + let result = state.transition_from_public_transaction(&tx, 1, 0); + assert!( + result.is_err(), + "standalone InvariantCheck should be rejected (caller_program_id is None)" + ); + } + + #[test] + fn malicious_self_program_id_rejected_in_public_execution() { + let program = Program::malicious_self_program_id(); + let acc_id = AccountId::new([99; 32]); + let account = Account::default(); + + let mut state = V03State::new_with_genesis_accounts(&[], &[], 0).with_test_programs(); + state.force_insert_account(acc_id, account); + + let message = + public_transaction::Message::try_new(program.id(), vec![acc_id], vec![], ()).unwrap(); + let witness_set = public_transaction::WitnessSet::for_message(&message, &[]); + let tx = PublicTransaction::new(message, 
witness_set); + + let result = state.transition_from_public_transaction(&tx, 1, 0); + assert!( + result.is_err(), + "program with wrong self_program_id in output should be rejected" + ); + } + + #[test] + fn malicious_caller_program_id_rejected_in_public_execution() { + let program = Program::malicious_caller_program_id(); + let acc_id = AccountId::new([99; 32]); + let account = Account::default(); + + let mut state = V03State::new_with_genesis_accounts(&[], &[], 0).with_test_programs(); + state.force_insert_account(acc_id, account); + + let message = + public_transaction::Message::try_new(program.id(), vec![acc_id], vec![], ()).unwrap(); + let witness_set = public_transaction::WitnessSet::for_message(&message, &[]); + let tx = PublicTransaction::new(message, witness_set); + + let result = state.transition_from_public_transaction(&tx, 1, 0); + assert!( + result.is_err(), + "program with spoofed caller_program_id in output should be rejected" + ); + } } diff --git a/nssa/src/validated_state_diff.rs b/nssa/src/validated_state_diff.rs new file mode 100644 index 00000000..9614d1b7 --- /dev/null +++ b/nssa/src/validated_state_diff.rs @@ -0,0 +1,442 @@ +use std::{ + collections::{HashMap, HashSet, VecDeque}, + hash::Hash, +}; + +use log::debug; +use nssa_core::{ + BlockId, Commitment, Nullifier, PrivacyPreservingCircuitOutput, Timestamp, + account::{Account, AccountId, AccountWithMetadata}, + program::{ + ChainedCall, Claim, DEFAULT_PROGRAM_ID, compute_authorized_pdas, validate_execution, + }, +}; + +use crate::{ + V03State, ensure, + error::NssaError, + privacy_preserving_transaction::{ + PrivacyPreservingTransaction, circuit::Proof, message::Message, + }, + program::Program, + program_deployment_transaction::ProgramDeploymentTransaction, + public_transaction::PublicTransaction, + state::MAX_NUMBER_CHAINED_CALLS, +}; + +pub struct StateDiff { + pub signer_account_ids: Vec, + pub public_diff: HashMap, + pub new_commitments: Vec, + pub new_nullifiers: Vec, + pub program: 
Option, +} + +/// The validated output of executing or verifying a transaction, ready to be applied to the state. +/// +/// Can only be constructed by the transaction validation functions inside this crate, ensuring the +/// diff has been checked before any state mutation occurs. +pub struct ValidatedStateDiff(StateDiff); + +impl ValidatedStateDiff { + pub fn from_public_transaction( + tx: &PublicTransaction, + state: &V03State, + block_id: BlockId, + timestamp: Timestamp, + ) -> Result { + let message = tx.message(); + let witness_set = tx.witness_set(); + + // All account_ids must be different + ensure!( + message.account_ids.iter().collect::>().len() == message.account_ids.len(), + NssaError::InvalidInput("Duplicate account_ids found in message".into(),) + ); + + // Check exactly one nonce is provided for each signature + ensure!( + message.nonces.len() == witness_set.signatures_and_public_keys.len(), + NssaError::InvalidInput( + "Mismatch between number of nonces and signatures/public keys".into(), + ) + ); + + // Check the signatures are valid + ensure!( + witness_set.is_valid_for(message), + NssaError::InvalidInput("Invalid signature for given message and public key".into()) + ); + + let signer_account_ids = tx.signer_account_ids(); + // Check nonces corresponds to the current nonces on the public state. 
+ for (account_id, nonce) in signer_account_ids.iter().zip(&message.nonces) { + let current_nonce = state.get_account_by_id(*account_id).nonce; + ensure!( + current_nonce == *nonce, + NssaError::InvalidInput("Nonce mismatch".into()) + ); + } + + // Build pre_states for execution + let input_pre_states: Vec<_> = message + .account_ids + .iter() + .map(|account_id| { + AccountWithMetadata::new( + state.get_account_by_id(*account_id), + signer_account_ids.contains(account_id), + *account_id, + ) + }) + .collect(); + + let mut state_diff: HashMap = HashMap::new(); + + let initial_call = ChainedCall { + program_id: message.program_id, + instruction_data: message.instruction_data.clone(), + pre_states: input_pre_states, + pda_seeds: vec![], + }; + + let mut chained_calls = VecDeque::from_iter([(initial_call, None)]); + let mut chain_calls_counter = 0; + + while let Some((chained_call, caller_program_id)) = chained_calls.pop_front() { + ensure!( + chain_calls_counter <= MAX_NUMBER_CHAINED_CALLS, + NssaError::MaxChainedCallsDepthExceeded + ); + + // Check that the `program_id` corresponds to a deployed program + let Some(program) = state.programs().get(&chained_call.program_id) else { + return Err(NssaError::InvalidInput("Unknown program".into())); + }; + + debug!( + "Program {:?} pre_states: {:?}, instruction_data: {:?}", + chained_call.program_id, chained_call.pre_states, chained_call.instruction_data + ); + let mut program_output = program.execute( + caller_program_id, + &chained_call.pre_states, + &chained_call.instruction_data, + )?; + debug!( + "Program {:?} output: {:?}", + chained_call.program_id, program_output + ); + + let authorized_pdas = + compute_authorized_pdas(caller_program_id, &chained_call.pda_seeds); + + let is_authorized = |account_id: &AccountId| { + signer_account_ids.contains(account_id) || authorized_pdas.contains(account_id) + }; + + for pre in &program_output.pre_states { + let account_id = pre.account_id; + // Check that the program output 
pre_states coincide with the values in the public + // state or with any modifications to those values during the chain of calls. + let expected_pre = state_diff + .get(&account_id) + .cloned() + .unwrap_or_else(|| state.get_account_by_id(account_id)); + ensure!( + pre.account == expected_pre, + NssaError::InvalidProgramBehavior + ); + + // Check that authorization flags are consistent with the provided ones or + // authorized by program through the PDA mechanism + ensure!( + pre.is_authorized == is_authorized(&account_id), + NssaError::InvalidProgramBehavior + ); + } + + // Verify that the program output's self_program_id matches the expected program ID. + ensure!( + program_output.self_program_id == chained_call.program_id, + NssaError::InvalidProgramBehavior + ); + + // Verify that the program output's caller_program_id matches the actual caller. + ensure!( + program_output.caller_program_id == caller_program_id, + NssaError::InvalidProgramBehavior + ); + + // Verify execution corresponds to a well-behaved program. + // See the # Programs section for the definition of the `validate_execution` method. + ensure!( + validate_execution( + &program_output.pre_states, + &program_output.post_states, + chained_call.program_id, + ), + NssaError::InvalidProgramBehavior + ); + + // Verify validity window + ensure!( + program_output.block_validity_window.is_valid_for(block_id) + && program_output + .timestamp_validity_window + .is_valid_for(timestamp), + NssaError::OutOfValidityWindow + ); + + for (i, post) in program_output.post_states.iter_mut().enumerate() { + let Some(claim) = post.required_claim() else { + continue; + }; + // The invoked program can only claim accounts with default program id. 
+ ensure!( + post.account().program_owner == DEFAULT_PROGRAM_ID, + NssaError::InvalidProgramBehavior + ); + + let account_id = program_output.pre_states[i].account_id; + + match claim { + Claim::Authorized => { + // The program can only claim accounts that were authorized by the signer. + ensure!( + is_authorized(&account_id), + NssaError::InvalidProgramBehavior + ); + } + Claim::Pda(seed) => { + // The program can only claim accounts that correspond to the PDAs it is + // authorized to claim. + let pda = AccountId::from((&chained_call.program_id, &seed)); + ensure!(account_id == pda, NssaError::InvalidProgramBehavior); + } + } + + post.account_mut().program_owner = chained_call.program_id; + } + + // Update the state diff + for (pre, post) in program_output + .pre_states + .iter() + .zip(program_output.post_states.iter()) + { + state_diff.insert(pre.account_id, post.account().clone()); + } + + for new_call in program_output.chained_calls.into_iter().rev() { + chained_calls.push_front((new_call, Some(chained_call.program_id))); + } + + chain_calls_counter = chain_calls_counter + .checked_add(1) + .expect("we check the max depth at the beginning of the loop"); + } + + // Check that all modified uninitialized accounts where claimed + for post in state_diff.iter().filter_map(|(account_id, post)| { + let pre = state.get_account_by_id(*account_id); + if pre.program_owner != DEFAULT_PROGRAM_ID { + return None; + } + if pre == *post { + return None; + } + Some(post) + }) { + ensure!( + post.program_owner != DEFAULT_PROGRAM_ID, + NssaError::InvalidProgramBehavior + ); + } + + Ok(Self(StateDiff { + signer_account_ids, + public_diff: state_diff, + new_commitments: vec![], + new_nullifiers: vec![], + program: None, + })) + } + + pub fn from_privacy_preserving_transaction( + tx: &PrivacyPreservingTransaction, + state: &V03State, + block_id: BlockId, + timestamp: Timestamp, + ) -> Result { + let message = &tx.message; + let witness_set = &tx.witness_set; + + // 1. 
Commitments or nullifiers are non empty + if message.new_commitments.is_empty() && message.new_nullifiers.is_empty() { + return Err(NssaError::InvalidInput( + "Empty commitments and empty nullifiers found in message".into(), + )); + } + + // 2. Check there are no duplicate account_ids in the public_account_ids list. + if n_unique(&message.public_account_ids) != message.public_account_ids.len() { + return Err(NssaError::InvalidInput( + "Duplicate account_ids found in message".into(), + )); + } + + // Check there are no duplicate nullifiers in the new_nullifiers list + if n_unique(&message.new_nullifiers) != message.new_nullifiers.len() { + return Err(NssaError::InvalidInput( + "Duplicate nullifiers found in message".into(), + )); + } + + // Check there are no duplicate commitments in the new_commitments list + if n_unique(&message.new_commitments) != message.new_commitments.len() { + return Err(NssaError::InvalidInput( + "Duplicate commitments found in message".into(), + )); + } + + // 3. Nonce checks and Valid signatures + // Check exactly one nonce is provided for each signature + if message.nonces.len() != witness_set.signatures_and_public_keys.len() { + return Err(NssaError::InvalidInput( + "Mismatch between number of nonces and signatures/public keys".into(), + )); + } + + // Check the signatures are valid + if !witness_set.signatures_are_valid_for(message) { + return Err(NssaError::InvalidInput( + "Invalid signature for given message and public key".into(), + )); + } + + let signer_account_ids = tx.signer_account_ids(); + // Check nonces corresponds to the current nonces on the public state. 
+ for (account_id, nonce) in signer_account_ids.iter().zip(&message.nonces) { + let current_nonce = state.get_account_by_id(*account_id).nonce; + if current_nonce != *nonce { + return Err(NssaError::InvalidInput("Nonce mismatch".into())); + } + } + + // Verify validity window + if !message.block_validity_window.is_valid_for(block_id) + || !message.timestamp_validity_window.is_valid_for(timestamp) + { + return Err(NssaError::OutOfValidityWindow); + } + + // Build pre_states for proof verification + let public_pre_states: Vec<_> = message + .public_account_ids + .iter() + .map(|account_id| { + AccountWithMetadata::new( + state.get_account_by_id(*account_id), + signer_account_ids.contains(account_id), + *account_id, + ) + }) + .collect(); + + // 4. Proof verification + check_privacy_preserving_circuit_proof_is_valid( + &witness_set.proof, + &public_pre_states, + message, + )?; + + // 5. Commitment freshness + state.check_commitments_are_new(&message.new_commitments)?; + + // 6. Nullifier uniqueness + state.check_nullifiers_are_valid(&message.new_nullifiers)?; + + let public_diff = message + .public_account_ids + .iter() + .copied() + .zip(message.public_post_states.clone()) + .collect(); + let new_nullifiers = message + .new_nullifiers + .iter() + .copied() + .map(|(nullifier, _)| nullifier) + .collect(); + + Ok(Self(StateDiff { + signer_account_ids, + public_diff, + new_commitments: message.new_commitments.clone(), + new_nullifiers, + program: None, + })) + } + + pub fn from_program_deployment_transaction( + tx: &ProgramDeploymentTransaction, + state: &V03State, + ) -> Result { + // TODO: remove clone + let program = Program::new(tx.message.bytecode.clone())?; + if state.programs().contains_key(&program.id()) { + return Err(NssaError::ProgramAlreadyExists); + } + Ok(Self(StateDiff { + signer_account_ids: vec![], + public_diff: HashMap::new(), + new_commitments: vec![], + new_nullifiers: vec![], + program: Some(program), + })) + } + + /// Returns the public account 
changes produced by this transaction. + /// + /// Used by callers (e.g. the sequencer) to inspect the diff before committing it, for example + /// to enforce that system accounts are not modified by user transactions. + #[must_use] + pub fn public_diff(&self) -> HashMap { + self.0.public_diff.clone() + } + + pub(crate) fn into_state_diff(self) -> StateDiff { + self.0 + } +} + +fn check_privacy_preserving_circuit_proof_is_valid( + proof: &Proof, + public_pre_states: &[AccountWithMetadata], + message: &Message, +) -> Result<(), NssaError> { + let output = PrivacyPreservingCircuitOutput { + public_pre_states: public_pre_states.to_vec(), + public_post_states: message.public_post_states.clone(), + ciphertexts: message + .encrypted_private_post_states + .iter() + .cloned() + .map(|value| value.ciphertext) + .collect(), + new_commitments: message.new_commitments.clone(), + new_nullifiers: message.new_nullifiers.clone(), + block_validity_window: message.block_validity_window, + timestamp_validity_window: message.timestamp_validity_window, + }; + proof + .is_valid_for(&output) + .then_some(()) + .ok_or(NssaError::InvalidPrivacyPreservingProof) +} + +fn n_unique(data: &[T]) -> usize { + let set: HashSet<&T> = data.iter().collect(); + set.len() +} diff --git a/program_methods/guest/Cargo.toml b/program_methods/guest/Cargo.toml index eabcffc8..dc2077b7 100644 --- a/program_methods/guest/Cargo.toml +++ b/program_methods/guest/Cargo.toml @@ -9,9 +9,12 @@ workspace = true [dependencies] nssa_core.workspace = true +clock_core.workspace = true token_core.workspace = true token_program.workspace = true amm_core.workspace = true amm_program.workspace = true +ata_core.workspace = true +ata_program.workspace = true risc0-zkvm.workspace = true serde = { workspace = true, default-features = false } diff --git a/program_methods/guest/src/bin/amm.rs b/program_methods/guest/src/bin/amm.rs index 00fd39d3..bce76c63 100644 --- a/program_methods/guest/src/bin/amm.rs +++ 
b/program_methods/guest/src/bin/amm.rs @@ -9,11 +9,13 @@ use std::num::NonZero; use amm_core::Instruction; -use nssa_core::program::{ProgramInput, read_nssa_inputs, write_nssa_outputs_with_chained_call}; +use nssa_core::program::{ProgramInput, ProgramOutput, read_nssa_inputs}; fn main() { let ( ProgramInput { + self_program_id, + caller_program_id, pre_states, instruction, }, @@ -112,15 +114,15 @@ fn main() { min_amount_to_remove_token_b, ) } - Instruction::Swap { + Instruction::SwapExactInput { swap_amount_in, min_amount_out, token_definition_id_in, } => { let [pool, vault_a, vault_b, user_holding_a, user_holding_b] = pre_states .try_into() - .expect("Transfer instruction requires exactly five accounts"); - amm_program::swap::swap( + .expect("SwapExactInput instruction requires exactly five accounts"); + amm_program::swap::swap_exact_input( pool, vault_a, vault_b, @@ -131,12 +133,34 @@ fn main() { token_definition_id_in, ) } + Instruction::SwapExactOutput { + exact_amount_out, + max_amount_in, + token_definition_id_in, + } => { + let [pool, vault_a, vault_b, user_holding_a, user_holding_b] = pre_states + .try_into() + .expect("SwapExactOutput instruction requires exactly five accounts"); + amm_program::swap::swap_exact_output( + pool, + vault_a, + vault_b, + user_holding_a, + user_holding_b, + exact_amount_out, + max_amount_in, + token_definition_id_in, + ) + } }; - write_nssa_outputs_with_chained_call( + ProgramOutput::new( + self_program_id, + caller_program_id, instruction_words, pre_states_clone, post_states, - chained_calls, - ); + ) + .with_chained_calls(chained_calls) + .write(); } diff --git a/program_methods/guest/src/bin/associated_token_account.rs b/program_methods/guest/src/bin/associated_token_account.rs new file mode 100644 index 00000000..9b155d7f --- /dev/null +++ b/program_methods/guest/src/bin/associated_token_account.rs @@ -0,0 +1,70 @@ +use ata_core::Instruction; +use nssa_core::program::{ProgramInput, ProgramOutput, read_nssa_inputs}; + +fn 
main() { + let ( + ProgramInput { + self_program_id, + caller_program_id, + pre_states, + instruction, + }, + instruction_words, + ) = read_nssa_inputs::(); + + let pre_states_clone = pre_states.clone(); + + let (post_states, chained_calls) = match instruction { + Instruction::Create { ata_program_id } => { + let [owner, token_definition, ata_account] = pre_states + .try_into() + .expect("Create instruction requires exactly three accounts"); + ata_program::create::create_associated_token_account( + owner, + token_definition, + ata_account, + ata_program_id, + ) + } + Instruction::Transfer { + ata_program_id, + amount, + } => { + let [owner, sender_ata, recipient] = pre_states + .try_into() + .expect("Transfer instruction requires exactly three accounts"); + ata_program::transfer::transfer_from_associated_token_account( + owner, + sender_ata, + recipient, + ata_program_id, + amount, + ) + } + Instruction::Burn { + ata_program_id, + amount, + } => { + let [owner, holder_ata, token_definition] = pre_states + .try_into() + .expect("Burn instruction requires exactly three accounts"); + ata_program::burn::burn_from_associated_token_account( + owner, + holder_ata, + token_definition, + ata_program_id, + amount, + ) + } + }; + + ProgramOutput::new( + self_program_id, + caller_program_id, + instruction_words, + pre_states_clone, + post_states, + ) + .with_chained_calls(chained_calls) + .write(); +} diff --git a/program_methods/guest/src/bin/authenticated_transfer.rs b/program_methods/guest/src/bin/authenticated_transfer.rs index 7835f733..32b69c3a 100644 --- a/program_methods/guest/src/bin/authenticated_transfer.rs +++ b/program_methods/guest/src/bin/authenticated_transfer.rs @@ -1,13 +1,13 @@ use nssa_core::{ account::{Account, AccountWithMetadata}, program::{ - AccountPostState, DEFAULT_PROGRAM_ID, ProgramInput, read_nssa_inputs, write_nssa_outputs, + AccountPostState, Claim, DEFAULT_PROGRAM_ID, ProgramInput, ProgramOutput, read_nssa_inputs, }, }; /// Initializes a 
default account under the ownership of this program. fn initialize_account(pre_state: AccountWithMetadata) -> AccountPostState { - let account_to_claim = AccountPostState::new_claimed(pre_state.account); + let account_to_claim = AccountPostState::new_claimed(pre_state.account, Claim::Authorized); let is_authorized = pre_state.is_authorized; // Continue only if the account to claim has default values @@ -52,7 +52,7 @@ fn transfer( // Claim recipient account if it has default program owner if recipient_post_account.program_owner == DEFAULT_PROGRAM_ID { - AccountPostState::new_claimed(recipient_post_account) + AccountPostState::new_claimed(recipient_post_account, Claim::Authorized) } else { AccountPostState::new(recipient_post_account) } @@ -67,6 +67,8 @@ fn main() { // Read input accounts. let ( ProgramInput { + self_program_id, + caller_program_id, pre_states, instruction: balance_to_move, }, @@ -84,5 +86,12 @@ fn main() { _ => panic!("invalid params"), }; - write_nssa_outputs(instruction_words, pre_states, post_states); + ProgramOutput::new( + self_program_id, + caller_program_id, + instruction_words, + pre_states, + post_states, + ) + .write(); } diff --git a/program_methods/guest/src/bin/clock.rs b/program_methods/guest/src/bin/clock.rs new file mode 100644 index 00000000..cb49c384 --- /dev/null +++ b/program_methods/guest/src/bin/clock.rs @@ -0,0 +1,94 @@ +//! Clock Program. +//! +//! A system program that records the current block ID and timestamp into dedicated clock accounts. +//! Three accounts are maintained, updated at different block intervals (every 1, 10, and 50 +//! blocks), allowing programs to read recent timestamps at various granularities. +//! +//! This program can only be invoked exclusively by the sequencer as the last transaction in every +//! block. Clock accounts are assigned to the clock program at genesis, so no claiming is required +//! here. 
+ +use clock_core::{ + CLOCK_01_PROGRAM_ACCOUNT_ID, CLOCK_10_PROGRAM_ACCOUNT_ID, CLOCK_50_PROGRAM_ACCOUNT_ID, + ClockAccountData, Instruction, +}; +use nssa_core::{ + account::AccountWithMetadata, + program::{AccountPostState, ProgramInput, ProgramOutput, read_nssa_inputs}, +}; + +fn update_if_multiple( + pre: AccountWithMetadata, + divisor: u64, + current_block_id: u64, + updated_data: &[u8], +) -> (AccountWithMetadata, AccountPostState) { + if current_block_id.is_multiple_of(divisor) { + let mut post_account = pre.account.clone(); + post_account.data = updated_data + .to_vec() + .try_into() + .expect("Clock account data should fit in account data"); + (pre, AccountPostState::new(post_account)) + } else { + let post = AccountPostState::new(pre.account.clone()); + (pre, post) + } +} + +fn main() { + let ( + ProgramInput { + self_program_id, + caller_program_id, + pre_states, + instruction: timestamp, + }, + instruction_words, + ) = read_nssa_inputs::(); + + let Ok([pre_01, pre_10, pre_50]) = <[_; 3]>::try_from(pre_states) else { + panic!("Invalid number of input accounts"); + }; + + // Verify pre-states correspond to the expected clock account IDs. + if pre_01.account_id != CLOCK_01_PROGRAM_ACCOUNT_ID + || pre_10.account_id != CLOCK_10_PROGRAM_ACCOUNT_ID + || pre_50.account_id != CLOCK_50_PROGRAM_ACCOUNT_ID + { + panic!("Invalid input accounts"); + } + + // Verify all clock accounts are owned by this program (assigned at genesis). 
+ if pre_01.account.program_owner != self_program_id + || pre_10.account.program_owner != self_program_id + || pre_50.account.program_owner != self_program_id + { + panic!("Clock accounts must be owned by the clock program"); + } + + let prev_data = ClockAccountData::from_bytes(&pre_01.account.data.clone().into_inner()); + let current_block_id = prev_data + .block_id + .checked_add(1) + .expect("Next block id should be within u64 boundaries"); + + let updated_data = ClockAccountData { + block_id: current_block_id, + timestamp, + } + .to_bytes(); + + let (pre_01, post_01) = update_if_multiple(pre_01, 1, current_block_id, &updated_data); + let (pre_10, post_10) = update_if_multiple(pre_10, 10, current_block_id, &updated_data); + let (pre_50, post_50) = update_if_multiple(pre_50, 50, current_block_id, &updated_data); + + ProgramOutput::new( + self_program_id, + caller_program_id, + instruction_words, + vec![pre_01, pre_10, pre_50], + vec![post_01, post_10, post_50], + ) + .write(); +} diff --git a/program_methods/guest/src/bin/pinata.rs b/program_methods/guest/src/bin/pinata.rs index c9fc0735..dcc76397 100644 --- a/program_methods/guest/src/bin/pinata.rs +++ b/program_methods/guest/src/bin/pinata.rs @@ -1,4 +1,4 @@ -use nssa_core::program::{AccountPostState, ProgramInput, read_nssa_inputs, write_nssa_outputs}; +use nssa_core::program::{AccountPostState, Claim, ProgramInput, ProgramOutput, read_nssa_inputs}; use risc0_zkvm::sha::{Impl, Sha256 as _}; const PRIZE: u128 = 150; @@ -46,6 +46,8 @@ fn main() { // It is expected to receive only two accounts: [pinata_account, winner_account] let ( ProgramInput { + self_program_id, + caller_program_id, pre_states, instruction: solution, }, @@ -78,12 +80,15 @@ fn main() { .checked_add(PRIZE) .expect("Overflow when adding prize to winner"); - write_nssa_outputs( + ProgramOutput::new( + self_program_id, + caller_program_id, instruction_words, vec![pinata, winner], vec![ - AccountPostState::new_claimed_if_default(pinata_post), + 
AccountPostState::new_claimed_if_default(pinata_post, Claim::Authorized), AccountPostState::new(winner_post), ], - ); + ) + .write(); } diff --git a/program_methods/guest/src/bin/pinata_token.rs b/program_methods/guest/src/bin/pinata_token.rs index f1bbdc87..1f7ad9da 100644 --- a/program_methods/guest/src/bin/pinata_token.rs +++ b/program_methods/guest/src/bin/pinata_token.rs @@ -1,8 +1,7 @@ use nssa_core::{ account::Data, program::{ - AccountPostState, ChainedCall, PdaSeed, ProgramInput, read_nssa_inputs, - write_nssa_outputs_with_chained_call, + AccountPostState, ChainedCall, PdaSeed, ProgramInput, ProgramOutput, read_nssa_inputs, }, }; use risc0_zkvm::sha::{Impl, Sha256 as _}; @@ -53,6 +52,8 @@ fn main() { // winner_token_holding] let ( ProgramInput { + self_program_id, + caller_program_id, pre_states, instruction: solution, }, @@ -97,7 +98,9 @@ fn main() { ) .with_pda_seeds(vec![PdaSeed::new([0; 32])]); - write_nssa_outputs_with_chained_call( + ProgramOutput::new( + self_program_id, + caller_program_id, instruction_words, vec![ pinata_definition, @@ -109,6 +112,7 @@ fn main() { AccountPostState::new(pinata_token_holding_post), AccountPostState::new(winner_token_holding_post), ], - vec![chained_call], - ); + ) + .with_chained_calls(vec![chained_call]) + .write(); } diff --git a/program_methods/guest/src/bin/privacy_preserving_circuit.rs b/program_methods/guest/src/bin/privacy_preserving_circuit.rs index 99782d7f..1d091e1c 100644 --- a/program_methods/guest/src/bin/privacy_preserving_circuit.rs +++ b/program_methods/guest/src/bin/privacy_preserving_circuit.rs @@ -10,8 +10,9 @@ use nssa_core::{ account::{Account, AccountId, AccountWithMetadata, Nonce}, compute_digest_for_path, program::{ - AccountPostState, ChainedCall, DEFAULT_PROGRAM_ID, MAX_NUMBER_CHAINED_CALLS, ProgramId, - ProgramOutput, validate_execution, + AccountPostState, BlockValidityWindow, ChainedCall, Claim, DEFAULT_PROGRAM_ID, + MAX_NUMBER_CHAINED_CALLS, ProgramId, ProgramOutput, 
TimestampValidityWindow, + validate_execution, }, }; use risc0_zkvm::{guest::env, serde::to_vec}; @@ -20,11 +21,53 @@ use risc0_zkvm::{guest::env, serde::to_vec}; struct ExecutionState { pre_states: Vec, post_states: HashMap, + block_validity_window: BlockValidityWindow, + timestamp_validity_window: TimestampValidityWindow, } impl ExecutionState { /// Validate program outputs and derive the overall execution state. - pub fn derive_from_outputs(program_id: ProgramId, program_outputs: Vec) -> Self { + pub fn derive_from_outputs( + visibility_mask: &[u8], + program_id: ProgramId, + program_outputs: Vec, + ) -> Self { + let block_valid_from = program_outputs + .iter() + .filter_map(|output| output.block_validity_window.start()) + .max(); + let block_valid_until = program_outputs + .iter() + .filter_map(|output| output.block_validity_window.end()) + .min(); + let ts_valid_from = program_outputs + .iter() + .filter_map(|output| output.timestamp_validity_window.start()) + .max(); + let ts_valid_until = program_outputs + .iter() + .filter_map(|output| output.timestamp_validity_window.end()) + .min(); + + let block_validity_window: BlockValidityWindow = (block_valid_from, block_valid_until) + .try_into() + .expect( + "There should be non empty intersection in the program output block validity windows", + ); + let timestamp_validity_window: TimestampValidityWindow = + (ts_valid_from, ts_valid_until) + .try_into() + .expect( + "There should be non empty intersection in the program output timestamp validity windows", + ); + + let mut execution_state = Self { + pre_states: Vec::new(), + post_states: HashMap::new(), + block_validity_window, + timestamp_validity_window, + }; + let Some(first_output) = program_outputs.first() else { panic!("No program outputs provided"); }; @@ -37,11 +80,6 @@ impl ExecutionState { }; let mut chained_calls = VecDeque::from_iter([(initial_call, None)]); - let mut execution_state = Self { - pre_states: Vec::new(), - post_states: HashMap::new(), - }; 
- let mut program_outputs_iter = program_outputs.into_iter(); let mut chain_calls_counter = 0; @@ -69,6 +107,22 @@ impl ExecutionState { |_: Infallible| unreachable!("Infallible error is never constructed"), ); + // Verify that the program output's self_program_id matches the expected program ID. + // This ensures the proof commits to which program produced the output. + assert_eq!( + program_output.self_program_id, chained_call.program_id, + "Program output self_program_id does not match chained call program_id" + ); + + // Verify that the program output's caller_program_id matches the actual caller. + // This prevents a malicious user from privately executing an internal function + // by spoofing caller_program_id (e.g. passing caller_program_id = self_program_id + // to bypass access control checks). + assert_eq!( + program_output.caller_program_id, caller_program_id, + "Program output caller_program_id does not match actual caller" + ); + // Check that the program is well behaved. // See the # Programs section for the definition of the `validate_execution` method. let execution_valid = validate_execution( @@ -87,6 +141,7 @@ impl ExecutionState { &chained_call.pda_seeds, ); execution_state.validate_and_sync_states( + visibility_mask, chained_call.program_id, &authorized_pdas, program_output.pre_states, @@ -119,7 +174,7 @@ impl ExecutionState { { assert_ne!( post.program_owner, DEFAULT_PROGRAM_ID, - "Account {account_id:?} was modified but not claimed" + "Account {account_id} was modified but not claimed" ); } @@ -129,6 +184,7 @@ impl ExecutionState { /// Validate program pre and post states and populate the execution state. 
fn validate_and_sync_states( &mut self, + visibility_mask: &[u8], program_id: ProgramId, authorized_pdas: &HashSet, pre_states: Vec, @@ -136,14 +192,25 @@ impl ExecutionState { ) { for (pre, mut post) in pre_states.into_iter().zip(post_states) { let pre_account_id = pre.account_id; + let pre_is_authorized = pre.is_authorized; let post_states_entry = self.post_states.entry(pre.account_id); match &post_states_entry { Entry::Occupied(occupied) => { + #[expect( + clippy::shadow_unrelated, + reason = "Shadowing is intentional to use all fields" + )] + let AccountWithMetadata { + account: pre_account, + account_id: pre_account_id, + is_authorized: pre_is_authorized, + } = pre; + // Ensure that new pre state is the same as known post state assert_eq!( occupied.get(), - &pre.account, - "Inconsistent pre state for account {pre_account_id:?}", + &pre_account, + "Inconsistent pre state for account {pre_account_id}", ); let previous_is_authorized = self @@ -152,7 +219,7 @@ impl ExecutionState { .find(|acc| acc.account_id == pre_account_id) .map_or_else( || panic!( - "Pre state must exist in execution state for account {pre_account_id:?}", + "Pre state must exist in execution state for account {pre_account_id}", ), |acc| acc.is_authorized ); @@ -161,22 +228,57 @@ impl ExecutionState { previous_is_authorized || authorized_pdas.contains(&pre_account_id); assert_eq!( - pre.is_authorized, is_authorized, - "Inconsistent authorization for account {pre_account_id:?}", + pre_is_authorized, is_authorized, + "Inconsistent authorization for account {pre_account_id}", ); } Entry::Vacant(_) => { + // Pre state for the initial call self.pre_states.push(pre); } } - if post.requires_claim() { + if let Some(claim) = post.required_claim() { // The invoked program can only claim accounts with default program id. 
- if post.account().program_owner == DEFAULT_PROGRAM_ID { - post.account_mut().program_owner = program_id; + assert_eq!( + post.account().program_owner, + DEFAULT_PROGRAM_ID, + "Cannot claim an initialized account {pre_account_id}" + ); + + let pre_state_position = self + .pre_states + .iter() + .position(|acc| acc.account_id == pre_account_id) + .expect("Pre state must exist at this point"); + + let is_public_account = visibility_mask[pre_state_position] == 0; + if is_public_account { + match claim { + Claim::Authorized => { + // Note: no need to check authorized pdas because we have already + // checked consistency of authorization above. + assert!( + pre_is_authorized, + "Cannot claim unauthorized account {pre_account_id}" + ); + } + Claim::Pda(seed) => { + let pda = AccountId::from((&program_id, &seed)); + assert_eq!( + pre_account_id, pda, + "Invalid PDA claim for account {pre_account_id} which does not match derived PDA {pda}" + ); + } + } } else { - panic!("Cannot claim an initialized account {pre_account_id:?}"); + // We don't care about the exact claim mechanism for private accounts. + // This is because the main reason to have it is to protect against PDA griefing + // attacks in public execution, while private PDA doesn't make much sense + // anyway. 
} + + post.account_mut().program_owner = program_id; } post_states_entry.insert_entry(post.into_account()); @@ -210,6 +312,8 @@ fn compute_circuit_output( ciphertexts: Vec::new(), new_commitments: Vec::new(), new_nullifiers: Vec::new(), + block_validity_window: execution_state.block_validity_window, + timestamp_validity_window: execution_state.timestamp_validity_window, }; let states_iter = execution_state.into_states_iter(); @@ -392,7 +496,8 @@ fn main() { program_id, } = env::read(); - let execution_state = ExecutionState::derive_from_outputs(program_id, program_outputs); + let execution_state = + ExecutionState::derive_from_outputs(&visibility_mask, program_id, program_outputs); let output = compute_circuit_output( execution_state, diff --git a/program_methods/guest/src/bin/token.rs b/program_methods/guest/src/bin/token.rs index 0bc3d245..68205d77 100644 --- a/program_methods/guest/src/bin/token.rs +++ b/program_methods/guest/src/bin/token.rs @@ -6,12 +6,14 @@ //! Token program accepts [`Instruction`] as input, refer to the corresponding documentation //! for more details. 
-use nssa_core::program::{ProgramInput, read_nssa_inputs, write_nssa_outputs}; +use nssa_core::program::{ProgramInput, ProgramOutput, read_nssa_inputs}; use token_program::core::Instruction; fn main() { let ( ProgramInput { + self_program_id, + caller_program_id, pre_states, instruction, }, @@ -81,5 +83,12 @@ fn main() { } }; - write_nssa_outputs(instruction_words, pre_states_clone, post_states); + ProgramOutput::new( + self_program_id, + caller_program_id, + instruction_words, + pre_states_clone, + post_states, + ) + .write(); } diff --git a/programs/amm/Cargo.toml b/programs/amm/Cargo.toml index 30074ac8..4fcadb9f 100644 --- a/programs/amm/Cargo.toml +++ b/programs/amm/Cargo.toml @@ -8,12 +8,9 @@ license = { workspace = true } workspace = true [dependencies] -nssa = { workspace = true, optional = true, features = [ - "test-utils", -], default-features = true } nssa_core.workspace = true token_core.workspace = true amm_core.workspace = true -[features] -nssa = ["dep:nssa"] +[dev-dependencies] +nssa = { workspace = true, features = ["test-utils"] } diff --git a/programs/amm/core/src/lib.rs b/programs/amm/core/src/lib.rs index 85efd00d..017f14ff 100644 --- a/programs/amm/core/src/lib.rs +++ b/programs/amm/core/src/lib.rs @@ -68,11 +68,27 @@ pub enum Instruction { /// - User Holding Account for Token A /// - User Holding Account for Token B Either User Holding Account for Token A or Token B is /// authorized. - Swap { + SwapExactInput { swap_amount_in: u128, min_amount_out: u128, token_definition_id_in: AccountId, }, + + /// Swap tokens specifying the exact desired output amount, + /// while maintaining the Pool constant product. + /// + /// Required accounts: + /// - AMM Pool (initialized) + /// - Vault Holding Account for Token A (initialized) + /// - Vault Holding Account for Token B (initialized) + /// - User Holding Account for Token A + /// - User Holding Account for Token B Either User Holding Account for Token A or Token B is + /// authorized. 
+ SwapExactOutput { + exact_amount_out: u128, + max_amount_in: u128, + token_definition_id_in: AccountId, + }, } #[derive(Clone, Default, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] diff --git a/programs/amm/src/new_definition.rs b/programs/amm/src/new_definition.rs index 366eb747..88263b87 100644 --- a/programs/amm/src/new_definition.rs +++ b/programs/amm/src/new_definition.rs @@ -2,11 +2,11 @@ use std::num::NonZeroU128; use amm_core::{ PoolDefinition, compute_liquidity_token_pda, compute_liquidity_token_pda_seed, - compute_pool_pda, compute_vault_pda, + compute_pool_pda, compute_pool_pda_seed, compute_vault_pda, compute_vault_pda_seed, }; use nssa_core::{ account::{Account, AccountWithMetadata, Data}, - program::{AccountPostState, ChainedCall, ProgramId}, + program::{AccountPostState, ChainedCall, Claim, ProgramId}, }; #[expect(clippy::too_many_arguments, reason = "TODO: Fix later")] @@ -108,36 +108,52 @@ pub fn new_definition( }; pool_post.data = Data::from(&pool_post_definition); - let pool_post = AccountPostState::new_claimed_if_default(pool_post); + let pool_pda_seed = compute_pool_pda_seed(definition_token_a_id, definition_token_b_id); + let pool_post = AccountPostState::new_claimed_if_default(pool_post, Claim::Pda(pool_pda_seed)); let token_program_id = user_holding_a.account.program_owner; // Chain call for Token A (user_holding_a -> Vault_A) + let vault_a_seed = compute_vault_pda_seed(pool.account_id, definition_token_a_id); + let vault_a_authorized = AccountWithMetadata { + is_authorized: true, + ..vault_a.clone() + }; let call_token_a = ChainedCall::new( token_program_id, - vec![user_holding_a.clone(), vault_a.clone()], + vec![user_holding_a.clone(), vault_a_authorized], &token_core::Instruction::Transfer { amount_to_transfer: token_a_amount.into(), }, - ); + ) + .with_pda_seeds(vec![vault_a_seed]); + // Chain call for Token B (user_holding_b -> Vault_B) + let vault_b_seed = compute_vault_pda_seed(pool.account_id, definition_token_b_id); 
+ let vault_b_authorized = AccountWithMetadata { + is_authorized: true, + ..vault_b.clone() + }; let call_token_b = ChainedCall::new( token_program_id, - vec![user_holding_b.clone(), vault_b.clone()], + vec![user_holding_b.clone(), vault_b_authorized], &token_core::Instruction::Transfer { amount_to_transfer: token_b_amount.into(), }, - ); - - let mut pool_lp_auth = pool_definition_lp.clone(); - pool_lp_auth.is_authorized = true; + ) + .with_pda_seeds(vec![vault_b_seed]); + let pool_lp_pda_seed = compute_liquidity_token_pda_seed(pool.account_id); + let pool_lp_authorized = AccountWithMetadata { + is_authorized: true, + ..pool_definition_lp.clone() + }; let call_token_lp = ChainedCall::new( token_program_id, - vec![pool_lp_auth, user_holding_lp.clone()], + vec![pool_lp_authorized, user_holding_lp.clone()], &instruction, ) - .with_pda_seeds(vec![compute_liquidity_token_pda_seed(pool.account_id)]); + .with_pda_seeds(vec![pool_lp_pda_seed]); let chained_calls = vec![call_token_lp, call_token_b, call_token_a]; diff --git a/programs/amm/src/swap.rs b/programs/amm/src/swap.rs index cb64f5eb..22f3792a 100644 --- a/programs/amm/src/swap.rs +++ b/programs/amm/src/swap.rs @@ -4,21 +4,14 @@ use nssa_core::{ program::{AccountPostState, ChainedCall}, }; -#[expect(clippy::too_many_arguments, reason = "TODO: Fix later")] -#[must_use] -pub fn swap( - pool: AccountWithMetadata, - vault_a: AccountWithMetadata, - vault_b: AccountWithMetadata, - user_holding_a: AccountWithMetadata, - user_holding_b: AccountWithMetadata, - swap_amount_in: u128, - min_amount_out: u128, - token_in_id: AccountId, -) -> (Vec, Vec) { - // Verify vaults are in fact vaults +/// Validates swap setup: checks pool is active, vaults match, and reserves are sufficient. 
+fn validate_swap_setup( + pool: &AccountWithMetadata, + vault_a: &AccountWithMetadata, + vault_b: &AccountWithMetadata, +) -> PoolDefinition { let pool_def_data = PoolDefinition::try_from(&pool.account.data) - .expect("Swap: AMM Program expects a valid Pool Definition Account"); + .expect("AMM Program expects a valid Pool Definition Account"); assert!(pool_def_data.active, "Pool is inactive"); assert_eq!( @@ -30,16 +23,14 @@ pub fn swap( "Vault B was not provided" ); - // fetch pool reserves - // validates reserves is at least the vaults' balances let vault_a_token_holding = token_core::TokenHolding::try_from(&vault_a.account.data) - .expect("Swap: AMM Program expects a valid Token Holding Account for Vault A"); + .expect("AMM Program expects a valid Token Holding Account for Vault A"); let token_core::TokenHolding::Fungible { definition_id: _, balance: vault_a_balance, } = vault_a_token_holding else { - panic!("Swap: AMM Program expects a valid Fungible Token Holding Account for Vault A"); + panic!("AMM Program expects a valid Fungible Token Holding Account for Vault A"); }; assert!( @@ -48,13 +39,13 @@ pub fn swap( ); let vault_b_token_holding = token_core::TokenHolding::try_from(&vault_b.account.data) - .expect("Swap: AMM Program expects a valid Token Holding Account for Vault B"); + .expect("AMM Program expects a valid Token Holding Account for Vault B"); let token_core::TokenHolding::Fungible { definition_id: _, balance: vault_b_balance, } = vault_b_token_holding else { - panic!("Swap: AMM Program expects a valid Fungible Token Holding Account for Vault B"); + panic!("AMM Program expects a valid Fungible Token Holding Account for Vault B"); }; assert!( @@ -62,6 +53,59 @@ pub fn swap( "Reserve for Token B exceeds vault balance" ); + pool_def_data +} + +/// Creates post-state and returns reserves after swap. 
+#[expect(clippy::too_many_arguments, reason = "TODO: Fix later")] +#[expect( + clippy::needless_pass_by_value, + reason = "consistent with codebase style" +)] +fn create_swap_post_states( + pool: AccountWithMetadata, + pool_def_data: PoolDefinition, + vault_a: AccountWithMetadata, + vault_b: AccountWithMetadata, + user_holding_a: AccountWithMetadata, + user_holding_b: AccountWithMetadata, + deposit_a: u128, + withdraw_a: u128, + deposit_b: u128, + withdraw_b: u128, +) -> Vec { + let mut pool_post = pool.account; + let pool_post_definition = PoolDefinition { + reserve_a: pool_def_data.reserve_a + deposit_a - withdraw_a, + reserve_b: pool_def_data.reserve_b + deposit_b - withdraw_b, + ..pool_def_data + }; + + pool_post.data = Data::from(&pool_post_definition); + + vec![ + AccountPostState::new(pool_post), + AccountPostState::new(vault_a.account), + AccountPostState::new(vault_b.account), + AccountPostState::new(user_holding_a.account), + AccountPostState::new(user_holding_b.account), + ] +} + +#[expect(clippy::too_many_arguments, reason = "TODO: Fix later")] +#[must_use] +pub fn swap_exact_input( + pool: AccountWithMetadata, + vault_a: AccountWithMetadata, + vault_b: AccountWithMetadata, + user_holding_a: AccountWithMetadata, + user_holding_b: AccountWithMetadata, + swap_amount_in: u128, + min_amount_out: u128, + token_in_id: AccountId, +) -> (Vec, Vec) { + let pool_def_data = validate_swap_setup(&pool, &vault_a, &vault_b); + let (chained_calls, [deposit_a, withdraw_a], [deposit_b, withdraw_b]) = if token_in_id == pool_def_data.definition_token_a_id { let (chained_calls, deposit_a, withdraw_b) = swap_logic( @@ -95,23 +139,18 @@ pub fn swap( panic!("AccountId is not a token type for the pool"); }; - // Update pool account - let mut pool_post = pool.account; - let pool_post_definition = PoolDefinition { - reserve_a: pool_def_data.reserve_a + deposit_a - withdraw_a, - reserve_b: pool_def_data.reserve_b + deposit_b - withdraw_b, - ..pool_def_data - }; - - pool_post.data 
= Data::from(&pool_post_definition); - - let post_states = vec![ - AccountPostState::new(pool_post), - AccountPostState::new(vault_a.account), - AccountPostState::new(vault_b.account), - AccountPostState::new(user_holding_a.account), - AccountPostState::new(user_holding_b.account), - ]; + let post_states = create_swap_post_states( + pool, + pool_def_data, + vault_a, + vault_b, + user_holding_a, + user_holding_b, + deposit_a, + withdraw_a, + deposit_b, + withdraw_b, + ); (post_states, chained_calls) } @@ -131,7 +170,9 @@ fn swap_logic( // Compute withdraw amount // Maintains pool constant product // k = pool_def_data.reserve_a * pool_def_data.reserve_b; - let withdraw_amount = (reserve_withdraw_vault_amount * swap_amount_in) + let withdraw_amount = reserve_withdraw_vault_amount + .checked_mul(swap_amount_in) + .expect("reserve * amount_in overflows u128") / (reserve_deposit_vault_amount + swap_amount_in); // Slippage check @@ -175,3 +216,135 @@ fn swap_logic( (chained_calls, swap_amount_in, withdraw_amount) } + +#[expect(clippy::too_many_arguments, reason = "TODO: Fix later")] +#[must_use] +pub fn swap_exact_output( + pool: AccountWithMetadata, + vault_a: AccountWithMetadata, + vault_b: AccountWithMetadata, + user_holding_a: AccountWithMetadata, + user_holding_b: AccountWithMetadata, + exact_amount_out: u128, + max_amount_in: u128, + token_in_id: AccountId, +) -> (Vec, Vec) { + let pool_def_data = validate_swap_setup(&pool, &vault_a, &vault_b); + + let (chained_calls, [deposit_a, withdraw_a], [deposit_b, withdraw_b]) = + if token_in_id == pool_def_data.definition_token_a_id { + let (chained_calls, deposit_a, withdraw_b) = exact_output_swap_logic( + user_holding_a.clone(), + vault_a.clone(), + vault_b.clone(), + user_holding_b.clone(), + exact_amount_out, + max_amount_in, + pool_def_data.reserve_a, + pool_def_data.reserve_b, + pool.account_id, + ); + + (chained_calls, [deposit_a, 0], [0, withdraw_b]) + } else if token_in_id == pool_def_data.definition_token_b_id { + 
let (chained_calls, deposit_b, withdraw_a) = exact_output_swap_logic( + user_holding_b.clone(), + vault_b.clone(), + vault_a.clone(), + user_holding_a.clone(), + exact_amount_out, + max_amount_in, + pool_def_data.reserve_b, + pool_def_data.reserve_a, + pool.account_id, + ); + + (chained_calls, [0, withdraw_a], [deposit_b, 0]) + } else { + panic!("AccountId is not a token type for the pool"); + }; + + let post_states = create_swap_post_states( + pool, + pool_def_data, + vault_a, + vault_b, + user_holding_a, + user_holding_b, + deposit_a, + withdraw_a, + deposit_b, + withdraw_b, + ); + + (post_states, chained_calls) +} + +#[expect(clippy::too_many_arguments, reason = "TODO: Fix later")] +fn exact_output_swap_logic( + user_deposit: AccountWithMetadata, + vault_deposit: AccountWithMetadata, + vault_withdraw: AccountWithMetadata, + user_withdraw: AccountWithMetadata, + exact_amount_out: u128, + max_amount_in: u128, + reserve_deposit_vault_amount: u128, + reserve_withdraw_vault_amount: u128, + pool_id: AccountId, +) -> (Vec, u128, u128) { + // Guard: exact_amount_out must be nonzero + assert_ne!(exact_amount_out, 0, "Exact amount out must be nonzero"); + + // Guard: exact_amount_out must be less than reserve_withdraw_vault_amount + assert!( + exact_amount_out < reserve_withdraw_vault_amount, + "Exact amount out exceeds reserve" + ); + + // Compute deposit amount using ceiling division + // Formula: amount_in = ceil(reserve_in * exact_amount_out / (reserve_out - exact_amount_out)) + let deposit_amount = reserve_deposit_vault_amount + .checked_mul(exact_amount_out) + .expect("reserve * amount_out overflows u128") + .div_ceil(reserve_withdraw_vault_amount - exact_amount_out); + + // Slippage check + assert!( + deposit_amount <= max_amount_in, + "Required input exceeds maximum amount in" + ); + + let token_program_id = user_deposit.account.program_owner; + + let mut chained_calls = Vec::new(); + chained_calls.push(ChainedCall::new( + token_program_id, + vec![user_deposit, 
vault_deposit], + &token_core::Instruction::Transfer { + amount_to_transfer: deposit_amount, + }, + )); + + let mut vault_withdraw = vault_withdraw; + vault_withdraw.is_authorized = true; + + let pda_seed = compute_vault_pda_seed( + pool_id, + token_core::TokenHolding::try_from(&vault_withdraw.account.data) + .expect("Exact Output Swap Logic: AMM Program expects valid token data") + .definition_id(), + ); + + chained_calls.push( + ChainedCall::new( + token_program_id, + vec![vault_withdraw, user_withdraw], + &token_core::Instruction::Transfer { + amount_to_transfer: exact_amount_out, + }, + ) + .with_pda_seeds(vec![pda_seed]), + ); + + (chained_calls, deposit_amount, exact_amount_out) +} diff --git a/programs/amm/src/tests.rs b/programs/amm/src/tests.rs index e1e8698d..43e20168 100644 --- a/programs/amm/src/tests.rs +++ b/programs/amm/src/tests.rs @@ -1,12 +1,11 @@ -use std::num::NonZero; +use std::{num::NonZero, vec}; use amm_core::{ PoolDefinition, compute_liquidity_token_pda, compute_liquidity_token_pda_seed, compute_pool_pda, compute_vault_pda, compute_vault_pda_seed, }; -#[cfg(feature = "nssa")] use nssa::{ - PrivateKey, PublicKey, PublicTransaction, V02State, program::Program, public_transaction, + PrivateKey, PublicKey, PublicTransaction, V03State, program::Program, public_transaction, }; use nssa_core::{ account::{Account, AccountId, AccountWithMetadata, Data}, @@ -15,7 +14,10 @@ use nssa_core::{ use token_core::{TokenDefinition, TokenHolding}; use crate::{ - add::add_liquidity, new_definition::new_definition, remove::remove_liquidity, swap::swap, + add::add_liquidity, + new_definition::new_definition, + remove::remove_liquidity, + swap::{swap_exact_input, swap_exact_output}, }; const TOKEN_PROGRAM_ID: ProgramId = [15; 8]; @@ -25,16 +27,15 @@ struct BalanceForTests; struct ChainedCallForTests; struct IdForTests; struct AccountWithMetadataForTests; -#[cfg(feature = "nssa")] + struct PrivateKeysForTests; -#[cfg(feature = "nssa")] + struct IdForExeTests; 
-#[cfg(feature = "nssa")] + struct BalanceForExeTests; -#[cfg(feature = "nssa")] + struct AccountsForExeTests; -#[cfg(feature = "nssa")] impl PrivateKeysForTests { fn user_token_a_key() -> PrivateKey { PrivateKey::try_new([31; 32]).expect("Keys constructor expects valid private key") @@ -155,6 +156,10 @@ impl BalanceForTests { 200 } + fn max_amount_in() -> u128 { + 166 + } + fn vault_a_add_successful() -> u128 { 1_400 } @@ -245,6 +250,74 @@ impl ChainedCallForTests { ) } + fn cc_swap_exact_output_token_a_test_1() -> ChainedCall { + let swap_amount: u128 = 498; + + ChainedCall::new( + TOKEN_PROGRAM_ID, + vec![ + AccountWithMetadataForTests::user_holding_a(), + AccountWithMetadataForTests::vault_a_init(), + ], + &token_core::Instruction::Transfer { + amount_to_transfer: swap_amount, + }, + ) + } + + fn cc_swap_exact_output_token_b_test_1() -> ChainedCall { + let swap_amount: u128 = 166; + + let mut vault_b_auth = AccountWithMetadataForTests::vault_b_init(); + vault_b_auth.is_authorized = true; + + ChainedCall::new( + TOKEN_PROGRAM_ID, + vec![vault_b_auth, AccountWithMetadataForTests::user_holding_b()], + &token_core::Instruction::Transfer { + amount_to_transfer: swap_amount, + }, + ) + .with_pda_seeds(vec![compute_vault_pda_seed( + IdForTests::pool_definition_id(), + IdForTests::token_b_definition_id(), + )]) + } + + fn cc_swap_exact_output_token_a_test_2() -> ChainedCall { + let swap_amount: u128 = 285; + + let mut vault_a_auth = AccountWithMetadataForTests::vault_a_init(); + vault_a_auth.is_authorized = true; + + ChainedCall::new( + TOKEN_PROGRAM_ID, + vec![vault_a_auth, AccountWithMetadataForTests::user_holding_a()], + &token_core::Instruction::Transfer { + amount_to_transfer: swap_amount, + }, + ) + .with_pda_seeds(vec![compute_vault_pda_seed( + IdForTests::pool_definition_id(), + IdForTests::token_a_definition_id(), + )]) + } + + fn cc_swap_exact_output_token_b_test_2() -> ChainedCall { + let swap_amount: u128 = 200; + + ChainedCall::new( + TOKEN_PROGRAM_ID, + 
vec![ + AccountWithMetadataForTests::user_holding_b(), + AccountWithMetadataForTests::vault_b_init(), + ], + &token_core::Instruction::Transfer { + amount_to_transfer: swap_amount, + }, + ) + } + fn cc_add_token_a() -> ChainedCall { ChainedCall::new( TOKEN_PROGRAM_ID, @@ -831,6 +904,54 @@ impl AccountWithMetadataForTests { } } + fn pool_definition_swap_exact_output_test_1() -> AccountWithMetadata { + AccountWithMetadata { + account: Account { + program_owner: ProgramId::default(), + balance: 0_u128, + data: Data::from(&PoolDefinition { + definition_token_a_id: IdForTests::token_a_definition_id(), + definition_token_b_id: IdForTests::token_b_definition_id(), + vault_a_id: IdForTests::vault_a_id(), + vault_b_id: IdForTests::vault_b_id(), + liquidity_pool_id: IdForTests::token_lp_definition_id(), + liquidity_pool_supply: BalanceForTests::lp_supply_init(), + reserve_a: 1498_u128, + reserve_b: 334_u128, + fees: 0_u128, + active: true, + }), + nonce: 0_u128.into(), + }, + is_authorized: true, + account_id: IdForTests::pool_definition_id(), + } + } + + fn pool_definition_swap_exact_output_test_2() -> AccountWithMetadata { + AccountWithMetadata { + account: Account { + program_owner: ProgramId::default(), + balance: 0_u128, + data: Data::from(&PoolDefinition { + definition_token_a_id: IdForTests::token_a_definition_id(), + definition_token_b_id: IdForTests::token_b_definition_id(), + vault_a_id: IdForTests::vault_a_id(), + vault_b_id: IdForTests::vault_b_id(), + liquidity_pool_id: IdForTests::token_lp_definition_id(), + liquidity_pool_supply: BalanceForTests::lp_supply_init(), + reserve_a: BalanceForTests::vault_a_swap_test_2(), + reserve_b: BalanceForTests::vault_b_swap_test_2(), + fees: 0_u128, + active: true, + }), + nonce: 0_u128.into(), + }, + is_authorized: true, + account_id: IdForTests::pool_definition_id(), + } + } + fn pool_definition_add_zero_lp() -> AccountWithMetadata { AccountWithMetadata { account: Account { @@ -1008,7 +1129,6 @@ impl 
AccountWithMetadataForTests { } } -#[cfg(feature = "nssa")] impl BalanceForExeTests { fn user_token_a_holding_init() -> u128 { 10_000 @@ -1172,7 +1292,6 @@ impl BalanceForExeTests { } } -#[cfg(feature = "nssa")] impl IdForExeTests { fn pool_definition_id() -> AccountId { amm_core::compute_pool_pda( @@ -1229,7 +1348,6 @@ impl IdForExeTests { } } -#[cfg(feature = "nssa")] impl AccountsForExeTests { fn user_token_a_holding() -> Account { Account { @@ -1761,7 +1879,7 @@ impl AccountsForExeTests { definition_id: IdForExeTests::token_lp_definition_id(), balance: BalanceForExeTests::lp_supply_init(), }), - nonce: 0_u128.into(), + nonce: 1_u128.into(), } } @@ -1806,7 +1924,7 @@ impl AccountsForExeTests { definition_id: IdForExeTests::token_lp_definition_id(), balance: 0, }), - nonce: 0_u128.into(), + nonce: 1.into(), } } } @@ -2405,7 +2523,7 @@ fn call_new_definition_chained_call_successful() { #[should_panic(expected = "AccountId is not a token type for the pool")] #[test] fn call_swap_incorrect_token_type() { - let _post_states = swap( + let _post_states = swap_exact_input( AccountWithMetadataForTests::pool_definition_init(), AccountWithMetadataForTests::vault_a_init(), AccountWithMetadataForTests::vault_b_init(), @@ -2420,7 +2538,7 @@ fn call_swap_incorrect_token_type() { #[should_panic(expected = "Vault A was not provided")] #[test] fn call_swap_vault_a_omitted() { - let _post_states = swap( + let _post_states = swap_exact_input( AccountWithMetadataForTests::pool_definition_init(), AccountWithMetadataForTests::vault_a_with_wrong_id(), AccountWithMetadataForTests::vault_b_init(), @@ -2435,7 +2553,7 @@ fn call_swap_vault_a_omitted() { #[should_panic(expected = "Vault B was not provided")] #[test] fn call_swap_vault_b_omitted() { - let _post_states = swap( + let _post_states = swap_exact_input( AccountWithMetadataForTests::pool_definition_init(), AccountWithMetadataForTests::vault_a_init(), AccountWithMetadataForTests::vault_b_with_wrong_id(), @@ -2450,7 +2568,7 @@ fn 
call_swap_vault_b_omitted() { #[should_panic(expected = "Reserve for Token A exceeds vault balance")] #[test] fn call_swap_reserves_vault_mismatch_1() { - let _post_states = swap( + let _post_states = swap_exact_input( AccountWithMetadataForTests::pool_definition_init(), AccountWithMetadataForTests::vault_a_init_low(), AccountWithMetadataForTests::vault_b_init(), @@ -2465,7 +2583,7 @@ fn call_swap_reserves_vault_mismatch_1() { #[should_panic(expected = "Reserve for Token B exceeds vault balance")] #[test] fn call_swap_reserves_vault_mismatch_2() { - let _post_states = swap( + let _post_states = swap_exact_input( AccountWithMetadataForTests::pool_definition_init(), AccountWithMetadataForTests::vault_a_init(), AccountWithMetadataForTests::vault_b_init_low(), @@ -2480,7 +2598,7 @@ fn call_swap_reserves_vault_mismatch_2() { #[should_panic(expected = "Pool is inactive")] #[test] fn call_swap_ianctive() { - let _post_states = swap( + let _post_states = swap_exact_input( AccountWithMetadataForTests::pool_definition_inactive(), AccountWithMetadataForTests::vault_a_init(), AccountWithMetadataForTests::vault_b_init(), @@ -2495,7 +2613,7 @@ fn call_swap_ianctive() { #[should_panic(expected = "Withdraw amount is less than minimal amount out")] #[test] fn call_swap_below_min_out() { - let _post_states = swap( + let _post_states = swap_exact_input( AccountWithMetadataForTests::pool_definition_init(), AccountWithMetadataForTests::vault_a_init(), AccountWithMetadataForTests::vault_b_init(), @@ -2509,7 +2627,7 @@ fn call_swap_below_min_out() { #[test] fn call_swap_chained_call_successful_1() { - let (post_states, chained_calls) = swap( + let (post_states, chained_calls) = swap_exact_input( AccountWithMetadataForTests::pool_definition_init(), AccountWithMetadataForTests::vault_a_init(), AccountWithMetadataForTests::vault_b_init(), @@ -2541,7 +2659,7 @@ fn call_swap_chained_call_successful_1() { #[test] fn call_swap_chained_call_successful_2() { - let (post_states, chained_calls) = 
swap( + let (post_states, chained_calls) = swap_exact_input( AccountWithMetadataForTests::pool_definition_init(), AccountWithMetadataForTests::vault_a_init(), AccountWithMetadataForTests::vault_b_init(), @@ -2571,6 +2689,281 @@ fn call_swap_chained_call_successful_2() { ); } +#[should_panic(expected = "AccountId is not a token type for the pool")] +#[test] +fn call_swap_exact_output_incorrect_token_type() { + let _post_states = swap_exact_output( + AccountWithMetadataForTests::pool_definition_init(), + AccountWithMetadataForTests::vault_a_init(), + AccountWithMetadataForTests::vault_b_init(), + AccountWithMetadataForTests::user_holding_a(), + AccountWithMetadataForTests::user_holding_b(), + BalanceForTests::add_max_amount_a(), + BalanceForTests::max_amount_in(), + IdForTests::token_lp_definition_id(), + ); +} + +#[should_panic(expected = "Vault A was not provided")] +#[test] +fn call_swap_exact_output_vault_a_omitted() { + let _post_states = swap_exact_output( + AccountWithMetadataForTests::pool_definition_init(), + AccountWithMetadataForTests::vault_a_with_wrong_id(), + AccountWithMetadataForTests::vault_b_init(), + AccountWithMetadataForTests::user_holding_a(), + AccountWithMetadataForTests::user_holding_b(), + BalanceForTests::add_max_amount_a(), + BalanceForTests::max_amount_in(), + IdForTests::token_a_definition_id(), + ); +} + +#[should_panic(expected = "Vault B was not provided")] +#[test] +fn call_swap_exact_output_vault_b_omitted() { + let _post_states = swap_exact_output( + AccountWithMetadataForTests::pool_definition_init(), + AccountWithMetadataForTests::vault_a_init(), + AccountWithMetadataForTests::vault_b_with_wrong_id(), + AccountWithMetadataForTests::user_holding_a(), + AccountWithMetadataForTests::user_holding_b(), + BalanceForTests::add_max_amount_a(), + BalanceForTests::max_amount_in(), + IdForTests::token_a_definition_id(), + ); +} + +#[should_panic(expected = "Reserve for Token A exceeds vault balance")] +#[test] +fn 
call_swap_exact_output_reserves_vault_mismatch_1() { + let _post_states = swap_exact_output( + AccountWithMetadataForTests::pool_definition_init(), + AccountWithMetadataForTests::vault_a_init_low(), + AccountWithMetadataForTests::vault_b_init(), + AccountWithMetadataForTests::user_holding_a(), + AccountWithMetadataForTests::user_holding_b(), + BalanceForTests::add_max_amount_a(), + BalanceForTests::max_amount_in(), + IdForTests::token_a_definition_id(), + ); +} + +#[should_panic(expected = "Reserve for Token B exceeds vault balance")] +#[test] +fn call_swap_exact_output_reserves_vault_mismatch_2() { + let _post_states = swap_exact_output( + AccountWithMetadataForTests::pool_definition_init(), + AccountWithMetadataForTests::vault_a_init(), + AccountWithMetadataForTests::vault_b_init_low(), + AccountWithMetadataForTests::user_holding_a(), + AccountWithMetadataForTests::user_holding_b(), + BalanceForTests::add_max_amount_a(), + BalanceForTests::max_amount_in(), + IdForTests::token_a_definition_id(), + ); +} + +#[should_panic(expected = "Pool is inactive")] +#[test] +fn call_swap_exact_output_inactive() { + let _post_states = swap_exact_output( + AccountWithMetadataForTests::pool_definition_inactive(), + AccountWithMetadataForTests::vault_a_init(), + AccountWithMetadataForTests::vault_b_init(), + AccountWithMetadataForTests::user_holding_a(), + AccountWithMetadataForTests::user_holding_b(), + BalanceForTests::add_max_amount_a(), + BalanceForTests::max_amount_in(), + IdForTests::token_a_definition_id(), + ); +} + +#[should_panic(expected = "Required input exceeds maximum amount in")] +#[test] +fn call_swap_exact_output_exceeds_max_in() { + let _post_states = swap_exact_output( + AccountWithMetadataForTests::pool_definition_init(), + AccountWithMetadataForTests::vault_a_init(), + AccountWithMetadataForTests::vault_b_init(), + AccountWithMetadataForTests::user_holding_a(), + AccountWithMetadataForTests::user_holding_b(), + 166_u128, + 100_u128, + 
IdForTests::token_a_definition_id(), + ); +} + +#[should_panic(expected = "Exact amount out must be nonzero")] +#[test] +fn call_swap_exact_output_zero() { + let _post_states = swap_exact_output( + AccountWithMetadataForTests::pool_definition_init(), + AccountWithMetadataForTests::vault_a_init(), + AccountWithMetadataForTests::vault_b_init(), + AccountWithMetadataForTests::user_holding_a(), + AccountWithMetadataForTests::user_holding_b(), + 0_u128, + 500_u128, + IdForTests::token_a_definition_id(), + ); +} + +#[should_panic(expected = "Exact amount out exceeds reserve")] +#[test] +fn call_swap_exact_output_exceeds_reserve() { + let _post_states = swap_exact_output( + AccountWithMetadataForTests::pool_definition_init(), + AccountWithMetadataForTests::vault_a_init(), + AccountWithMetadataForTests::vault_b_init(), + AccountWithMetadataForTests::user_holding_a(), + AccountWithMetadataForTests::user_holding_b(), + BalanceForTests::vault_b_reserve_init(), + BalanceForTests::max_amount_in(), + IdForTests::token_a_definition_id(), + ); +} + +#[test] +fn call_swap_exact_output_chained_call_successful() { + let (post_states, chained_calls) = swap_exact_output( + AccountWithMetadataForTests::pool_definition_init(), + AccountWithMetadataForTests::vault_a_init(), + AccountWithMetadataForTests::vault_b_init(), + AccountWithMetadataForTests::user_holding_a(), + AccountWithMetadataForTests::user_holding_b(), + BalanceForTests::max_amount_in(), + BalanceForTests::vault_b_reserve_init(), + IdForTests::token_a_definition_id(), + ); + + let pool_post = post_states[0].clone(); + + assert!( + AccountWithMetadataForTests::pool_definition_swap_exact_output_test_1().account + == *pool_post.account() + ); + + let chained_call_a = chained_calls[0].clone(); + let chained_call_b = chained_calls[1].clone(); + + assert_eq!( + chained_call_a, + ChainedCallForTests::cc_swap_exact_output_token_a_test_1() + ); + assert_eq!( + chained_call_b, + 
ChainedCallForTests::cc_swap_exact_output_token_b_test_1() + ); +} + +#[test] +fn call_swap_exact_output_chained_call_successful_2() { + let (post_states, chained_calls) = swap_exact_output( + AccountWithMetadataForTests::pool_definition_init(), + AccountWithMetadataForTests::vault_a_init(), + AccountWithMetadataForTests::vault_b_init(), + AccountWithMetadataForTests::user_holding_a(), + AccountWithMetadataForTests::user_holding_b(), + 285, + 300, + IdForTests::token_b_definition_id(), + ); + + let pool_post = post_states[0].clone(); + + assert!( + AccountWithMetadataForTests::pool_definition_swap_exact_output_test_2().account + == *pool_post.account() + ); + + let chained_call_a = chained_calls[1].clone(); + let chained_call_b = chained_calls[0].clone(); + + assert_eq!( + chained_call_a, + ChainedCallForTests::cc_swap_exact_output_token_a_test_2() + ); + assert_eq!( + chained_call_b, + ChainedCallForTests::cc_swap_exact_output_token_b_test_2() + ); +} + +// Without the fix, `reserve_a * exact_amount_out` silently wraps to 0 in release mode, +// making `deposit_amount = 0`. The slippage check `0 <= max_amount_in` always passes, +// so an attacker receives `exact_amount_out` tokens while paying nothing. 
+#[should_panic(expected = "reserve * amount_out overflows u128")] +#[test] +fn swap_exact_output_overflow_protection() { + // reserve_a chosen so that reserve_a * 2 overflows u128: + // (u128::MAX / 2 + 1) * 2 = u128::MAX + 1 → wraps to 0 + let large_reserve: u128 = u128::MAX / 2 + 1; + let reserve_b: u128 = 1_000; + + let pool = AccountWithMetadata { + account: Account { + program_owner: ProgramId::default(), + balance: 0, + data: Data::from(&PoolDefinition { + definition_token_a_id: IdForTests::token_a_definition_id(), + definition_token_b_id: IdForTests::token_b_definition_id(), + vault_a_id: IdForTests::vault_a_id(), + vault_b_id: IdForTests::vault_b_id(), + liquidity_pool_id: IdForTests::token_lp_definition_id(), + liquidity_pool_supply: 1, + reserve_a: large_reserve, + reserve_b, + fees: 0, + active: true, + }), + nonce: 0_u128.into(), + }, + is_authorized: true, + account_id: IdForTests::pool_definition_id(), + }; + + let vault_a = AccountWithMetadata { + account: Account { + program_owner: TOKEN_PROGRAM_ID, + balance: 0, + data: Data::from(&TokenHolding::Fungible { + definition_id: IdForTests::token_a_definition_id(), + balance: large_reserve, + }), + nonce: 0_u128.into(), + }, + is_authorized: true, + account_id: IdForTests::vault_a_id(), + }; + + let vault_b = AccountWithMetadata { + account: Account { + program_owner: TOKEN_PROGRAM_ID, + balance: 0, + data: Data::from(&TokenHolding::Fungible { + definition_id: IdForTests::token_b_definition_id(), + balance: reserve_b, + }), + nonce: 0_u128.into(), + }, + is_authorized: true, + account_id: IdForTests::vault_b_id(), + }; + + let _result = swap_exact_output( + pool, + vault_a, + vault_b, + AccountWithMetadataForTests::user_holding_a(), + AccountWithMetadataForTests::user_holding_b(), + 2, // exact_amount_out: small, valid (< reserve_b) + 1, // max_amount_in: tiny — real deposit would be enormous, but + // overflow wraps it to 0, making 0 <= 1 pass silently + IdForTests::token_a_definition_id(), + ); +} + 
#[test] fn new_definition_lp_asymmetric_amounts() { let (post_states, chained_calls) = new_definition( @@ -2641,10 +3034,9 @@ fn new_definition_lp_symmetric_amounts() { assert_eq!(chained_call_lp, expected_lp_call); } -#[cfg(feature = "nssa")] -fn state_for_amm_tests() -> V02State { +fn state_for_amm_tests() -> V03State { let initial_data = []; - let mut state = V02State::new_with_genesis_accounts(&initial_data, &[]); + let mut state = V03State::new_with_genesis_accounts(&initial_data, &[], 0); state.force_insert_account( IdForExeTests::pool_definition_id(), AccountsForExeTests::pool_definition_init(), @@ -2685,10 +3077,9 @@ fn state_for_amm_tests() -> V02State { state } -#[cfg(feature = "nssa")] -fn state_for_amm_tests_with_new_def() -> V02State { +fn state_for_amm_tests_with_new_def() -> V03State { let initial_data = []; - let mut state = V02State::new_with_genesis_accounts(&initial_data, &[]); + let mut state = V03State::new_with_genesis_accounts(&initial_data, &[], 0); state.force_insert_account( IdForExeTests::token_a_definition_id(), AccountsForExeTests::token_a_definition_account(), @@ -2708,7 +3099,6 @@ fn state_for_amm_tests_with_new_def() -> V02State { state } -#[cfg(feature = "nssa")] #[test] fn simple_amm_remove() { let mut state = state_for_amm_tests(); @@ -2741,7 +3131,7 @@ fn simple_amm_remove() { ); let tx = PublicTransaction::new(message, witness_set); - state.transition_from_public_transaction(&tx).unwrap(); + state.transition_from_public_transaction(&tx, 1, 0).unwrap(); let pool_post = state.get_account_by_id(IdForExeTests::pool_definition_id()); let vault_a_post = state.get_account_by_id(IdForExeTests::vault_a_id()); @@ -2768,7 +3158,6 @@ fn simple_amm_remove() { assert_eq!(user_token_lp_post, expected_user_token_lp); } -#[cfg(feature = "nssa")] #[test] fn simple_amm_new_definition_inactive_initialized_pool_and_uninit_user_lp() { let mut state = state_for_amm_tests_with_new_def(); @@ -2808,7 +3197,7 @@ fn 
simple_amm_new_definition_inactive_initialized_pool_and_uninit_user_lp() { IdForExeTests::user_token_b_id(), IdForExeTests::user_token_lp_id(), ], - vec![0_u128.into(), 0_u128.into()], + vec![0_u128.into(), 0_u128.into(), 0_u128.into()], instruction, ) .unwrap(); @@ -2818,11 +3207,12 @@ fn simple_amm_new_definition_inactive_initialized_pool_and_uninit_user_lp() { &[ &PrivateKeysForTests::user_token_a_key(), &PrivateKeysForTests::user_token_b_key(), + &PrivateKeysForTests::user_token_lp_key(), ], ); let tx = PublicTransaction::new(message, witness_set); - state.transition_from_public_transaction(&tx).unwrap(); + state.transition_from_public_transaction(&tx, 1, 0).unwrap(); let pool_post = state.get_account_by_id(IdForExeTests::pool_definition_id()); let vault_a_post = state.get_account_by_id(IdForExeTests::vault_a_id()); @@ -2849,7 +3239,6 @@ fn simple_amm_new_definition_inactive_initialized_pool_and_uninit_user_lp() { assert_eq!(user_token_lp_post, expected_user_token_lp); } -#[cfg(feature = "nssa")] #[test] fn simple_amm_new_definition_inactive_initialized_pool_init_user_lp() { let mut state = state_for_amm_tests_with_new_def(); @@ -2907,7 +3296,7 @@ fn simple_amm_new_definition_inactive_initialized_pool_init_user_lp() { ); let tx = PublicTransaction::new(message, witness_set); - state.transition_from_public_transaction(&tx).unwrap(); + state.transition_from_public_transaction(&tx, 1, 0).unwrap(); let pool_post = state.get_account_by_id(IdForExeTests::pool_definition_id()); let vault_a_post = state.get_account_by_id(IdForExeTests::vault_a_id()); @@ -2934,7 +3323,6 @@ fn simple_amm_new_definition_inactive_initialized_pool_init_user_lp() { assert_eq!(user_token_lp_post, expected_user_token_lp); } -#[cfg(feature = "nssa")] #[test] fn simple_amm_new_definition_uninitialized_pool() { let mut state = state_for_amm_tests_with_new_def(); @@ -2966,7 +3354,7 @@ fn simple_amm_new_definition_uninitialized_pool() { IdForExeTests::user_token_b_id(), 
IdForExeTests::user_token_lp_id(), ], - vec![0_u128.into(), 0_u128.into()], + vec![0_u128.into(), 0_u128.into(), 0_u128.into()], instruction, ) .unwrap(); @@ -2976,11 +3364,12 @@ fn simple_amm_new_definition_uninitialized_pool() { &[ &PrivateKeysForTests::user_token_a_key(), &PrivateKeysForTests::user_token_b_key(), + &PrivateKeysForTests::user_token_lp_key(), ], ); let tx = PublicTransaction::new(message, witness_set); - state.transition_from_public_transaction(&tx).unwrap(); + state.transition_from_public_transaction(&tx, 1, 0).unwrap(); let pool_post = state.get_account_by_id(IdForExeTests::pool_definition_id()); let vault_a_post = state.get_account_by_id(IdForExeTests::vault_a_id()); @@ -3007,7 +3396,6 @@ fn simple_amm_new_definition_uninitialized_pool() { assert_eq!(user_token_lp_post, expected_user_token_lp); } -#[cfg(feature = "nssa")] #[test] fn simple_amm_add() { let mut state = state_for_amm_tests(); @@ -3043,7 +3431,7 @@ fn simple_amm_add() { ); let tx = PublicTransaction::new(message, witness_set); - state.transition_from_public_transaction(&tx).unwrap(); + state.transition_from_public_transaction(&tx, 1, 0).unwrap(); let pool_post = state.get_account_by_id(IdForExeTests::pool_definition_id()); let vault_a_post = state.get_account_by_id(IdForExeTests::vault_a_id()); @@ -3070,12 +3458,11 @@ fn simple_amm_add() { assert_eq!(user_token_lp_post, expected_user_token_lp); } -#[cfg(feature = "nssa")] #[test] fn simple_amm_swap_1() { let mut state = state_for_amm_tests(); - let instruction = amm_core::Instruction::Swap { + let instruction = amm_core::Instruction::SwapExactInput { swap_amount_in: BalanceForExeTests::swap_amount_in(), min_amount_out: BalanceForExeTests::swap_min_amount_out(), token_definition_id_in: IdForExeTests::token_b_definition_id(), @@ -3101,7 +3488,7 @@ fn simple_amm_swap_1() { ); let tx = PublicTransaction::new(message, witness_set); - state.transition_from_public_transaction(&tx).unwrap(); + state.transition_from_public_transaction(&tx, 
1, 0).unwrap(); let pool_post = state.get_account_by_id(IdForExeTests::pool_definition_id()); let vault_a_post = state.get_account_by_id(IdForExeTests::vault_a_id()); @@ -3122,12 +3509,11 @@ fn simple_amm_swap_1() { assert_eq!(user_token_b_post, expected_user_token_b); } -#[cfg(feature = "nssa")] #[test] fn simple_amm_swap_2() { let mut state = state_for_amm_tests(); - let instruction = amm_core::Instruction::Swap { + let instruction = amm_core::Instruction::SwapExactInput { swap_amount_in: BalanceForExeTests::swap_amount_in(), min_amount_out: BalanceForExeTests::swap_min_amount_out(), token_definition_id_in: IdForExeTests::token_a_definition_id(), @@ -3152,7 +3538,7 @@ fn simple_amm_swap_2() { ); let tx = PublicTransaction::new(message, witness_set); - state.transition_from_public_transaction(&tx).unwrap(); + state.transition_from_public_transaction(&tx, 1, 0).unwrap(); let pool_post = state.get_account_by_id(IdForExeTests::pool_definition_id()); let vault_a_post = state.get_account_by_id(IdForExeTests::vault_a_id()); diff --git a/programs/associated_token_account/Cargo.toml b/programs/associated_token_account/Cargo.toml new file mode 100644 index 00000000..98e0bfd0 --- /dev/null +++ b/programs/associated_token_account/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "ata_program" +version = "0.1.0" +edition = "2024" +license = { workspace = true } + +[dependencies] +nssa_core.workspace = true +token_core.workspace = true +ata_core.workspace = true diff --git a/programs/associated_token_account/core/Cargo.toml b/programs/associated_token_account/core/Cargo.toml new file mode 100644 index 00000000..7ca8d7fa --- /dev/null +++ b/programs/associated_token_account/core/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "ata_core" +version = "0.1.0" +edition = "2024" +license = { workspace = true } + +[dependencies] +nssa_core.workspace = true +serde.workspace = true +risc0-zkvm.workspace = true diff --git a/programs/associated_token_account/core/src/lib.rs 
b/programs/associated_token_account/core/src/lib.rs new file mode 100644 index 00000000..994c632b --- /dev/null +++ b/programs/associated_token_account/core/src/lib.rs @@ -0,0 +1,82 @@ +pub use nssa_core::program::PdaSeed; +use nssa_core::{ + account::{AccountId, AccountWithMetadata}, + program::ProgramId, +}; +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize)] +pub enum Instruction { + /// Create the Associated Token Account for (owner, definition). + /// Idempotent: no-op if the account already exists. + /// + /// Required accounts (3): + /// - Owner account + /// - Token definition account + /// - Associated token account (default/uninitialized, or already initialized) + /// + /// `token_program_id` is derived from `token_definition.account.program_owner`. + Create { ata_program_id: ProgramId }, + + /// Transfer tokens FROM owner's ATA to a recipient holding account. + /// Uses PDA seeds to authorize the ATA in the chained Token::Transfer call. + /// + /// Required accounts (3): + /// - Owner account (authorized) + /// - Sender ATA (owner's token holding) + /// - Recipient token holding (any account; auto-created if default) + /// + /// `token_program_id` is derived from `sender_ata.account.program_owner`. + Transfer { + ata_program_id: ProgramId, + amount: u128, + }, + + /// Burn tokens FROM owner's ATA. + /// Uses PDA seeds to authorize the ATA in the chained Token::Burn call. + /// + /// Required accounts (3): + /// - Owner account (authorized) + /// - Owner's ATA (the holding to burn from) + /// - Token definition account + /// + /// `token_program_id` is derived from `holder_ata.account.program_owner`. 
+ Burn { + ata_program_id: ProgramId, + amount: u128, + }, +} + +pub fn compute_ata_seed(owner_id: AccountId, definition_id: AccountId) -> PdaSeed { + use risc0_zkvm::sha::{Impl, Sha256}; + let mut bytes = [0u8; 64]; + bytes[0..32].copy_from_slice(&owner_id.to_bytes()); + bytes[32..64].copy_from_slice(&definition_id.to_bytes()); + PdaSeed::new( + Impl::hash_bytes(&bytes) + .as_bytes() + .try_into() + .expect("Hash output must be exactly 32 bytes long"), + ) +} + +pub fn get_associated_token_account_id(ata_program_id: &ProgramId, seed: &PdaSeed) -> AccountId { + AccountId::from((ata_program_id, seed)) +} + +/// Verify the ATA's address matches `(ata_program_id, owner, definition)` and return +/// the [`PdaSeed`] for use in chained calls. +pub fn verify_ata_and_get_seed( + ata_account: &AccountWithMetadata, + owner: &AccountWithMetadata, + definition_id: AccountId, + ata_program_id: ProgramId, +) -> PdaSeed { + let seed = compute_ata_seed(owner.account_id, definition_id); + let expected_id = get_associated_token_account_id(&ata_program_id, &seed); + assert_eq!( + ata_account.account_id, expected_id, + "ATA account ID does not match expected derivation" + ); + seed +} diff --git a/programs/associated_token_account/src/burn.rs b/programs/associated_token_account/src/burn.rs new file mode 100644 index 00000000..4940fdeb --- /dev/null +++ b/programs/associated_token_account/src/burn.rs @@ -0,0 +1,39 @@ +use nssa_core::{ + account::AccountWithMetadata, + program::{AccountPostState, ChainedCall, ProgramId}, +}; +use token_core::TokenHolding; + +pub fn burn_from_associated_token_account( + owner: AccountWithMetadata, + holder_ata: AccountWithMetadata, + token_definition: AccountWithMetadata, + ata_program_id: ProgramId, + amount: u128, +) -> (Vec, Vec) { + let token_program_id = holder_ata.account.program_owner; + assert!(owner.is_authorized, "Owner authorization is missing"); + let definition_id = TokenHolding::try_from(&holder_ata.account.data) + .expect("Holder ATA must 
hold a valid token") + .definition_id(); + let seed = + ata_core::verify_ata_and_get_seed(&holder_ata, &owner, definition_id, ata_program_id); + + let post_states = vec![ + AccountPostState::new(owner.account.clone()), + AccountPostState::new(holder_ata.account.clone()), + AccountPostState::new(token_definition.account.clone()), + ]; + let mut holder_ata_auth = holder_ata.clone(); + holder_ata_auth.is_authorized = true; + + let chained_call = ChainedCall::new( + token_program_id, + vec![token_definition.clone(), holder_ata_auth], + &token_core::Instruction::Burn { + amount_to_burn: amount, + }, + ) + .with_pda_seeds(vec![seed]); + (post_states, vec![chained_call]) +} diff --git a/programs/associated_token_account/src/create.rs b/programs/associated_token_account/src/create.rs new file mode 100644 index 00000000..d44f5d1c --- /dev/null +++ b/programs/associated_token_account/src/create.rs @@ -0,0 +1,50 @@ +use nssa_core::{ + account::{Account, AccountWithMetadata}, + program::{AccountPostState, ChainedCall, Claim, ProgramId}, +}; + +pub fn create_associated_token_account( + owner: AccountWithMetadata, + token_definition: AccountWithMetadata, + ata_account: AccountWithMetadata, + ata_program_id: ProgramId, +) -> (Vec, Vec) { + // No authorization check needed: create is idempotent, so anyone can call it safely. 
+ let token_program_id = token_definition.account.program_owner; + let ata_seed = ata_core::verify_ata_and_get_seed( + &ata_account, + &owner, + token_definition.account_id, + ata_program_id, + ); + + // Idempotent: already initialized → no-op + if ata_account.account != Account::default() { + return ( + vec![ + AccountPostState::new_claimed_if_default(owner.account.clone(), Claim::Authorized), + AccountPostState::new(token_definition.account.clone()), + AccountPostState::new(ata_account.account.clone()), + ], + vec![], + ); + } + + let post_states = vec![ + AccountPostState::new_claimed_if_default(owner.account.clone(), Claim::Authorized), + AccountPostState::new(token_definition.account.clone()), + AccountPostState::new(ata_account.account.clone()), + ]; + let ata_account_auth = AccountWithMetadata { + is_authorized: true, + ..ata_account.clone() + }; + let chained_call = ChainedCall::new( + token_program_id, + vec![token_definition.clone(), ata_account_auth], + &token_core::Instruction::InitializeAccount, + ) + .with_pda_seeds(vec![ata_seed]); + + (post_states, vec![chained_call]) +} diff --git a/programs/associated_token_account/src/lib.rs b/programs/associated_token_account/src/lib.rs new file mode 100644 index 00000000..13740f0a --- /dev/null +++ b/programs/associated_token_account/src/lib.rs @@ -0,0 +1,10 @@ +//! The Associated Token Account Program implementation. 
+ +pub use ata_core as core; + +pub mod burn; +pub mod create; +pub mod transfer; + +#[cfg(test)] +mod tests; diff --git a/programs/associated_token_account/src/tests.rs b/programs/associated_token_account/src/tests.rs new file mode 100644 index 00000000..9835bf37 --- /dev/null +++ b/programs/associated_token_account/src/tests.rs @@ -0,0 +1,153 @@ +#![cfg(test)] + +use ata_core::{compute_ata_seed, get_associated_token_account_id}; +use nssa_core::account::{Account, AccountId, AccountWithMetadata, Data}; +use token_core::{TokenDefinition, TokenHolding}; + +const ATA_PROGRAM_ID: nssa_core::program::ProgramId = [1u32; 8]; +const TOKEN_PROGRAM_ID: nssa_core::program::ProgramId = [2u32; 8]; + +fn owner_id() -> AccountId { + AccountId::new([0x01u8; 32]) +} + +fn definition_id() -> AccountId { + AccountId::new([0x02u8; 32]) +} + +fn ata_id() -> AccountId { + get_associated_token_account_id( + &ATA_PROGRAM_ID, + &compute_ata_seed(owner_id(), definition_id()), + ) +} + +fn owner_account() -> AccountWithMetadata { + AccountWithMetadata { + account: Account::default(), + is_authorized: true, + account_id: owner_id(), + } +} + +fn definition_account() -> AccountWithMetadata { + AccountWithMetadata { + account: Account { + program_owner: TOKEN_PROGRAM_ID, + balance: 0, + data: Data::from(&TokenDefinition::Fungible { + name: "TEST".to_string(), + total_supply: 1000, + metadata_id: None, + }), + nonce: nssa_core::account::Nonce(0), + }, + is_authorized: false, + account_id: definition_id(), + } +} + +fn uninitialized_ata_account() -> AccountWithMetadata { + AccountWithMetadata { + account: Account::default(), + is_authorized: false, + account_id: ata_id(), + } +} + +fn initialized_ata_account() -> AccountWithMetadata { + AccountWithMetadata { + account: Account { + program_owner: TOKEN_PROGRAM_ID, + balance: 0, + data: Data::from(&TokenHolding::Fungible { + definition_id: definition_id(), + balance: 100, + }), + nonce: nssa_core::account::Nonce(0), + }, + is_authorized: false, + 
account_id: ata_id(), + } +} + +#[test] +fn create_emits_chained_call_for_uninitialized_ata() { + let (post_states, chained_calls) = crate::create::create_associated_token_account( + owner_account(), + definition_account(), + uninitialized_ata_account(), + ATA_PROGRAM_ID, + ); + + assert_eq!(post_states.len(), 3); + assert_eq!(chained_calls.len(), 1); + assert_eq!(chained_calls[0].program_id, TOKEN_PROGRAM_ID); +} + +#[test] +fn create_is_idempotent_for_initialized_ata() { + let (post_states, chained_calls) = crate::create::create_associated_token_account( + owner_account(), + definition_account(), + initialized_ata_account(), + ATA_PROGRAM_ID, + ); + + assert_eq!(post_states.len(), 3); + assert!( + chained_calls.is_empty(), + "Should emit no chained call for already-initialized ATA" + ); +} + +#[test] +#[should_panic(expected = "ATA account ID does not match expected derivation")] +fn create_panics_on_wrong_ata_address() { + let wrong_ata = AccountWithMetadata { + account: Account::default(), + is_authorized: false, + account_id: AccountId::new([0xFFu8; 32]), + }; + + crate::create::create_associated_token_account( + owner_account(), + definition_account(), + wrong_ata, + ATA_PROGRAM_ID, + ); +} + +#[test] +fn get_associated_token_account_id_is_deterministic() { + let seed = compute_ata_seed(owner_id(), definition_id()); + let id1 = get_associated_token_account_id(&ATA_PROGRAM_ID, &seed); + let id2 = get_associated_token_account_id(&ATA_PROGRAM_ID, &seed); + assert_eq!(id1, id2); +} + +#[test] +fn get_associated_token_account_id_differs_by_owner() { + let other_owner = AccountId::new([0x99u8; 32]); + let id1 = get_associated_token_account_id( + &ATA_PROGRAM_ID, + &compute_ata_seed(owner_id(), definition_id()), + ); + let id2 = get_associated_token_account_id( + &ATA_PROGRAM_ID, + &compute_ata_seed(other_owner, definition_id()), + ); + assert_ne!(id1, id2); +} + +#[test] +fn get_associated_token_account_id_differs_by_definition() { + let other_def = 
AccountId::new([0x99u8; 32]); + let id1 = get_associated_token_account_id( + &ATA_PROGRAM_ID, + &compute_ata_seed(owner_id(), definition_id()), + ); + let id2 = + get_associated_token_account_id(&ATA_PROGRAM_ID, &compute_ata_seed(owner_id(), other_def)); + assert_ne!(id1, id2); +} diff --git a/programs/associated_token_account/src/transfer.rs b/programs/associated_token_account/src/transfer.rs new file mode 100644 index 00000000..89d70135 --- /dev/null +++ b/programs/associated_token_account/src/transfer.rs @@ -0,0 +1,39 @@ +use nssa_core::{ + account::AccountWithMetadata, + program::{AccountPostState, ChainedCall, ProgramId}, +}; +use token_core::TokenHolding; + +pub fn transfer_from_associated_token_account( + owner: AccountWithMetadata, + sender_ata: AccountWithMetadata, + recipient: AccountWithMetadata, + ata_program_id: ProgramId, + amount: u128, +) -> (Vec, Vec) { + let token_program_id = sender_ata.account.program_owner; + assert!(owner.is_authorized, "Owner authorization is missing"); + let definition_id = TokenHolding::try_from(&sender_ata.account.data) + .expect("Sender ATA must hold a valid token") + .definition_id(); + let seed = + ata_core::verify_ata_and_get_seed(&sender_ata, &owner, definition_id, ata_program_id); + + let post_states = vec![ + AccountPostState::new(owner.account.clone()), + AccountPostState::new(sender_ata.account.clone()), + AccountPostState::new(recipient.account.clone()), + ]; + let mut sender_ata_auth = sender_ata.clone(); + sender_ata_auth.is_authorized = true; + + let chained_call = ChainedCall::new( + token_program_id, + vec![sender_ata_auth, recipient.clone()], + &token_core::Instruction::Transfer { + amount_to_transfer: amount, + }, + ) + .with_pda_seeds(vec![seed]); + (post_states, vec![chained_call]) +} diff --git a/programs/clock/core/Cargo.toml b/programs/clock/core/Cargo.toml new file mode 100644 index 00000000..53a43b6d --- /dev/null +++ b/programs/clock/core/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "clock_core" 
+version = "0.1.0" +edition = "2024" +license = { workspace = true } + +[lints] +workspace = true + +[dependencies] +nssa_core.workspace = true +borsh.workspace = true diff --git a/programs/clock/core/src/lib.rs b/programs/clock/core/src/lib.rs new file mode 100644 index 00000000..5fc03633 --- /dev/null +++ b/programs/clock/core/src/lib.rs @@ -0,0 +1,42 @@ +//! Core data structures and constants for the Clock Program. + +use borsh::{BorshDeserialize, BorshSerialize}; +use nssa_core::{Timestamp, account::AccountId}; + +pub const CLOCK_01_PROGRAM_ACCOUNT_ID: AccountId = + AccountId::new(*b"/LEZ/ClockProgramAccount/0000001"); + +pub const CLOCK_10_PROGRAM_ACCOUNT_ID: AccountId = + AccountId::new(*b"/LEZ/ClockProgramAccount/0000010"); + +pub const CLOCK_50_PROGRAM_ACCOUNT_ID: AccountId = + AccountId::new(*b"/LEZ/ClockProgramAccount/0000050"); + +/// All clock program account ID in the order expected by the clock program. +pub const CLOCK_PROGRAM_ACCOUNT_IDS: [AccountId; 3] = [ + CLOCK_01_PROGRAM_ACCOUNT_ID, + CLOCK_10_PROGRAM_ACCOUNT_ID, + CLOCK_50_PROGRAM_ACCOUNT_ID, +]; + +/// The instruction type for the Clock Program. The sequencer passes the current block timestamp. +pub type Instruction = Timestamp; + +/// The data stored in a clock account. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, BorshSerialize, BorshDeserialize)] +pub struct ClockAccountData { + pub block_id: u64, + pub timestamp: Timestamp, +} + +impl ClockAccountData { + #[must_use] + pub fn to_bytes(self) -> Vec { + borsh::to_vec(&self).expect("ClockAccountData serialization should not fail") + } + + #[must_use] + pub fn from_bytes(bytes: &[u8]) -> Self { + borsh::from_slice(bytes).expect("ClockAccountData deserialization should not fail") + } +} diff --git a/programs/token/core/src/lib.rs b/programs/token/core/src/lib.rs index 1edbc895..79f49303 100644 --- a/programs/token/core/src/lib.rs +++ b/programs/token/core/src/lib.rs @@ -10,23 +10,23 @@ pub enum Instruction { /// Transfer tokens from sender to recipient. /// /// Required accounts: - /// - Sender's Token Holding account (authorized), - /// - Recipient's Token Holding account. + /// - Sender's Token Holding account (initialized, authorized), + /// - Recipient's Token Holding account (initialized or authorized and uninitialized). Transfer { amount_to_transfer: u128 }, /// Create a new fungible token definition without metadata. /// /// Required accounts: - /// - Token Definition account (uninitialized), - /// - Token Holding account (uninitialized). + /// - Token Definition account (uninitialized, authorized), + /// - Token Holding account (uninitialized, authorized). NewFungibleDefinition { name: String, total_supply: u128 }, /// Create a new fungible or non-fungible token definition with metadata. /// /// Required accounts: - /// - Token Definition account (uninitialized), - /// - Token Holding account (uninitialized), - /// - Token Metadata account (uninitialized). + /// - Token Definition account (uninitialized, authorized), + /// - Token Holding account (uninitialized, authorized), + /// - Token Metadata account (uninitialized, authorized). NewDefinitionWithMetadata { new_definition: NewTokenDefinition, /// Boxed to avoid large enum variant size. 
@@ -36,29 +36,29 @@ pub enum Instruction { /// Initialize a token holding account for a given token definition. /// /// Required accounts: - /// - Token Definition account (initialized), - /// - Token Holding account (uninitialized), + /// - Token Definition account (initialized, any authorization), + /// - Token Holding account (uninitialized, authorized), InitializeAccount, /// Burn tokens from the holder's account. /// /// Required accounts: - /// - Token Definition account (initialized), - /// - Token Holding account (authorized). + /// - Token Definition account (initialized, any authorization), + /// - Token Holding account (initialized, authorized). Burn { amount_to_burn: u128 }, /// Mint new tokens to the holder's account. /// /// Required accounts: - /// - Token Definition account (authorized), - /// - Token Holding account (uninitialized or initialized). + /// - Token Definition account (initialized, authorized), + /// - Token Holding account (uninitialized or authorized and initialized). Mint { amount_to_mint: u128 }, /// Print a new NFT from the master copy. /// /// Required accounts: /// - NFT Master Token Holding account (authorized), - /// - NFT Printed Copy Token Holding account (uninitialized). + /// - NFT Printed Copy Token Holding account (uninitialized, authorized). 
PrintNft, } diff --git a/programs/token/src/initialize.rs b/programs/token/src/initialize.rs index dc0b612a..fabb8fd9 100644 --- a/programs/token/src/initialize.rs +++ b/programs/token/src/initialize.rs @@ -1,6 +1,6 @@ use nssa_core::{ account::{Account, AccountWithMetadata, Data}, - program::AccountPostState, + program::{AccountPostState, Claim}, }; use token_core::{TokenDefinition, TokenHolding}; @@ -30,6 +30,6 @@ pub fn initialize_account( vec![ AccountPostState::new(definition_post), - AccountPostState::new_claimed(account_to_initialize), + AccountPostState::new_claimed(account_to_initialize, Claim::Authorized), ] } diff --git a/programs/token/src/mint.rs b/programs/token/src/mint.rs index 8b157340..5a15d81f 100644 --- a/programs/token/src/mint.rs +++ b/programs/token/src/mint.rs @@ -1,6 +1,6 @@ use nssa_core::{ account::{Account, AccountWithMetadata, Data}, - program::AccountPostState, + program::{AccountPostState, Claim}, }; use token_core::{TokenDefinition, TokenHolding}; @@ -67,6 +67,6 @@ pub fn mint( vec![ AccountPostState::new(definition_post), - AccountPostState::new_claimed_if_default(holding_post), + AccountPostState::new_claimed_if_default(holding_post, Claim::Authorized), ] } diff --git a/programs/token/src/new_definition.rs b/programs/token/src/new_definition.rs index 8da55dc1..ba510feb 100644 --- a/programs/token/src/new_definition.rs +++ b/programs/token/src/new_definition.rs @@ -1,6 +1,6 @@ use nssa_core::{ account::{Account, AccountWithMetadata, Data}, - program::AccountPostState, + program::{AccountPostState, Claim}, }; use token_core::{ NewTokenDefinition, NewTokenMetadata, TokenDefinition, TokenHolding, TokenMetadata, @@ -42,8 +42,8 @@ pub fn new_fungible_definition( holding_target_account_post.data = Data::from(&token_holding); vec![ - AccountPostState::new_claimed(definition_target_account_post), - AccountPostState::new_claimed(holding_target_account_post), + AccountPostState::new_claimed(definition_target_account_post, Claim::Authorized), 
+ AccountPostState::new_claimed(holding_target_account_post, Claim::Authorized), ] } @@ -119,8 +119,8 @@ pub fn new_definition_with_metadata( metadata_target_account_post.data = Data::from(&token_metadata); vec![ - AccountPostState::new_claimed(definition_target_account_post), - AccountPostState::new_claimed(holding_target_account_post), - AccountPostState::new_claimed(metadata_target_account_post), + AccountPostState::new_claimed(definition_target_account_post, Claim::Authorized), + AccountPostState::new_claimed(holding_target_account_post, Claim::Authorized), + AccountPostState::new_claimed(metadata_target_account_post, Claim::Authorized), ] } diff --git a/programs/token/src/print_nft.rs b/programs/token/src/print_nft.rs index c7177a43..6bc9612d 100644 --- a/programs/token/src/print_nft.rs +++ b/programs/token/src/print_nft.rs @@ -1,6 +1,6 @@ use nssa_core::{ account::{Account, AccountWithMetadata, Data}, - program::AccountPostState, + program::{AccountPostState, Claim}, }; use token_core::TokenHolding; @@ -50,6 +50,6 @@ pub fn print_nft( vec![ AccountPostState::new(master_account_post), - AccountPostState::new_claimed(printed_account_post), + AccountPostState::new_claimed(printed_account_post, Claim::Authorized), ] } diff --git a/programs/token/src/tests.rs b/programs/token/src/tests.rs index 640d6d76..4c28d769 100644 --- a/programs/token/src/tests.rs +++ b/programs/token/src/tests.rs @@ -5,7 +5,10 @@ reason = "We don't care about it in tests" )] -use nssa_core::account::{Account, AccountId, AccountWithMetadata, Data}; +use nssa_core::{ + account::{Account, AccountId, AccountWithMetadata, Data}, + program::Claim, +}; use token_core::{ MetadataStandard, NewTokenDefinition, NewTokenMetadata, TokenDefinition, TokenHolding, }; @@ -851,7 +854,7 @@ fn mint_uninit_holding_success() { *holding_post.account(), AccountForTests::init_mint().account ); - assert!(holding_post.requires_claim()); + assert_eq!(holding_post.required_claim(), Some(Claim::Authorized)); } #[test] 
diff --git a/programs/token/src/transfer.rs b/programs/token/src/transfer.rs index 392f630e..2ffd2339 100644 --- a/programs/token/src/transfer.rs +++ b/programs/token/src/transfer.rs @@ -1,6 +1,6 @@ use nssa_core::{ account::{Account, AccountWithMetadata, Data}, - program::AccountPostState, + program::{AccountPostState, Claim}, }; use token_core::TokenHolding; @@ -106,6 +106,6 @@ pub fn transfer( vec![ AccountPostState::new(sender_post), - AccountPostState::new_claimed_if_default(recipient_post), + AccountPostState::new_claimed_if_default(recipient_post, Claim::Authorized), ] } diff --git a/sequencer_core/Cargo.toml b/sequencer/core/Cargo.toml similarity index 87% rename from sequencer_core/Cargo.toml rename to sequencer/core/Cargo.toml index 334f093c..efd0e359 100644 --- a/sequencer_core/Cargo.toml +++ b/sequencer/core/Cargo.toml @@ -14,8 +14,8 @@ common.workspace = true storage.workspace = true mempool.workspace = true bedrock_client.workspace = true +testnet_initial_state.workspace = true -base58.workspace = true anyhow.workspace = true serde.workspace = true serde_json.workspace = true @@ -40,3 +40,5 @@ mock = [] [dev-dependencies] futures.workspace = true +test_program_methods.workspace = true +nssa = { workspace = true, features = ["test-utils"] } diff --git a/sequencer_core/src/block_settlement_client.rs b/sequencer/core/src/block_settlement_client.rs similarity index 100% rename from sequencer_core/src/block_settlement_client.rs rename to sequencer/core/src/block_settlement_client.rs diff --git a/sequencer_core/src/block_store.rs b/sequencer/core/src/block_store.rs similarity index 88% rename from sequencer_core/src/block_store.rs rename to sequencer/core/src/block_store.rs index eb541188..46f71797 100644 --- a/sequencer_core/src/block_store.rs +++ b/sequencer/core/src/block_store.rs @@ -6,8 +6,8 @@ use common::{ block::{Block, BlockMeta, MantleMsgId}, transaction::NSSATransaction, }; -use nssa::V02State; -use storage::sequencer::RocksDBIO; +use 
nssa::V03State; +use storage::{error::DbError, sequencer::RocksDBIO}; pub struct SequencerStore { dbio: RocksDBIO, @@ -42,8 +42,8 @@ impl SequencerStore { }) } - pub fn get_block_at_id(&self, id: u64) -> Result { - Ok(self.dbio.get_block(id)?) + pub fn get_block_at_id(&self, id: u64) -> Result, DbError> { + self.dbio.get_block(id) } pub fn delete_block_at_id(&mut self, block_id: u64) -> Result<()> { @@ -56,16 +56,20 @@ impl SequencerStore { /// Returns the transaction corresponding to the given hash, if it exists in the blockchain. pub fn get_transaction_by_hash(&self, hash: HashType) -> Option { - let block_id = self.tx_hash_to_block_map.get(&hash); - let block = block_id.map(|&id| self.get_block_at_id(id)); - if let Some(Ok(block)) = block { - for transaction in block.body.transactions { - if transaction.hash() == hash { - return Some(transaction); - } + let block_id = *self.tx_hash_to_block_map.get(&hash)?; + let block = self + .get_block_at_id(block_id) + .ok() + .flatten() + .expect("Block should be present since the hash is in the map"); + for transaction in block.body.transactions { + if transaction.hash() == hash { + return Some(transaction); } } - None + panic!( + "Transaction hash was in the map but transaction was not found in the block. This should never happen." 
+ ); } pub fn latest_block_meta(&self) -> Result { @@ -88,7 +92,7 @@ impl SequencerStore { &mut self, block: &Block, msg_id: MantleMsgId, - state: &V02State, + state: &V03State, ) -> Result<()> { let new_transactions_map = block_to_transactions_map(block); self.dbio.atomic_update(block, msg_id, state)?; @@ -96,7 +100,7 @@ impl SequencerStore { Ok(()) } - pub fn get_nssa_state(&self) -> Option { + pub fn get_nssa_state(&self) -> Option { self.dbio.get_nssa_state().ok() } } @@ -146,7 +150,7 @@ mod tests { let retrieved_tx = node_store.get_transaction_by_hash(tx.hash()); assert_eq!(None, retrieved_tx); // Add the block with the transaction - let dummy_state = V02State::new_with_genesis_accounts(&[], &[]); + let dummy_state = V03State::new_with_genesis_accounts(&[], &[], 0); node_store.update(&block, [1; 32], &dummy_state).unwrap(); // Try again let retrieved_tx = node_store.get_transaction_by_hash(tx.hash()); @@ -205,7 +209,7 @@ mod tests { let block_hash = block.header.hash; let block_msg_id = [1; 32]; - let dummy_state = V02State::new_with_genesis_accounts(&[], &[]); + let dummy_state = V03State::new_with_genesis_accounts(&[], &[], 0); node_store .update(&block, block_msg_id, &dummy_state) .unwrap(); @@ -240,11 +244,11 @@ mod tests { let block = common::test_utils::produce_dummy_block(1, None, vec![tx]); let block_id = block.header.block_id; - let dummy_state = V02State::new_with_genesis_accounts(&[], &[]); + let dummy_state = V03State::new_with_genesis_accounts(&[], &[], 0); node_store.update(&block, [1; 32], &dummy_state).unwrap(); // Verify initial status is Pending - let retrieved_block = node_store.get_block_at_id(block_id).unwrap(); + let retrieved_block = node_store.get_block_at_id(block_id).unwrap().unwrap(); assert!(matches!( retrieved_block.bedrock_status, common::block::BedrockStatus::Pending @@ -254,7 +258,7 @@ mod tests { node_store.mark_block_as_finalized(block_id).unwrap(); // Verify status is now Finalized - let finalized_block = 
node_store.get_block_at_id(block_id).unwrap(); + let finalized_block = node_store.get_block_at_id(block_id).unwrap().unwrap(); assert!(matches!( finalized_block.bedrock_status, common::block::BedrockStatus::Finalized diff --git a/sequencer_core/src/config.rs b/sequencer/core/src/config.rs similarity index 76% rename from sequencer_core/src/config.rs rename to sequencer/core/src/config.rs index 097d1391..fa4a2fa7 100644 --- a/sequencer_core/src/config.rs +++ b/sequencer/core/src/config.rs @@ -8,13 +8,11 @@ use std::{ use anyhow::Result; use bedrock_client::BackoffConfig; use bytesize::ByteSize; -use common::{ - block::{AccountInitialData, CommitmentsInitialData}, - config::BasicAuth, -}; +use common::config::BasicAuth; use humantime_serde; use logos_blockchain_core::mantle::ops::channel::ChannelId; use serde::{Deserialize, Serialize}; +use testnet_initial_state::{PrivateAccountPublicInitialData, PublicAccountPublicInitialData}; use url::Url; // TODO: Provide default values @@ -22,15 +20,14 @@ use url::Url; pub struct SequencerConfig { /// Home dir of sequencer storage. pub home: PathBuf, - /// Override rust log (env var logging level). - pub override_rust_log: Option, /// Genesis id. pub genesis_id: u64, /// If `True`, then adds random sequence of bytes to genesis block. pub is_genesis_random: bool, - /// Maximum number of transactions in block. + /// Maximum number of user transactions in a block (excludes the mandatory clock transaction). pub max_num_tx_in_block: usize, - /// Maximum block size (includes header and transactions). + /// Maximum block size (includes header, user transactions, and the mandatory clock + /// transaction). #[serde(default = "default_max_block_size")] pub max_block_size: ByteSize, /// Mempool maximum size. @@ -41,18 +38,16 @@ pub struct SequencerConfig { /// Interval in which pending blocks are retried. #[serde(with = "humantime_serde")] pub retry_pending_blocks_timeout: Duration, - /// Port to listen. 
- pub port: u16, - /// List of initial accounts data. - pub initial_accounts: Vec, - /// List of initial commitments. - pub initial_commitments: Vec, /// Sequencer own signing key. pub signing_key: [u8; 32], /// Bedrock configuration options. pub bedrock_config: BedrockConfig, /// Indexer RPC URL. pub indexer_rpc_url: Url, + #[serde(skip_serializing_if = "Option::is_none")] + pub initial_public_accounts: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + pub initial_private_accounts: Option>, } #[derive(Clone, Serialize, Deserialize)] diff --git a/sequencer_core/src/indexer_client.rs b/sequencer/core/src/indexer_client.rs similarity index 100% rename from sequencer_core/src/indexer_client.rs rename to sequencer/core/src/indexer_client.rs diff --git a/sequencer_core/src/lib.rs b/sequencer/core/src/lib.rs similarity index 69% rename from sequencer_core/src/lib.rs rename to sequencer/core/src/lib.rs index c844c193..642d663a 100644 --- a/sequencer_core/src/lib.rs +++ b/sequencer/core/src/lib.rs @@ -7,7 +7,7 @@ use common::PINATA_BASE58; use common::{ HashType, block::{BedrockStatus, Block, HashableBlockData}, - transaction::NSSATransaction, + transaction::{NSSATransaction, clock_invocation}, }; use config::SequencerConfig; use log::{error, info, warn}; @@ -15,6 +15,9 @@ use logos_blockchain_key_management_system_service::keys::{ED25519_SECRET_KEY_SI use mempool::{MemPool, MemPoolHandle}; #[cfg(feature = "mock")] pub use mock::SequencerCoreWithMockClients; +use nssa::V03State; +pub use storage::error::DbError; +use testnet_initial_state::initial_state; use crate::{ block_settlement_client::{BlockSettlementClient, BlockSettlementClientTrait, MsgId}, @@ -34,7 +37,7 @@ pub struct SequencerCore< BC: BlockSettlementClientTrait = BlockSettlementClient, IC: IndexerClientTrait = IndexerClient, > { - state: nssa::V02State, + state: nssa::V03State, store: SequencerStore, mempool: MemPool, sequencer_config: SequencerConfig, @@ -97,30 +100,49 @@ impl SequencerCore = 
config - .initial_commitments - .iter() - .map(|init_comm_data| { - let npk = &init_comm_data.npk; - let mut acc = init_comm_data.account.clone(); + let initial_commitments: Option> = config + .initial_private_accounts + .clone() + .map(|initial_commitments| { + initial_commitments + .iter() + .map(|init_comm_data| { + let npk = &init_comm_data.npk; - acc.program_owner = - nssa::program::Program::authenticated_transfer_program().id(); + let mut acc = init_comm_data.account.clone(); - nssa_core::Commitment::new(npk, &acc) - }) - .collect(); + acc.program_owner = + nssa::program::Program::authenticated_transfer_program().id(); - let init_accs: Vec<(nssa::AccountId, u128)> = config - .initial_accounts - .iter() - .map(|acc_data| (acc_data.account_id, acc_data.balance)) - .collect(); + nssa_core::Commitment::new(npk, &acc) + }) + .collect() + }); - nssa::V02State::new_with_genesis_accounts(&init_accs, &initial_commitments) + let init_accs: Option> = config + .initial_public_accounts + .clone() + .map(|initial_accounts| { + initial_accounts + .iter() + .map(|acc_data| (acc_data.account_id, acc_data.balance)) + .collect() + }); + + // If initial commitments or accounts are present in config, need to construct state + // from them + if initial_commitments.is_some() || init_accs.is_some() { + V03State::new_with_genesis_accounts( + &init_accs.unwrap_or_default(), + &initial_commitments.unwrap_or_default(), + genesis_block.header.timestamp, + ) + } else { + initial_state() + } }; #[cfg(feature = "testnet")] @@ -141,24 +163,6 @@ impl SequencerCore Result { - match &tx { - NSSATransaction::Public(tx) => self.state.transition_from_public_transaction(tx), - NSSATransaction::PrivacyPreserving(tx) => self - .state - .transition_from_privacy_preserving_transaction(tx), - NSSATransaction::ProgramDeployment(tx) => self - .state - .transition_from_program_deployment_transaction(tx), - } - .inspect_err(|err| warn!("Error at transition {err:#?}"))?; - - Ok(tx) - } - pub async fn 
produce_new_block(&mut self) -> Result { let (tx, _msg_id) = self .produce_new_block_with_mempool_transactions() @@ -183,10 +187,7 @@ impl SequencerCore Result<(SignedMantleTx, MsgId)> { let now = Instant::now(); - let new_block_height = self - .chain_height - .checked_add(1) - .with_context(|| format!("Max block height reached: {}", self.chain_height))?; + let new_block_height = self.next_block_id(); let mut valid_transactions = vec![]; @@ -198,20 +199,28 @@ impl SequencerCore SequencerCore { - valid_transactions.push(valid_tx); - - info!("Validated transaction with hash {tx_hash}, including it in block"); - - if valid_transactions.len() >= self.sequencer_config.max_num_tx_in_block { - break; - } - } + let validated_diff = match tx.validate_on_state( + &self.state, + new_block_height, + new_block_timestamp, + ) { + Ok(diff) => diff, Err(err) => { error!( "Transaction with hash {tx_hash} failed execution check with error: {err:#?}, skipping it", ); - // TODO: Probably need to handle unsuccessful transaction execution? + continue; } + }; + + self.state.apply_state_diff(validated_diff); + + valid_transactions.push(tx); + info!("Validated transaction with hash {tx_hash}, including it in block"); + if valid_transactions.len() >= self.sequencer_config.max_num_tx_in_block { + break; } } + // Append the Clock Program invocation as the mandatory last transaction. + self.state + .transition_from_public_transaction(&clock_tx, new_block_height, new_block_timestamp) + .context("Clock transaction failed. 
Aborting block production.")?; + valid_transactions.push(clock_nssa_tx); + let hashable_data = HashableBlockData { block_id: new_block_height, transactions: valid_transactions, prev_block_hash: latest_block_meta.hash, - timestamp: curr_time, + timestamp: new_block_timestamp, }; let block = hashable_data @@ -281,7 +300,7 @@ impl SequencerCore &nssa::V02State { + pub const fn state(&self) -> &nssa::V03State { &self.state } @@ -333,6 +352,12 @@ impl SequencerCore IC { self.indexer_client.clone() } + + fn next_block_id(&self) -> u64 { + self.chain_height + .checked_add(1) + .unwrap_or_else(|| panic!("Max block height reached: {}", self.chain_height)) + } } /// Load signing key from file or generate a new one if it doesn't exist. @@ -362,41 +387,34 @@ fn load_or_create_signing_key(path: &Path) -> Result { mod tests { #![expect(clippy::shadow_unrelated, reason = "We don't care about it in tests")] - use std::{pin::pin, str::FromStr as _, time::Duration}; + use std::{pin::pin, time::Duration}; - use base58::ToBase58 as _; use bedrock_client::BackoffConfig; use common::{ - block::AccountInitialData, test_utils::sequencer_sign_key_for_testing, - transaction::NSSATransaction, + test_utils::sequencer_sign_key_for_testing, + transaction::{NSSATransaction, clock_invocation}, }; use logos_blockchain_core::mantle::ops::channel::ChannelId; use mempool::MemPoolHandle; - use nssa::{AccountId, PrivateKey}; + use testnet_initial_state::{initial_accounts, initial_pub_accounts_private_keys}; use crate::{ config::{BedrockConfig, SequencerConfig}, mock::SequencerCoreWithMockClients, }; - fn setup_sequencer_config_variable_initial_accounts( - initial_accounts: Vec, - ) -> SequencerConfig { + fn setup_sequencer_config() -> SequencerConfig { let tempdir = tempfile::tempdir().unwrap(); let home = tempdir.path().to_path_buf(); SequencerConfig { home, - override_rust_log: Some("info".to_owned()), genesis_id: 1, is_genesis_random: false, max_num_tx_in_block: 10, max_block_size: 
bytesize::ByteSize::mib(1), mempool_max_size: 10000, block_create_timeout: Duration::from_secs(1), - port: 8080, - initial_accounts, - initial_commitments: vec![], signing_key: *sequencer_sign_key_for_testing().value(), bedrock_config: BedrockConfig { backoff: BackoffConfig { @@ -407,43 +425,19 @@ mod tests { node_url: "http://not-used-in-unit-tests".parse().unwrap(), auth: None, }, - retry_pending_blocks_timeout: Duration::from_secs(60 * 4), + retry_pending_blocks_timeout: Duration::from_mins(4), indexer_rpc_url: "ws://localhost:8779".parse().unwrap(), + initial_public_accounts: None, + initial_private_accounts: None, } } - fn setup_sequencer_config() -> SequencerConfig { - let acc1_account_id: Vec = vec![ - 148, 179, 206, 253, 199, 51, 82, 86, 232, 2, 152, 122, 80, 243, 54, 207, 237, 112, 83, - 153, 44, 59, 204, 49, 128, 84, 160, 227, 216, 149, 97, 102, - ]; - - let acc2_account_id: Vec = vec![ - 30, 145, 107, 3, 207, 73, 192, 230, 160, 63, 238, 207, 18, 69, 54, 216, 103, 244, 92, - 94, 124, 248, 42, 16, 141, 19, 119, 18, 14, 226, 140, 204, - ]; - - let initial_acc1 = AccountInitialData { - account_id: AccountId::from_str(&acc1_account_id.to_base58()).unwrap(), - balance: 10000, - }; - - let initial_acc2 = AccountInitialData { - account_id: AccountId::from_str(&acc2_account_id.to_base58()).unwrap(), - balance: 20000, - }; - - let initial_accounts = vec![initial_acc1, initial_acc2]; - - setup_sequencer_config_variable_initial_accounts(initial_accounts) - } - fn create_signing_key_for_account1() -> nssa::PrivateKey { - nssa::PrivateKey::try_new([1; 32]).unwrap() + initial_pub_accounts_private_keys()[0].pub_sign_key.clone() } fn create_signing_key_for_account2() -> nssa::PrivateKey { - nssa::PrivateKey::try_new([2; 32]).unwrap() + initial_pub_accounts_private_keys()[1].pub_sign_key.clone() } async fn common_setup() -> (SequencerCoreWithMockClients, MemPoolHandle) { @@ -475,10 +469,9 @@ mod tests { assert_eq!(sequencer.chain_height, config.genesis_id); 
assert_eq!(sequencer.sequencer_config.max_num_tx_in_block, 10); - assert_eq!(sequencer.sequencer_config.port, 8080); - let acc1_account_id = config.initial_accounts[0].account_id; - let acc2_account_id = config.initial_accounts[1].account_id; + let acc1_account_id = initial_accounts()[0].account_id; + let acc2_account_id = initial_accounts()[1].account_id; let balance_acc_1 = sequencer.state.get_account_by_id(acc1_account_id).balance; let balance_acc_2 = sequencer.state.get_account_by_id(acc2_account_id).balance; @@ -487,47 +480,6 @@ mod tests { assert_eq!(20000, balance_acc_2); } - #[tokio::test] - async fn start_different_intial_accounts_balances() { - let acc1_account_id: Vec = vec![ - 27, 132, 197, 86, 123, 18, 100, 64, 153, 93, 62, 213, 170, 186, 5, 101, 215, 30, 24, - 52, 96, 72, 25, 255, 156, 23, 245, 233, 213, 221, 7, 143, - ]; - - let acc2_account_id: Vec = vec![ - 77, 75, 108, 209, 54, 16, 50, 202, 155, 210, 174, 185, 217, 0, 170, 77, 69, 217, 234, - 216, 10, 201, 66, 51, 116, 196, 81, 167, 37, 77, 7, 102, - ]; - - let initial_acc1 = AccountInitialData { - account_id: AccountId::from_str(&acc1_account_id.to_base58()).unwrap(), - balance: 10000, - }; - - let initial_acc2 = AccountInitialData { - account_id: AccountId::from_str(&acc2_account_id.to_base58()).unwrap(), - balance: 20000, - }; - - let initial_accounts = vec![initial_acc1, initial_acc2]; - - let config = setup_sequencer_config_variable_initial_accounts(initial_accounts); - let (sequencer, _mempool_handle) = - SequencerCoreWithMockClients::start_from_config(config.clone()).await; - - let acc1_account_id = config.initial_accounts[0].account_id; - let acc2_account_id = config.initial_accounts[1].account_id; - - assert_eq!( - 10000, - sequencer.state.get_account_by_id(acc1_account_id).balance - ); - assert_eq!( - 20000, - sequencer.state.get_account_by_id(acc2_account_id).balance - ); - } - #[test] fn transaction_pre_check_pass() { let tx = common::test_utils::produce_dummy_empty_transaction(); @@ 
-538,10 +490,10 @@ mod tests { #[tokio::test] async fn transaction_pre_check_native_transfer_valid() { - let (sequencer, _mempool_handle) = common_setup().await; + let (_sequencer, _mempool_handle) = common_setup().await; - let acc1 = sequencer.sequencer_config.initial_accounts[0].account_id; - let acc2 = sequencer.sequencer_config.initial_accounts[1].account_id; + let acc1 = initial_accounts()[0].account_id; + let acc2 = initial_accounts()[1].account_id; let sign_key1 = create_signing_key_for_account1(); @@ -557,8 +509,8 @@ mod tests { async fn transaction_pre_check_native_transfer_other_signature() { let (mut sequencer, _mempool_handle) = common_setup().await; - let acc1 = sequencer.sequencer_config.initial_accounts[0].account_id; - let acc2 = sequencer.sequencer_config.initial_accounts[1].account_id; + let acc1 = initial_accounts()[0].account_id; + let acc2 = initial_accounts()[1].account_id; let sign_key2 = create_signing_key_for_account2(); @@ -570,7 +522,7 @@ mod tests { let tx = tx.transaction_stateless_check().unwrap(); // Signature is not from sender. 
Execution fails - let result = sequencer.execute_check_transaction_on_state(tx); + let result = tx.execute_check_on_state(&mut sequencer.state, 0, 0); assert!(matches!( result, @@ -582,8 +534,8 @@ mod tests { async fn transaction_pre_check_native_transfer_sent_too_much() { let (mut sequencer, _mempool_handle) = common_setup().await; - let acc1 = sequencer.sequencer_config.initial_accounts[0].account_id; - let acc2 = sequencer.sequencer_config.initial_accounts[1].account_id; + let acc1 = initial_accounts()[0].account_id; + let acc2 = initial_accounts()[1].account_id; let sign_key1 = create_signing_key_for_account1(); @@ -596,7 +548,9 @@ mod tests { // Passed pre-check assert!(result.is_ok()); - let result = sequencer.execute_check_transaction_on_state(result.unwrap()); + let result = result + .unwrap() + .execute_check_on_state(&mut sequencer.state, 0, 0); let is_failed_at_balance_mismatch = matches!( result.err().unwrap(), nssa::error::NssaError::ProgramExecutionFailed(_) @@ -609,8 +563,8 @@ mod tests { async fn transaction_execute_native_transfer() { let (mut sequencer, _mempool_handle) = common_setup().await; - let acc1 = sequencer.sequencer_config.initial_accounts[0].account_id; - let acc2 = sequencer.sequencer_config.initial_accounts[1].account_id; + let acc1 = initial_accounts()[0].account_id; + let acc2 = initial_accounts()[1].account_id; let sign_key1 = create_signing_key_for_account1(); @@ -618,7 +572,8 @@ mod tests { acc1, 0, acc2, 100, &sign_key1, ); - sequencer.execute_check_transaction_on_state(tx).unwrap(); + tx.execute_check_on_state(&mut sequencer.state, 0, 0) + .unwrap(); let bal_from = sequencer.state.get_account_by_id(acc1).balance; let bal_to = sequencer.state.get_account_by_id(acc2).balance; @@ -671,8 +626,8 @@ mod tests { async fn replay_transactions_are_rejected_in_the_same_block() { let (mut sequencer, mempool_handle) = common_setup().await; - let acc1 = sequencer.sequencer_config.initial_accounts[0].account_id; - let acc2 = 
sequencer.sequencer_config.initial_accounts[1].account_id; + let acc1 = initial_accounts()[0].account_id; + let acc2 = initial_accounts()[1].account_id; let sign_key1 = create_signing_key_for_account1(); @@ -693,18 +648,25 @@ mod tests { let block = sequencer .store .get_block_at_id(sequencer.chain_height) + .unwrap() .unwrap(); - // Only one should be included in the block - assert_eq!(block.body.transactions, vec![tx.clone()]); + // Only one user tx should be included; the clock tx is always appended last. + assert_eq!( + block.body.transactions, + vec![ + tx.clone(), + NSSATransaction::Public(clock_invocation(block.header.timestamp)) + ] + ); } #[tokio::test] async fn replay_transactions_are_rejected_in_different_blocks() { let (mut sequencer, mempool_handle) = common_setup().await; - let acc1 = sequencer.sequencer_config.initial_accounts[0].account_id; - let acc2 = sequencer.sequencer_config.initial_accounts[1].account_id; + let acc1 = initial_accounts()[0].account_id; + let acc2 = initial_accounts()[1].account_id; let sign_key1 = create_signing_key_for_account1(); @@ -720,8 +682,15 @@ mod tests { let block = sequencer .store .get_block_at_id(sequencer.chain_height) + .unwrap() .unwrap(); - assert_eq!(block.body.transactions, vec![tx.clone()]); + assert_eq!( + block.body.transactions, + vec![ + tx.clone(), + NSSATransaction::Public(clock_invocation(block.header.timestamp)) + ] + ); // Add same transaction should fail mempool_handle.push(tx.clone()).await.unwrap(); @@ -731,15 +700,22 @@ mod tests { let block = sequencer .store .get_block_at_id(sequencer.chain_height) + .unwrap() .unwrap(); - assert!(block.body.transactions.is_empty()); + // The replay is rejected, so only the clock tx is in the block. 
+ assert_eq!( + block.body.transactions, + vec![NSSATransaction::Public(clock_invocation( + block.header.timestamp + ))] + ); } #[tokio::test] async fn restart_from_storage() { let config = setup_sequencer_config(); - let acc1_account_id = config.initial_accounts[0].account_id; - let acc2_account_id = config.initial_accounts[1].account_id; + let acc1_account_id = initial_accounts()[0].account_id; + let acc2_account_id = initial_accounts()[1].account_id; let balance_to_move = 13; // In the following code block a transaction will be processed that moves `balance_to_move` @@ -748,7 +724,7 @@ mod tests { { let (mut sequencer, mempool_handle) = SequencerCoreWithMockClients::start_from_config(config.clone()).await; - let signing_key = PrivateKey::try_new([1; 32]).unwrap(); + let signing_key = create_signing_key_for_account1(); let tx = common::test_utils::create_transaction_native_token_transfer( acc1_account_id, @@ -765,8 +741,15 @@ mod tests { let block = sequencer .store .get_block_at_id(sequencer.chain_height) + .unwrap() .unwrap(); - assert_eq!(block.body.transactions, vec![tx.clone()]); + assert_eq!( + block.body.transactions, + vec![ + tx.clone(), + NSSATransaction::Public(clock_invocation(block.header.timestamp)) + ] + ); } // Instantiating a new sequencer from the same config. 
This should load the existing block @@ -779,11 +762,11 @@ mod tests { // Balances should be consistent with the stored block assert_eq!( balance_acc_1, - config.initial_accounts[0].balance - balance_to_move + initial_accounts()[0].balance - balance_to_move ); assert_eq!( balance_acc_2, - config.initial_accounts[1].balance + balance_to_move + initial_accounts()[1].balance + balance_to_move ); } @@ -830,15 +813,15 @@ mod tests { #[tokio::test] async fn produce_block_with_correct_prev_meta_after_restart() { let config = setup_sequencer_config(); - let acc1_account_id = config.initial_accounts[0].account_id; - let acc2_account_id = config.initial_accounts[1].account_id; + let acc1_account_id = initial_accounts()[0].account_id; + let acc2_account_id = initial_accounts()[1].account_id; // Step 1: Create initial database with some block metadata let expected_prev_meta = { let (mut sequencer, mempool_handle) = SequencerCoreWithMockClients::start_from_config(config.clone()).await; - let signing_key = PrivateKey::try_new([1; 32]).unwrap(); + let signing_key = create_signing_key_for_account1(); // Add a transaction and produce a block to set up block metadata let tx = common::test_utils::create_transaction_native_token_transfer( @@ -863,7 +846,7 @@ mod tests { SequencerCoreWithMockClients::start_from_config(config.clone()).await; // Step 3: Submit a new transaction - let signing_key = PrivateKey::try_new([1; 32]).unwrap(); + let signing_key = create_signing_key_for_account1(); let tx = common::test_utils::create_transaction_native_token_transfer( acc1_account_id, 1, // Next nonce @@ -883,6 +866,7 @@ mod tests { let new_block = sequencer .store .get_block_at_id(sequencer.chain_height) + .unwrap() .unwrap(); assert_eq!( @@ -895,8 +879,54 @@ mod tests { ); assert_eq!( new_block.body.transactions, - vec![tx], - "New block should contain the submitted transaction" + vec![ + tx, + NSSATransaction::Public(clock_invocation(new_block.header.timestamp)) + ], + "New block should contain 
the submitted transaction and the clock invocation" + ); + } + + #[tokio::test] + async fn transactions_touching_clock_account_are_dropped_from_block() { + let (mut sequencer, mempool_handle) = common_setup().await; + + // Canonical clock invocation and a crafted variant with a different timestamp — both must + // be dropped because their diffs touch the clock accounts. + let crafted_clock_tx = { + let message = nssa::public_transaction::Message::try_new( + nssa::program::Program::clock().id(), + nssa::CLOCK_PROGRAM_ACCOUNT_IDS.to_vec(), + vec![], + 42_u64, + ) + .unwrap(); + NSSATransaction::Public(nssa::PublicTransaction::new( + message, + nssa::public_transaction::WitnessSet::from_raw_parts(vec![]), + )) + }; + mempool_handle + .push(NSSATransaction::Public(clock_invocation(0))) + .await + .unwrap(); + mempool_handle.push(crafted_clock_tx).await.unwrap(); + sequencer + .produce_new_block_with_mempool_transactions() + .unwrap(); + + let block = sequencer + .store + .get_block_at_id(sequencer.chain_height) + .unwrap() + .unwrap(); + + // Both transactions were dropped. Only the system-appended clock tx remains. + assert_eq!( + block.body.transactions, + vec![NSSATransaction::Public(clock_invocation( + block.header.timestamp + ))] ); } @@ -948,4 +978,86 @@ mod tests { "Chain height should NOT match the modified config.genesis_id" ); } + + #[tokio::test] + async fn user_tx_that_chain_calls_clock_is_dropped() { + let (mut sequencer, mempool_handle) = common_setup().await; + + // Deploy the clock_chain_caller test program. 
+ let deploy_tx = + NSSATransaction::ProgramDeployment(nssa::ProgramDeploymentTransaction::new( + nssa::program_deployment_transaction::Message::new( + test_program_methods::CLOCK_CHAIN_CALLER_ELF.to_vec(), + ), + )); + mempool_handle.push(deploy_tx).await.unwrap(); + sequencer + .produce_new_block_with_mempool_transactions() + .unwrap(); + + // Build a user transaction that invokes clock_chain_caller, which in turn chain-calls the + // clock program with the clock accounts. The sequencer should detect that the resulting + // state diff modifies clock accounts and drop the transaction. + let clock_chain_caller_id = + nssa::program::Program::new(test_program_methods::CLOCK_CHAIN_CALLER_ELF.to_vec()) + .unwrap() + .id(); + let clock_program_id = nssa::program::Program::clock().id(); + let timestamp: u64 = 0; + + let message = nssa::public_transaction::Message::try_new( + clock_chain_caller_id, + nssa::CLOCK_PROGRAM_ACCOUNT_IDS.to_vec(), + vec![], // no signers + (clock_program_id, timestamp), + ) + .unwrap(); + let user_tx = NSSATransaction::Public(nssa::PublicTransaction::new( + message, + nssa::public_transaction::WitnessSet::from_raw_parts(vec![]), + )); + + mempool_handle.push(user_tx).await.unwrap(); + sequencer + .produce_new_block_with_mempool_transactions() + .unwrap(); + + let block = sequencer + .store + .get_block_at_id(sequencer.chain_height) + .unwrap() + .unwrap(); + + // The user tx must have been dropped; only the mandatory clock invocation remains. + assert_eq!( + block.body.transactions, + vec![NSSATransaction::Public(clock_invocation( + block.header.timestamp + ))] + ); + } + + #[tokio::test] + async fn block_production_aborts_when_clock_account_data_is_corrupted() { + let (mut sequencer, mempool_handle) = common_setup().await; + + // Corrupt the clock 01 account data so the clock program panics on deserialization. 
+ let clock_account_id = nssa::CLOCK_01_PROGRAM_ACCOUNT_ID; + let mut corrupted = sequencer.state.get_account_by_id(clock_account_id); + corrupted.data = vec![0xff; 3].try_into().unwrap(); + sequencer + .state + .force_insert_account(clock_account_id, corrupted); + + // Push a dummy transaction so the mempool is non-empty. + let tx = common::test_utils::produce_dummy_empty_transaction(); + mempool_handle.push(tx).await.unwrap(); + + // Block production must fail because the appended clock tx cannot execute. + let result = sequencer.produce_new_block_with_mempool_transactions(); + assert!( + result.is_err(), + "Block production should abort when clock account data is corrupted" + ); + } } diff --git a/sequencer_core/src/mock.rs b/sequencer/core/src/mock.rs similarity index 100% rename from sequencer_core/src/mock.rs rename to sequencer/core/src/mock.rs diff --git a/sequencer_runner/Cargo.toml b/sequencer/service/Cargo.toml similarity index 63% rename from sequencer_runner/Cargo.toml rename to sequencer/service/Cargo.toml index 71404d13..6fee808c 100644 --- a/sequencer_runner/Cargo.toml +++ b/sequencer/service/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "sequencer_runner" +name = "sequencer_service" version = "0.1.0" edition = "2024" license = { workspace = true } @@ -9,20 +9,25 @@ workspace = true [dependencies] common.workspace = true +nssa.workspace = true +mempool.workspace = true sequencer_core = { workspace = true, features = ["testnet"] } -sequencer_rpc.workspace = true +sequencer_service_protocol.workspace = true +sequencer_service_rpc = { workspace = true, features = ["server"] } indexer_service_rpc = { workspace = true, features = ["client"] } clap = { workspace = true, features = ["derive", "env"] } anyhow.workspace = true env_logger.workspace = true log.workspace = true -actix.workspace = true -actix-web.workspace = true tokio.workspace = true +tokio-util.workspace = true +jsonrpsee.workspace = true futures.workspace = true +bytesize.workspace = true 
+borsh.workspace = true [features] default = [] # Runs the sequencer in standalone mode without depending on Bedrock and Indexer services. -standalone = ["sequencer_core/mock", "sequencer_rpc/standalone"] +standalone = ["sequencer_core/mock"] diff --git a/sequencer_runner/Dockerfile b/sequencer/service/Dockerfile similarity index 61% rename from sequencer_runner/Dockerfile rename to sequencer/service/Dockerfile index 0efdf561..10641e9a 100644 --- a/sequencer_runner/Dockerfile +++ b/sequencer/service/Dockerfile @@ -26,7 +26,7 @@ RUN ARCH=$(uname -m); \ else \ echo "Using manual build for $ARCH"; \ git clone --depth 1 --branch release-3.0 https://github.com/risc0/risc0.git; \ - git clone --depth 1 --branch r0.1.94.0 https://github.com/risc0/rust.git; \ + git clone --depth 1 --branch r0.1.91.0 https://github.com/risc0/rust.git; \ cd /risc0; \ cargo install --path rzup; \ rzup build --path /rust rust --verbose; \ @@ -40,7 +40,7 @@ RUN r0vm --version # Install logos blockchain circuits RUN curl -sSL https://raw.githubusercontent.com/logos-blockchain/logos-blockchain/main/scripts/setup-logos-blockchain-circuits.sh | bash -WORKDIR /sequencer_runner +WORKDIR /sequencer_service # Build argument to enable standalone feature (defaults to false) ARG STANDALONE=false @@ -48,47 +48,49 @@ ARG STANDALONE=false # Planner stage - generates dependency recipe FROM chef AS planner COPY . . 
-RUN cargo chef prepare --bin sequencer_runner --recipe-path recipe.json +RUN cargo chef prepare --bin sequencer_service --recipe-path recipe.json # Builder stage - builds dependencies and application FROM chef AS builder ARG STANDALONE -COPY --from=planner /sequencer_runner/recipe.json recipe.json +COPY --from=planner /sequencer_service/recipe.json recipe.json # Build dependencies only (this layer will be cached) -RUN if [ "$STANDALONE" = "true" ]; then \ - cargo chef cook --bin sequencer_runner --features standalone --release --recipe-path recipe.json; \ +RUN --mount=type=cache,target=/usr/local/cargo/registry/index \ + --mount=type=cache,target=/usr/local/cargo/registry/cache \ + --mount=type=cache,target=/usr/local/cargo/git \ + --mount=type=cache,target=/sequencer_service/target \ + if [ "$STANDALONE" = "true" ]; then \ + cargo chef cook --bin sequencer_service --features standalone --release --recipe-path recipe.json; \ else \ - cargo chef cook --bin sequencer_runner --release --recipe-path recipe.json; \ + cargo chef cook --bin sequencer_service --release --recipe-path recipe.json; \ fi # Copy source code COPY . . 
-# Build the actual application -RUN if [ "$STANDALONE" = "true" ]; then \ - cargo build --release --features standalone --bin sequencer_runner; \ +# Build the actual application and copy the binary out of the cache mount +RUN --mount=type=cache,target=/usr/local/cargo/registry/index \ + --mount=type=cache,target=/usr/local/cargo/registry/cache \ + --mount=type=cache,target=/usr/local/cargo/git \ + --mount=type=cache,target=/sequencer_service/target \ + if [ "$STANDALONE" = "true" ]; then \ + cargo build --release --features standalone --bin sequencer_service; \ else \ - cargo build --release --bin sequencer_runner; \ - fi - -# Strip debug symbols to reduce binary size -RUN strip /sequencer_runner/target/release/sequencer_runner + cargo build --release --bin sequencer_service; \ + fi \ + && strip /sequencer_service/target/release/sequencer_service \ + && cp /sequencer_service/target/release/sequencer_service /usr/local/bin/sequencer_service # Runtime stage - minimal image FROM debian:trixie-slim -# Install runtime dependencies -RUN apt-get update \ - && apt-get install -y gosu jq \ - && rm -rf /var/lib/apt/lists/* - # Create non-root user for security RUN useradd -m -u 1000 -s /bin/bash sequencer_user && \ - mkdir -p /sequencer_runner /etc/sequencer_runner && \ - chown -R sequencer_user:sequencer_user /sequencer_runner /etc/sequencer_runner + mkdir -p /sequencer_service /etc/sequencer_service /var/lib/sequencer_service && \ + chown -R sequencer_user:sequencer_user /sequencer_service /etc/sequencer_service /var/lib/sequencer_service # Copy binary from builder -COPY --from=builder --chown=sequencer_user:sequencer_user /sequencer_runner/target/release/sequencer_runner /usr/local/bin/sequencer_runner +COPY --from=builder --chown=sequencer_user:sequencer_user /usr/local/bin/sequencer_service /usr/local/bin/sequencer_service # Copy r0vm binary from builder COPY --from=builder --chown=sequencer_user:sequencer_user /usr/local/bin/r0vm /usr/local/bin/r0vm @@ -96,9 +98,7 @@ 
COPY --from=builder --chown=sequencer_user:sequencer_user /usr/local/bin/r0vm /u # Copy logos blockchain circuits from builder COPY --from=builder --chown=sequencer_user:sequencer_user /root/.logos-blockchain-circuits /home/sequencer_user/.logos-blockchain-circuits -# Copy entrypoint script -COPY sequencer_runner/docker-entrypoint.sh /docker-entrypoint.sh -RUN chmod +x /docker-entrypoint.sh +VOLUME /var/lib/sequencer_service # Expose default port EXPOSE 3040 @@ -120,9 +120,7 @@ ENV RUST_LOG=info # Set explicit location for r0vm binary ENV RISC0_SERVER_PATH=/usr/local/bin/r0vm -USER root +USER sequencer_user -ENTRYPOINT ["/docker-entrypoint.sh"] - -WORKDIR /sequencer_runner -CMD ["sequencer_runner", "/etc/sequencer_runner"] +WORKDIR /sequencer_service +CMD ["sequencer_service", "/etc/sequencer_service/sequencer_config.json"] diff --git a/sequencer_runner/configs/debug/sequencer_config.json b/sequencer/service/configs/debug/sequencer_config.json similarity index 62% rename from sequencer_runner/configs/debug/sequencer_config.json rename to sequencer/service/configs/debug/sequencer_config.json index 2313ae20..4088fc4a 100644 --- a/sequencer_runner/configs/debug/sequencer_config.json +++ b/sequencer/service/configs/debug/sequencer_config.json @@ -1,6 +1,5 @@ { "home": ".", - "override_rust_log": null, "genesis_id": 1, "is_genesis_random": true, "max_num_tx_in_block": 20, @@ -8,7 +7,6 @@ "mempool_max_size": 1000, "block_create_timeout": "15s", "retry_pending_blocks_timeout": "5s", - "port": 3040, "bedrock_config": { "backoff": { "start_delay": "100ms", @@ -20,50 +18,50 @@ "indexer_rpc_url": "ws://localhost:8779", "initial_accounts": [ { - "account_id": "6iArKUXxhUJqS7kCaPNhwMWt3ro71PDyBj7jwAyE2VQV", + "account_id": "CbgR6tj5kWx5oziiFptM7jMvrQeYY3Mzaao6ciuhSr2r", "balance": 10000 }, { - "account_id": "7wHg9sbJwc6h3NP1S9bekfAzB8CHifEcxKswCKUt3YQo", + "account_id": "2RHZhw9h534Zr3eq2RGhQete2Hh667foECzXPmSkGni2", "balance": 20000 } ], "initial_commitments": [ { - "npk":[ - 
177, - 64, - 1, + "npk": [ + 139, + 19, + 158, 11, - 87, - 38, - 254, - 159, + 155, 231, - 165, - 1, - 94, - 64, - 137, - 243, - 76, - 249, - 101, - 251, - 129, - 33, - 101, - 189, - 30, - 42, - 11, - 191, - 34, - 103, - 186, - 227, - 230 - ] , + 85, + 206, + 132, + 228, + 220, + 114, + 145, + 89, + 113, + 156, + 238, + 142, + 242, + 74, + 182, + 91, + 43, + 100, + 6, + 190, + 31, + 15, + 31, + 88, + 96, + 204 + ], "account": { "program_owner": [ 0, @@ -82,38 +80,38 @@ }, { "npk": [ - 32, - 67, - 72, - 164, - 106, - 53, - 66, - 239, - 141, - 15, - 52, - 230, - 136, - 177, - 2, - 236, - 207, - 243, + 173, 134, - 135, - 210, - 143, - 87, - 232, + 33, + 223, + 54, + 226, + 10, + 71, 215, - 128, - 194, - 120, - 113, - 224, - 4, - 165 + 254, + 143, + 172, + 24, + 244, + 243, + 208, + 65, + 112, + 118, + 70, + 217, + 240, + 69, + 100, + 129, + 3, + 121, + 25, + 213, + 132, + 42, + 45 ], "account": { "program_owner": [ @@ -166,4 +164,4 @@ 37, 37 ] -} +} \ No newline at end of file diff --git a/sequencer_runner/configs/docker/sequencer_config.json b/sequencer/service/configs/docker/sequencer_config.json similarity index 64% rename from sequencer_runner/configs/docker/sequencer_config.json rename to sequencer/service/configs/docker/sequencer_config.json index ce79f4e2..f5a243d5 100644 --- a/sequencer_runner/configs/docker/sequencer_config.json +++ b/sequencer/service/configs/docker/sequencer_config.json @@ -1,13 +1,11 @@ { - "home": "/var/lib/sequencer_runner", - "override_rust_log": null, + "home": "/var/lib/sequencer_service", "genesis_id": 1, "is_genesis_random": true, "max_num_tx_in_block": 20, "max_block_size": "1 MiB", "mempool_max_size": 10000, "block_create_timeout": "10s", - "port": 3040, "retry_pending_blocks_timeout": "7s", "bedrock_config": { "backoff": { @@ -20,49 +18,49 @@ "indexer_rpc_url": "ws://localhost:8779", "initial_accounts": [ { - "account_id": "6iArKUXxhUJqS7kCaPNhwMWt3ro71PDyBj7jwAyE2VQV", + "account_id": 
"CbgR6tj5kWx5oziiFptM7jMvrQeYY3Mzaao6ciuhSr2r", "balance": 10000 }, { - "account_id": "7wHg9sbJwc6h3NP1S9bekfAzB8CHifEcxKswCKUt3YQo", + "account_id": "2RHZhw9h534Zr3eq2RGhQete2Hh667foECzXPmSkGni2", "balance": 20000 } ], "initial_commitments": [ { "npk": [ - 63, - 202, - 178, + 139, + 19, + 158, + 11, + 155, 231, - 183, - 82, - 237, - 212, - 216, - 221, - 215, - 255, - 153, - 101, - 177, - 161, - 254, - 210, - 128, - 122, - 54, - 190, - 230, - 151, - 183, - 64, - 225, - 229, - 113, - 1, + 85, + 206, + 132, 228, - 97 + 220, + 114, + 145, + 89, + 113, + 156, + 238, + 142, + 242, + 74, + 182, + 91, + 43, + 100, + 6, + 190, + 31, + 15, + 31, + 88, + 96, + 204 ], "account": { "program_owner": [ @@ -82,38 +80,38 @@ }, { "npk": [ - 192, - 251, - 166, - 243, - 167, - 236, - 84, - 249, - 35, - 136, - 130, + 173, + 134, + 33, + 223, + 54, + 226, + 10, + 71, + 215, + 254, + 143, 172, - 219, - 225, - 161, - 139, - 229, - 89, - 243, - 125, - 194, - 213, - 209, - 30, - 23, - 174, - 100, + 24, 244, - 124, - 74, - 140, - 47 + 243, + 208, + 65, + 112, + 118, + 70, + 217, + 240, + 69, + 100, + 129, + 3, + 121, + 25, + 213, + 132, + 42, + 45 ], "account": { "program_owner": [ diff --git a/sequencer/service/docker-compose.yml b/sequencer/service/docker-compose.yml new file mode 100644 index 00000000..cede8143 --- /dev/null +++ b/sequencer/service/docker-compose.yml @@ -0,0 +1,17 @@ +services: + sequencer_service: + image: lssa/sequencer_service + build: + context: ../.. 
+ dockerfile: sequencer/service/Dockerfile + container_name: sequencer_service + ports: + - "3040:3040" + volumes: + # Mount configuration file + - ./configs/docker/sequencer_config.json:/etc/sequencer_service/sequencer_config.json + # Mount data volume + - sequencer_data:/var/lib/sequencer_service + +volumes: + sequencer_data: diff --git a/sequencer/service/protocol/Cargo.toml b/sequencer/service/protocol/Cargo.toml new file mode 100644 index 00000000..be913104 --- /dev/null +++ b/sequencer/service/protocol/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "sequencer_service_protocol" +version = "0.1.0" +edition = "2024" +license = { workspace = true } + +[lints] +workspace = true + +[dependencies] +common.workspace = true +nssa.workspace = true +nssa_core.workspace = true diff --git a/sequencer/service/protocol/src/lib.rs b/sequencer/service/protocol/src/lib.rs new file mode 100644 index 00000000..ec0020ac --- /dev/null +++ b/sequencer/service/protocol/src/lib.rs @@ -0,0 +1,5 @@ +//! Reexports of types used by sequencer rpc specification. 
+ +pub use common::{HashType, block::Block, transaction::NSSATransaction}; +pub use nssa::{Account, AccountId, ProgramId}; +pub use nssa_core::{BlockId, Commitment, MembershipProof, account::Nonce}; diff --git a/sequencer/service/rpc/Cargo.toml b/sequencer/service/rpc/Cargo.toml new file mode 100644 index 00000000..d8f16b86 --- /dev/null +++ b/sequencer/service/rpc/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "sequencer_service_rpc" +version = "0.1.0" +edition = "2024" +license = { workspace = true } + +[lints] +workspace = true + +[dependencies] +sequencer_service_protocol.workspace = true + +jsonrpsee = { workspace = true, features = ["macros"] } + +[features] +client = ["jsonrpsee/client"] +server = ["jsonrpsee/server"] diff --git a/sequencer/service/rpc/src/lib.rs b/sequencer/service/rpc/src/lib.rs new file mode 100644 index 00000000..6c03cdb6 --- /dev/null +++ b/sequencer/service/rpc/src/lib.rs @@ -0,0 +1,92 @@ +use std::collections::BTreeMap; + +use jsonrpsee::proc_macros::rpc; +#[cfg(feature = "server")] +use jsonrpsee::types::ErrorObjectOwned; +#[cfg(feature = "client")] +pub use jsonrpsee::{core::ClientError, http_client::HttpClientBuilder as SequencerClientBuilder}; +use sequencer_service_protocol::{ + Account, AccountId, Block, BlockId, Commitment, HashType, MembershipProof, NSSATransaction, + Nonce, ProgramId, +}; + +#[cfg(all(not(feature = "server"), not(feature = "client")))] +compile_error!("At least one of `server` or `client` features must be enabled."); + +/// Type alias for RPC client. Only available when `client` feature is enabled. +/// +/// It's cheap to clone this client, so it can be cloned and shared across the application. 
+/// +/// # Example +/// +/// ```no_run +/// use common::transaction::NSSATransaction; +/// use sequencer_service_rpc::{RpcClient as _, SequencerClientBuilder}; +/// +/// let url = "http://localhost:3040".parse()?; +/// let client = SequencerClientBuilder::default().build(url)?; +/// +/// let tx: NSSATransaction = unimplemented!("Construct your transaction here"); +/// let tx_hash = client.send_transaction(tx).await?; +/// ``` +#[cfg(feature = "client")] +pub type SequencerClient = jsonrpsee::http_client::HttpClient; + +#[cfg_attr(all(feature = "server", not(feature = "client")), rpc(server))] +#[cfg_attr(all(feature = "client", not(feature = "server")), rpc(client))] +#[cfg_attr(all(feature = "server", feature = "client"), rpc(server, client))] +pub trait Rpc { + #[method(name = "sendTransaction")] + async fn send_transaction(&self, tx: NSSATransaction) -> Result; + + // TODO: expand healthcheck response into some kind of report + #[method(name = "checkHealth")] + async fn check_health(&self) -> Result<(), ErrorObjectOwned>; + + // TODO: These functions should be removed after wallet starts using indexer + // for this type of queries. 
+ // + // ============================================================================================= + + #[method(name = "getBlock")] + async fn get_block(&self, block_id: BlockId) -> Result, ErrorObjectOwned>; + + #[method(name = "getBlockRange")] + async fn get_block_range( + &self, + start_block_id: BlockId, + end_block_id: BlockId, + ) -> Result, ErrorObjectOwned>; + + #[method(name = "getLastBlockId")] + async fn get_last_block_id(&self) -> Result; + + #[method(name = "getAccountBalance")] + async fn get_account_balance(&self, account_id: AccountId) -> Result; + + #[method(name = "getTransaction")] + async fn get_transaction( + &self, + tx_hash: HashType, + ) -> Result, ErrorObjectOwned>; + + #[method(name = "getAccountsNonces")] + async fn get_accounts_nonces( + &self, + account_ids: Vec, + ) -> Result, ErrorObjectOwned>; + + #[method(name = "getProofForCommitment")] + async fn get_proof_for_commitment( + &self, + commitment: Commitment, + ) -> Result, ErrorObjectOwned>; + + #[method(name = "getAccount")] + async fn get_account(&self, account_id: AccountId) -> Result; + + #[method(name = "getProgramIds")] + async fn get_program_ids(&self) -> Result, ErrorObjectOwned>; + + // ============================================================================================= +} diff --git a/sequencer_runner/src/lib.rs b/sequencer/service/src/lib.rs similarity index 65% rename from sequencer_runner/src/lib.rs rename to sequencer/service/src/lib.rs index a17ecbf9..5373b31f 100644 --- a/sequencer_runner/src/lib.rs +++ b/sequencer/service/src/lib.rs @@ -1,59 +1,75 @@ -use std::{net::SocketAddr, path::PathBuf, sync::Arc, time::Duration}; +use std::{net::SocketAddr, sync::Arc, time::Duration}; -use actix_web::dev::ServerHandle; -use anyhow::{Context as _, Result}; -use clap::Parser; -use common::rpc_primitives::RpcConfig; -use futures::{FutureExt as _, never::Never}; +use anyhow::{Context as _, Result, anyhow}; +use bytesize::ByteSize; +use 
common::transaction::NSSATransaction; +use futures::never::Never; +use jsonrpsee::server::ServerHandle; #[cfg(not(feature = "standalone"))] use log::warn; use log::{error, info}; +use mempool::MemPoolHandle; #[cfg(feature = "standalone")] use sequencer_core::SequencerCoreWithMockClients as SequencerCore; -use sequencer_core::config::SequencerConfig; +pub use sequencer_core::config::*; #[cfg(not(feature = "standalone"))] use sequencer_core::{SequencerCore, block_settlement_client::BlockSettlementClientTrait as _}; -use sequencer_rpc::new_http_server; +use sequencer_service_rpc::RpcServer as _; use tokio::{sync::Mutex, task::JoinHandle}; -pub const RUST_LOG: &str = "RUST_LOG"; +pub mod service; -#[derive(Parser, Debug)] -#[clap(version)] -struct Args { - /// Path to configs. - home_dir: PathBuf, -} +const REQUEST_BODY_MAX_SIZE: ByteSize = ByteSize::mib(10); /// Handle to manage the sequencer and its tasks. /// -/// Implements `Drop` to ensure all tasks are aborted and the HTTP server is stopped when dropped. +/// Implements `Drop` to ensure all tasks are aborted and the RPC server is stopped when dropped. pub struct SequencerHandle { addr: SocketAddr, - http_server_handle: ServerHandle, + /// Option because of `Drop` which forbids to simply move out of `self` in `stopped()`. + server_handle: Option, main_loop_handle: JoinHandle>, retry_pending_blocks_loop_handle: JoinHandle>, listen_for_bedrock_blocks_loop_handle: JoinHandle>, } impl SequencerHandle { - /// Runs the sequencer indefinitely, monitoring its tasks. - /// - /// If no error occurs, this function will never return. 
+ const fn new( + addr: SocketAddr, + server_handle: ServerHandle, + main_loop_handle: JoinHandle>, + retry_pending_blocks_loop_handle: JoinHandle>, + listen_for_bedrock_blocks_loop_handle: JoinHandle>, + ) -> Self { + Self { + addr, + server_handle: Some(server_handle), + main_loop_handle, + retry_pending_blocks_loop_handle, + listen_for_bedrock_blocks_loop_handle, + } + } + + /// Wait for any of the sequencer tasks to fail and return the error. #[expect( clippy::integer_division_remainder_used, reason = "Generated by select! macro, can't be easily rewritten to avoid this lint" )] - pub async fn run_forever(&mut self) -> Result { + pub async fn failed(mut self) -> Result { let Self { addr: _, - http_server_handle: _, + server_handle, main_loop_handle, retry_pending_blocks_loop_handle, listen_for_bedrock_blocks_loop_handle, - } = self; + } = &mut self; + + let server_handle = server_handle.take().expect("Server handle is set"); tokio::select! { + () = server_handle.stopped() => { + Err(anyhow!("RPC Server stopped")) + } res = main_loop_handle => { res .context("Main loop task panicked")? @@ -72,11 +88,25 @@ impl SequencerHandle { } } + /// Check if all Sequencer tasks are still running. + /// + /// Return `false` if any of the tasks has failed and `true` otherwise. + /// Error of the failed task can be retrieved by awaiting on [`Self::failed()`]. 
#[must_use] - pub fn is_finished(&self) -> bool { - self.main_loop_handle.is_finished() - || self.retry_pending_blocks_loop_handle.is_finished() - || self.listen_for_bedrock_blocks_loop_handle.is_finished() + pub fn is_healthy(&self) -> bool { + let Self { + addr: _, + server_handle, + main_loop_handle, + retry_pending_blocks_loop_handle, + listen_for_bedrock_blocks_loop_handle, + } = self; + + let stopped = server_handle.as_ref().is_none_or(ServerHandle::is_stopped) + || main_loop_handle.is_finished() + || retry_pending_blocks_loop_handle.is_finished() + || listen_for_bedrock_blocks_loop_handle.is_finished(); + !stopped } #[must_use] @@ -89,7 +119,7 @@ impl Drop for SequencerHandle { fn drop(&mut self) { let Self { addr: _, - http_server_handle, + server_handle, main_loop_handle, retry_pending_blocks_loop_handle, listen_for_bedrock_blocks_loop_handle, @@ -99,31 +129,35 @@ impl Drop for SequencerHandle { retry_pending_blocks_loop_handle.abort(); listen_for_bedrock_blocks_loop_handle.abort(); - // Can't wait here as Drop can't be async, but anyway stop signal should be sent - http_server_handle.stop(true).now_or_never(); + let Some(handle) = server_handle else { + return; + }; + + if let Err(err) = handle.stop() { + error!("An error occurred while stopping Sequencer RPC server: {err}"); + } } } -pub async fn startup_sequencer(app_config: SequencerConfig) -> Result { - let block_timeout = app_config.block_create_timeout; - let retry_pending_blocks_timeout = app_config.retry_pending_blocks_timeout; - let port = app_config.port; +pub async fn run(config: SequencerConfig, port: u16) -> Result { + let block_timeout = config.block_create_timeout; + let retry_pending_blocks_timeout = config.retry_pending_blocks_timeout; + let max_block_size = config.max_block_size; - let (sequencer_core, mempool_handle) = SequencerCore::start_from_config(app_config).await; + let (sequencer_core, mempool_handle) = SequencerCore::start_from_config(config).await; info!("Sequencer core set 
up"); let seq_core_wrapped = Arc::new(Mutex::new(sequencer_core)); - let (http_server, addr) = new_http_server( - RpcConfig::with_port(port), + let (server_handle, addr) = run_server( Arc::clone(&seq_core_wrapped), mempool_handle, + port, + max_block_size.as_u64(), ) .await?; - info!("HTTP server started"); - let http_server_handle = http_server.handle(); - tokio::spawn(http_server); + info!("RPC server started"); #[cfg(not(feature = "standalone"))] { @@ -146,13 +180,42 @@ pub async fn startup_sequencer(app_config: SequencerConfig) -> Result>, + mempool_handle: MemPoolHandle, + port: u16, + max_block_size: u64, +) -> Result<(ServerHandle, SocketAddr)> { + let server = jsonrpsee::server::ServerBuilder::with_config( + jsonrpsee::server::ServerConfigBuilder::new() + .max_request_body_size( + u32::try_from(REQUEST_BODY_MAX_SIZE.as_u64()) + .expect("REQUEST_BODY_MAX_SIZE should be less than u32::MAX"), + ) + .build(), + ) + .build(SocketAddr::from(([0, 0, 0, 0], port))) + .await + .context("Failed to build RPC server")?; + + let addr = server + .local_addr() + .context("Failed to get local address of RPC server")?; + + info!("Starting Sequencer Service RPC server on {addr}"); + + let service = service::SequencerService::new(sequencer, mempool_handle, max_block_size); + let handle = server.start(service.into_rpc()); + Ok((handle, addr)) } async fn main_loop(seq_core: Arc>, block_timeout: Duration) -> Result { @@ -210,7 +273,7 @@ async fn retry_pending_blocks(seq_core: &Arc>) -> Result<() .create_inscribe_tx(block) .context("Failed to create inscribe tx for pending block")?; - debug!(">>>> Create inscribe: {:?}", now.elapsed()); + debug!("Create inscribe: {:?}", now.elapsed()); let now = Instant::now(); if let Err(e) = block_settlement_client @@ -222,7 +285,7 @@ async fn retry_pending_blocks(seq_core: &Arc>) -> Result<() block.header.block_id ); } - debug!(">>>> Post: {:?}", now.elapsed()); + debug!("Post: {:?}", now.elapsed()); } Ok(()) } @@ -287,33 +350,3 @@ async fn 
retry_pending_blocks_loop( ) -> Result { std::future::pending::>().await } - -pub async fn main_runner() -> Result<()> { - env_logger::init(); - - let args = Args::parse(); - let Args { home_dir } = args; - - let app_config = SequencerConfig::from_path(&home_dir.join("sequencer_config.json"))?; - - if let Some(rust_log) = &app_config.override_rust_log { - info!("RUST_LOG env var set to {rust_log:?}"); - - // SAFETY: there is no other threads running at this point - unsafe { - std::env::set_var(RUST_LOG, rust_log); - } - } - - // ToDo: Add restart on failures - let mut sequencer_handle = startup_sequencer(app_config).await?; - - info!("Sequencer running. Monitoring concurrent tasks..."); - - let Err(err) = sequencer_handle.run_forever().await; - error!("Sequencer failed: {err:#}"); - - info!("Shutting down sequencer..."); - - Ok(()) -} diff --git a/sequencer/service/src/main.rs b/sequencer/service/src/main.rs new file mode 100644 index 00000000..e78ad502 --- /dev/null +++ b/sequencer/service/src/main.rs @@ -0,0 +1,60 @@ +use std::path::PathBuf; + +use anyhow::Result; +use clap::Parser; +use log::{error, info}; +use tokio_util::sync::CancellationToken; + +#[derive(Debug, Parser)] +#[clap(version)] +struct Args { + #[clap(name = "config")] + config_path: PathBuf, + #[clap(short, long, default_value = "3040")] + port: u16, +} + +#[tokio::main] +#[expect( + clippy::integer_division_remainder_used, + reason = "Generated by select! macro, can't be easily rewritten to avoid this lint" +)] +async fn main() -> Result<()> { + env_logger::init(); + + let Args { config_path, port } = Args::parse(); + + let cancellation_token = listen_for_shutdown_signal(); + + let config = sequencer_service::SequencerConfig::from_path(&config_path)?; + let sequencer_handle = sequencer_service::run(config, port).await?; + + tokio::select! 
{ + () = cancellation_token.cancelled() => { + info!("Shutting down sequencer..."); + } + Err(err) = sequencer_handle.failed() => { + error!("Sequencer failed unexpectedly: {err}"); + } + } + + info!("Sequencer shutdown complete"); + + Ok(()) +} + +fn listen_for_shutdown_signal() -> CancellationToken { + let cancellation_token = CancellationToken::new(); + let cancellation_token_clone = cancellation_token.clone(); + + tokio::spawn(async move { + if let Err(err) = tokio::signal::ctrl_c().await { + error!("Failed to listen for Ctrl-C signal: {err}"); + return; + } + info!("Received Ctrl-C signal"); + cancellation_token_clone.cancel(); + }); + + cancellation_token +} diff --git a/sequencer/service/src/service.rs b/sequencer/service/src/service.rs new file mode 100644 index 00000000..71645363 --- /dev/null +++ b/sequencer/service/src/service.rs @@ -0,0 +1,183 @@ +use std::{collections::BTreeMap, sync::Arc}; + +use common::transaction::NSSATransaction; +use jsonrpsee::{ + core::async_trait, + types::{ErrorCode, ErrorObjectOwned}, +}; +use log::warn; +use mempool::MemPoolHandle; +use nssa::{self, program::Program}; +use sequencer_core::{ + DbError, SequencerCore, block_settlement_client::BlockSettlementClientTrait, + indexer_client::IndexerClientTrait, +}; +use sequencer_service_protocol::{ + Account, AccountId, Block, BlockId, Commitment, HashType, MembershipProof, Nonce, ProgramId, +}; +use tokio::sync::Mutex; + +const NOT_FOUND_ERROR_CODE: i32 = -31999; + +pub struct SequencerService { + sequencer: Arc>>, + mempool_handle: MemPoolHandle, + max_block_size: u64, +} + +impl SequencerService { + pub const fn new( + sequencer: Arc>>, + mempool_handle: MemPoolHandle, + max_block_size: u64, + ) -> Self { + Self { + sequencer, + mempool_handle, + max_block_size, + } + } +} + +#[async_trait] +impl + sequencer_service_rpc::RpcServer for SequencerService +{ + async fn send_transaction(&self, tx: NSSATransaction) -> Result { + // Reserve ~200 bytes for block header overhead + 
const BLOCK_HEADER_OVERHEAD: u64 = 200; + + let tx_hash = tx.hash(); + + let encoded_tx = + borsh::to_vec(&tx).expect("Transaction borsh serialization should not fail"); + let tx_size = u64::try_from(encoded_tx.len()).expect("Transaction size should fit in u64"); + + let max_tx_size = self.max_block_size.saturating_sub(BLOCK_HEADER_OVERHEAD); + + if tx_size > max_tx_size { + return Err(ErrorObjectOwned::owned( + ErrorCode::InvalidParams.code(), + format!("Transaction too large: size {tx_size}, max {max_tx_size}"), + None::<()>, + )); + } + + let authenticated_tx = tx + .transaction_stateless_check() + .inspect_err(|err| warn!("Error at pre_check {err:#?}")) + .map_err(|err| { + ErrorObjectOwned::owned( + ErrorCode::InvalidParams.code(), + format!("{err:?}"), + None::<()>, + ) + })?; + + self.mempool_handle + .push(authenticated_tx) + .await + .expect("Mempool is closed, this is a bug"); + + Ok(tx_hash) + } + + async fn check_health(&self) -> Result<(), ErrorObjectOwned> { + Ok(()) + } + + async fn get_block(&self, block_id: BlockId) -> Result, ErrorObjectOwned> { + let sequencer = self.sequencer.lock().await; + sequencer + .block_store() + .get_block_at_id(block_id) + .map_err(|err| internal_error(&err)) + } + + async fn get_block_range( + &self, + start_block_id: BlockId, + end_block_id: BlockId, + ) -> Result, ErrorObjectOwned> { + let sequencer = self.sequencer.lock().await; + (start_block_id..=end_block_id) + .map(|block_id| { + let block = sequencer + .block_store() + .get_block_at_id(block_id) + .map_err(|err| internal_error(&err))?; + block.ok_or_else(|| { + ErrorObjectOwned::owned( + NOT_FOUND_ERROR_CODE, + format!("Block with id {block_id} not found"), + None::<()>, + ) + }) + }) + .collect::, _>>() + } + + async fn get_last_block_id(&self) -> Result { + let sequencer = self.sequencer.lock().await; + Ok(sequencer.chain_height()) + } + + async fn get_account_balance(&self, account_id: AccountId) -> Result { + let sequencer = self.sequencer.lock().await; + 
let account = sequencer.state().get_account_by_id(account_id); + Ok(account.balance) + } + + async fn get_transaction( + &self, + tx_hash: HashType, + ) -> Result, ErrorObjectOwned> { + let sequencer = self.sequencer.lock().await; + Ok(sequencer.block_store().get_transaction_by_hash(tx_hash)) + } + + async fn get_accounts_nonces( + &self, + account_ids: Vec, + ) -> Result, ErrorObjectOwned> { + let sequencer = self.sequencer.lock().await; + let nonces = account_ids + .into_iter() + .map(|account_id| sequencer.state().get_account_by_id(account_id).nonce) + .collect(); + Ok(nonces) + } + + async fn get_proof_for_commitment( + &self, + commitment: Commitment, + ) -> Result, ErrorObjectOwned> { + let sequencer = self.sequencer.lock().await; + Ok(sequencer.state().get_proof_for_commitment(&commitment)) + } + + async fn get_account(&self, account_id: AccountId) -> Result { + let sequencer = self.sequencer.lock().await; + Ok(sequencer.state().get_account_by_id(account_id)) + } + + async fn get_program_ids(&self) -> Result, ErrorObjectOwned> { + let mut program_ids = BTreeMap::new(); + program_ids.insert( + "authenticated_transfer".to_owned(), + Program::authenticated_transfer_program().id(), + ); + program_ids.insert("token".to_owned(), Program::token().id()); + program_ids.insert("pinata".to_owned(), Program::pinata().id()); + program_ids.insert("amm".to_owned(), Program::amm().id()); + program_ids.insert( + "privacy_preserving_circuit".to_owned(), + nssa::PRIVACY_PRESERVING_CIRCUIT_ID, + ); + Ok(program_ids) + } +} + +fn internal_error(err: &DbError) -> ErrorObjectOwned { + ErrorObjectOwned::owned(ErrorCode::InternalError.code(), err.to_string(), None::<()>) +} diff --git a/sequencer_rpc/Cargo.toml b/sequencer_rpc/Cargo.toml deleted file mode 100644 index 5c76ba34..00000000 --- a/sequencer_rpc/Cargo.toml +++ /dev/null @@ -1,39 +0,0 @@ -[package] -name = "sequencer_rpc" -version = "0.1.0" -edition = "2024" -license = { workspace = true } - -[lints] -workspace = true - 
-[dependencies] -nssa.workspace = true -common.workspace = true -mempool.workspace = true -sequencer_core = { workspace = true } -bedrock_client.workspace = true - -anyhow.workspace = true -serde_json.workspace = true -log.workspace = true -serde.workspace = true -actix-cors.workspace = true -futures.workspace = true -base58.workspace = true -hex.workspace = true -tempfile.workspace = true -base64.workspace = true -itertools.workspace = true -actix-web.workspace = true -tokio.workspace = true -borsh.workspace = true -bytesize.workspace = true - -[dev-dependencies] -sequencer_core = { workspace = true, features = ["mock"] } - -[features] -default = [] -# Includes types to run the sequencer in standalone mode -standalone = ["sequencer_core/mock"] diff --git a/sequencer_rpc/src/lib.rs b/sequencer_rpc/src/lib.rs deleted file mode 100644 index 47e4fa75..00000000 --- a/sequencer_rpc/src/lib.rs +++ /dev/null @@ -1,55 +0,0 @@ -use std::sync::Arc; - -use common::{ - rpc_primitives::errors::{RpcError, RpcErrorKind}, - transaction::NSSATransaction, -}; -use mempool::MemPoolHandle; -pub use net_utils::*; -#[cfg(feature = "standalone")] -use sequencer_core::mock::{MockBlockSettlementClient, MockIndexerClient}; -use sequencer_core::{ - SequencerCore, - block_settlement_client::{BlockSettlementClient, BlockSettlementClientTrait}, - indexer_client::{IndexerClient, IndexerClientTrait}, -}; -use serde::Serialize; -use serde_json::Value; -use tokio::sync::Mutex; - -use self::types::err_rpc::RpcErr; - -pub mod net_utils; -pub mod process; -pub mod types; - -#[cfg(feature = "standalone")] -pub type JsonHandlerWithMockClients = JsonHandler; - -// ToDo: Add necessary fields -pub struct JsonHandler< - BC: BlockSettlementClientTrait = BlockSettlementClient, - IC: IndexerClientTrait = IndexerClient, -> { - sequencer_state: Arc>>, - mempool_handle: MemPoolHandle, - max_block_size: usize, -} - -fn respond(val: T) -> Result { - Ok(serde_json::to_value(val)?) 
-} - -#[must_use] -pub fn rpc_error_responce_inverter(err: RpcError) -> RpcError { - let content = err.error_struct.map(|error| match error { - RpcErrorKind::HandlerError(val) | RpcErrorKind::InternalError(val) => val, - RpcErrorKind::RequestValidationError(vall) => serde_json::to_value(vall).unwrap(), - }); - RpcError { - error_struct: None, - code: err.code, - message: err.message, - data: content, - } -} diff --git a/sequencer_rpc/src/net_utils.rs b/sequencer_rpc/src/net_utils.rs deleted file mode 100644 index e306ec0e..00000000 --- a/sequencer_rpc/src/net_utils.rs +++ /dev/null @@ -1,104 +0,0 @@ -use std::{io, net::SocketAddr, sync::Arc}; - -use actix_cors::Cors; -use actix_web::{App, Error as HttpError, HttpResponse, HttpServer, http, middleware, web}; -use common::{ - rpc_primitives::{RpcConfig, message::Message}, - transaction::NSSATransaction, -}; -use futures::{Future, FutureExt as _}; -use log::info; -use mempool::MemPoolHandle; -#[cfg(not(feature = "standalone"))] -use sequencer_core::SequencerCore; -#[cfg(feature = "standalone")] -use sequencer_core::SequencerCoreWithMockClients as SequencerCore; -use tokio::sync::Mutex; - -#[cfg(not(feature = "standalone"))] -use super::JsonHandler; -use crate::process::Process; - -pub const SHUTDOWN_TIMEOUT_SECS: u64 = 10; - -pub const NETWORK: &str = "network"; - -#[cfg(feature = "standalone")] -type JsonHandler = super::JsonHandlerWithMockClients; - -pub(crate) fn rpc_handler( - message: web::Json, - handler: web::Data

, -) -> impl Future> { - let response = async move { - let message = handler.process(message.0).await?; - Ok(HttpResponse::Ok().json(&message)) - }; - response.boxed() -} - -fn get_cors(cors_allowed_origins: &[String]) -> Cors { - let mut cors = Cors::permissive(); - if cors_allowed_origins != ["*".to_owned()] { - for origin in cors_allowed_origins { - cors = cors.allowed_origin(origin); - } - } - cors.allowed_methods(vec!["GET", "POST"]) - .allowed_headers(vec![http::header::AUTHORIZATION, http::header::ACCEPT]) - .allowed_header(http::header::CONTENT_TYPE) - .max_age(3600) -} - -pub async fn new_http_server( - config: RpcConfig, - seuquencer_core: Arc>, - mempool_handle: MemPoolHandle, -) -> io::Result<(actix_web::dev::Server, SocketAddr)> { - let RpcConfig { - addr, - cors_allowed_origins, - limits_config, - } = config; - info!(target:NETWORK, "Starting HTTP server at {addr}"); - let max_block_size = seuquencer_core - .lock() - .await - .sequencer_config() - .max_block_size - .as_u64() - .try_into() - .expect("`max_block_size` is expected to fit into usize"); - let handler = web::Data::new(JsonHandler { - sequencer_state: Arc::clone(&seuquencer_core), - mempool_handle, - max_block_size, - }); - - // HTTP server - let http_server = HttpServer::new(move || { - let json_limit = limits_config - .json_payload_max_size - .as_u64() - .try_into() - .expect("`json_payload_max_size` is expected to fit into usize"); - App::new() - .wrap(get_cors(&cors_allowed_origins)) - .app_data(handler.clone()) - .app_data(web::JsonConfig::default().limit(json_limit)) - .wrap(middleware::Logger::default()) - .service(web::resource("/").route(web::post().to(rpc_handler::))) - }) - .bind(addr)? 
- .shutdown_timeout(SHUTDOWN_TIMEOUT_SECS) - .disable_signals(); - - let [final_addr] = http_server - .addrs() - .try_into() - .expect("Exactly one address bound is expected for sequencer HTTP server"); - - info!(target:NETWORK, "HTTP server started at {final_addr}"); - - Ok((http_server.run(), final_addr)) -} diff --git a/sequencer_rpc/src/process.rs b/sequencer_rpc/src/process.rs deleted file mode 100644 index 17c46f03..00000000 --- a/sequencer_rpc/src/process.rs +++ /dev/null @@ -1,786 +0,0 @@ -use std::collections::HashMap; - -use actix_web::Error as HttpError; -use base64::{Engine as _, engine::general_purpose}; -use common::{ - block::{AccountInitialData, HashableBlockData}, - rpc_primitives::{ - errors::RpcError, - message::{Message, Request}, - parser::RpcRequest as _, - requests::{ - GetAccountBalanceRequest, GetAccountBalanceResponse, GetAccountRequest, - GetAccountResponse, GetAccountsNoncesRequest, GetAccountsNoncesResponse, - GetBlockDataRequest, GetBlockDataResponse, GetBlockRangeDataRequest, - GetBlockRangeDataResponse, GetGenesisIdRequest, GetGenesisIdResponse, - GetInitialTestnetAccountsRequest, GetLastBlockRequest, GetLastBlockResponse, - GetProgramIdsRequest, GetProgramIdsResponse, GetProofForCommitmentRequest, - GetProofForCommitmentResponse, GetTransactionByHashRequest, - GetTransactionByHashResponse, HelloRequest, HelloResponse, SendTxRequest, - SendTxResponse, - }, - }, - transaction::{NSSATransaction, TransactionMalformationError}, -}; -use itertools::Itertools as _; -use log::warn; -use nssa::{self, program::Program}; -use sequencer_core::{ - block_settlement_client::BlockSettlementClientTrait, indexer_client::IndexerClientTrait, -}; -use serde_json::Value; - -use super::{JsonHandler, respond, types::err_rpc::RpcErr}; - -pub const HELLO: &str = "hello"; -pub const SEND_TX: &str = "send_tx"; -pub const GET_BLOCK: &str = "get_block"; -pub const GET_BLOCK_RANGE: &str = "get_block_range"; -pub const GET_GENESIS: &str = "get_genesis"; -pub const 
GET_LAST_BLOCK: &str = "get_last_block"; -pub const GET_ACCOUNT_BALANCE: &str = "get_account_balance"; -pub const GET_TRANSACTION_BY_HASH: &str = "get_transaction_by_hash"; -pub const GET_ACCOUNTS_NONCES: &str = "get_accounts_nonces"; -pub const GET_ACCOUNT: &str = "get_account"; -pub const GET_PROOF_FOR_COMMITMENT: &str = "get_proof_for_commitment"; -pub const GET_PROGRAM_IDS: &str = "get_program_ids"; - -pub const HELLO_FROM_SEQUENCER: &str = "HELLO_FROM_SEQUENCER"; - -pub const TRANSACTION_SUBMITTED: &str = "Transaction submitted"; - -pub const GET_INITIAL_TESTNET_ACCOUNTS: &str = "get_initial_testnet_accounts"; - -pub trait Process: Send + Sync + 'static { - fn process(&self, message: Message) -> impl Future> + Send; -} - -impl< - BC: BlockSettlementClientTrait + Send + Sync + 'static, - IC: IndexerClientTrait + Send + Sync + 'static, -> Process for JsonHandler -{ - async fn process(&self, message: Message) -> Result { - let id = message.id(); - if let Message::Request(request) = message { - let message_inner = self - .process_request_internal(request) - .await - .map_err(|e| e.0); - Ok(Message::response(id, message_inner)) - } else { - Ok(Message::error(RpcError::parse_error( - "JSON RPC Request format was expected".to_owned(), - ))) - } - } -} - -impl JsonHandler { - /// Example of request processing. 
- fn process_temp_hello(request: Request) -> Result { - let _hello_request = HelloRequest::parse(Some(request.params))?; - - let response = HelloResponse { - greeting: HELLO_FROM_SEQUENCER.to_owned(), - }; - - respond(response) - } - - async fn process_send_tx(&self, request: Request) -> Result { - // Check transaction size against block size limit - // Reserve ~200 bytes for block header overhead - const BLOCK_HEADER_OVERHEAD: usize = 200; - - let send_tx_req = SendTxRequest::parse(Some(request.params))?; - let tx = borsh::from_slice::(&send_tx_req.transaction).unwrap(); - - let tx_hash = tx.hash(); - - let tx_size = send_tx_req.transaction.len(); - - let max_tx_size = self.max_block_size.saturating_sub(BLOCK_HEADER_OVERHEAD); - - if tx_size > max_tx_size { - return Err(TransactionMalformationError::TransactionTooLarge { - size: tx_size, - max: max_tx_size, - } - .into()); - } - - let authenticated_tx = tx - .transaction_stateless_check() - .inspect_err(|err| warn!("Error at pre_check {err:#?}"))?; - - // TODO: Do we need a timeout here? It will be usable if we have too many transactions to - // process - self.mempool_handle - .push(authenticated_tx) - .await - .expect("Mempool is closed, this is a bug"); - - let response = SendTxResponse { - status: TRANSACTION_SUBMITTED.to_owned(), - tx_hash, - }; - - respond(response) - } - - async fn process_get_block_data(&self, request: Request) -> Result { - let get_block_req = GetBlockDataRequest::parse(Some(request.params))?; - - let block = { - let state = self.sequencer_state.lock().await; - - state - .block_store() - .get_block_at_id(get_block_req.block_id)? 
- }; - - let response = GetBlockDataResponse { - block: borsh::to_vec(&HashableBlockData::from(block)).unwrap(), - }; - - respond(response) - } - - async fn process_get_block_range_data(&self, request: Request) -> Result { - let get_block_req = GetBlockRangeDataRequest::parse(Some(request.params))?; - - let blocks = { - let state = self.sequencer_state.lock().await; - (get_block_req.start_block_id..=get_block_req.end_block_id) - .map(|block_id| state.block_store().get_block_at_id(block_id)) - .map_ok(|block| { - borsh::to_vec(&HashableBlockData::from(block)) - .expect("derived BorshSerialize should never fail") - }) - .collect::, _>>()? - }; - - let response = GetBlockRangeDataResponse { blocks }; - - respond(response) - } - - async fn process_get_genesis(&self, request: Request) -> Result { - let _get_genesis_req = GetGenesisIdRequest::parse(Some(request.params))?; - - let genesis_id = { - let state = self.sequencer_state.lock().await; - - state.block_store().genesis_id() - }; - - let response = GetGenesisIdResponse { genesis_id }; - - respond(response) - } - - async fn process_get_last_block(&self, request: Request) -> Result { - let _get_last_block_req = GetLastBlockRequest::parse(Some(request.params))?; - - let last_block = { - let state = self.sequencer_state.lock().await; - - state.chain_height() - }; - - let response = GetLastBlockResponse { last_block }; - - respond(response) - } - - /// Returns the initial accounts for testnet. - /// `ToDo`: Useful only for testnet and needs to be removed later. - async fn get_initial_testnet_accounts(&self, request: Request) -> Result { - let _get_initial_testnet_accounts_request = - GetInitialTestnetAccountsRequest::parse(Some(request.params))?; - - let initial_accounts: Vec = { - let state = self.sequencer_state.lock().await; - - state.sequencer_config().initial_accounts.clone() - }; - - respond(initial_accounts) - } - - /// Returns the balance of the account at the given `account_id`. 
- /// The `account_id` must be a valid hex string of the correct length. - async fn process_get_account_balance(&self, request: Request) -> Result { - let get_account_req = GetAccountBalanceRequest::parse(Some(request.params))?; - let account_id = get_account_req.account_id; - - let balance = { - let state = self.sequencer_state.lock().await; - let account = state.state().get_account_by_id(account_id); - account.balance - }; - - let response = GetAccountBalanceResponse { balance }; - - respond(response) - } - - /// Returns the nonces of the accounts at the given `account_ids`. - /// Each `account_id` must be a valid hex string of the correct length. - async fn process_get_accounts_nonces(&self, request: Request) -> Result { - let get_account_nonces_req = GetAccountsNoncesRequest::parse(Some(request.params))?; - let account_ids = get_account_nonces_req.account_ids; - - let nonces = { - let state = self.sequencer_state.lock().await; - - account_ids - .into_iter() - .map(|account_id| state.state().get_account_by_id(account_id).nonce.0) - .collect() - }; - - let response = GetAccountsNoncesResponse { nonces }; - - respond(response) - } - - /// Returns account struct for given `account_id`. - /// `AccountId` must be a valid hex string of the correct length. - async fn process_get_account(&self, request: Request) -> Result { - let get_account_nonces_req = GetAccountRequest::parse(Some(request.params))?; - - let account_id = get_account_nonces_req.account_id; - - let account = { - let state = self.sequencer_state.lock().await; - - state.state().get_account_by_id(account_id) - }; - - let response = GetAccountResponse { account }; - - respond(response) - } - - /// Returns the transaction corresponding to the given hash, if it exists in the blockchain. - /// The hash must be a valid hex string of the correct length. 
- async fn process_get_transaction_by_hash(&self, request: Request) -> Result { - let get_transaction_req = GetTransactionByHashRequest::parse(Some(request.params))?; - let hash = get_transaction_req.hash; - - let transaction = { - let state = self.sequencer_state.lock().await; - state - .block_store() - .get_transaction_by_hash(hash) - .map(|tx| borsh::to_vec(&tx).unwrap()) - }; - let base64_encoded = transaction.map(|tx| general_purpose::STANDARD.encode(tx)); - let response = GetTransactionByHashResponse { - transaction: base64_encoded, - }; - respond(response) - } - - /// Returns the commitment proof, corresponding to commitment. - async fn process_get_proof_by_commitment(&self, request: Request) -> Result { - let get_proof_req = GetProofForCommitmentRequest::parse(Some(request.params))?; - - let membership_proof = { - let state = self.sequencer_state.lock().await; - state - .state() - .get_proof_for_commitment(&get_proof_req.commitment) - }; - let response = GetProofForCommitmentResponse { membership_proof }; - respond(response) - } - - fn process_get_program_ids(request: Request) -> Result { - let _get_proof_req = GetProgramIdsRequest::parse(Some(request.params))?; - - let mut program_ids = HashMap::new(); - program_ids.insert( - "authenticated_transfer".to_owned(), - Program::authenticated_transfer_program().id(), - ); - program_ids.insert("token".to_owned(), Program::token().id()); - program_ids.insert("pinata".to_owned(), Program::pinata().id()); - program_ids.insert("amm".to_owned(), Program::amm().id()); - program_ids.insert( - "privacy_preserving_circuit".to_owned(), - nssa::PRIVACY_PRESERVING_CIRCUIT_ID, - ); - let response = GetProgramIdsResponse { program_ids }; - respond(response) - } - - pub async fn process_request_internal(&self, request: Request) -> Result { - match request.method.as_ref() { - HELLO => Self::process_temp_hello(request), - SEND_TX => self.process_send_tx(request).await, - GET_BLOCK => self.process_get_block_data(request).await, - 
GET_BLOCK_RANGE => self.process_get_block_range_data(request).await, - GET_GENESIS => self.process_get_genesis(request).await, - GET_LAST_BLOCK => self.process_get_last_block(request).await, - GET_INITIAL_TESTNET_ACCOUNTS => self.get_initial_testnet_accounts(request).await, - GET_ACCOUNT_BALANCE => self.process_get_account_balance(request).await, - GET_ACCOUNTS_NONCES => self.process_get_accounts_nonces(request).await, - GET_ACCOUNT => self.process_get_account(request).await, - GET_TRANSACTION_BY_HASH => self.process_get_transaction_by_hash(request).await, - GET_PROOF_FOR_COMMITMENT => self.process_get_proof_by_commitment(request).await, - GET_PROGRAM_IDS => Self::process_get_program_ids(request), - _ => Err(RpcErr(RpcError::method_not_found(request.method))), - } - } -} - -#[cfg(test)] -mod tests { - use std::{str::FromStr as _, sync::Arc, time::Duration}; - - use base58::ToBase58 as _; - use base64::{Engine as _, engine::general_purpose}; - use bedrock_client::BackoffConfig; - use common::{ - block::AccountInitialData, config::BasicAuth, test_utils::sequencer_sign_key_for_testing, - transaction::NSSATransaction, - }; - use nssa::AccountId; - use sequencer_core::{ - config::{BedrockConfig, SequencerConfig}, - mock::{MockBlockSettlementClient, MockIndexerClient, SequencerCoreWithMockClients}, - }; - use serde_json::Value; - use tempfile::tempdir; - use tokio::sync::Mutex; - - use crate::rpc_handler; - - type JsonHandlerWithMockClients = - crate::JsonHandler; - - fn sequencer_config_for_tests() -> SequencerConfig { - let tempdir = tempdir().unwrap(); - let home = tempdir.path().to_path_buf(); - let acc1_id: Vec = vec![ - 148, 179, 206, 253, 199, 51, 82, 86, 232, 2, 152, 122, 80, 243, 54, 207, 237, 112, 83, - 153, 44, 59, 204, 49, 128, 84, 160, 227, 216, 149, 97, 102, - ]; - - let acc2_id: Vec = vec![ - 30, 145, 107, 3, 207, 73, 192, 230, 160, 63, 238, 207, 18, 69, 54, 216, 103, 244, 92, - 94, 124, 248, 42, 16, 141, 19, 119, 18, 14, 226, 140, 204, - ]; - - let 
initial_acc1 = AccountInitialData { - account_id: AccountId::from_str(&acc1_id.to_base58()).unwrap(), - balance: 10000, - }; - - let initial_acc2 = AccountInitialData { - account_id: AccountId::from_str(&acc2_id.to_base58()).unwrap(), - balance: 20000, - }; - - let initial_accounts = vec![initial_acc1, initial_acc2]; - - SequencerConfig { - home, - override_rust_log: Some("info".to_owned()), - genesis_id: 1, - is_genesis_random: false, - max_num_tx_in_block: 10, - max_block_size: bytesize::ByteSize::mib(1), - mempool_max_size: 1000, - block_create_timeout: Duration::from_secs(1), - port: 8080, - initial_accounts, - initial_commitments: vec![], - signing_key: *sequencer_sign_key_for_testing().value(), - retry_pending_blocks_timeout: Duration::from_secs(60 * 4), - bedrock_config: BedrockConfig { - backoff: BackoffConfig { - start_delay: Duration::from_millis(100), - max_retries: 5, - }, - channel_id: [42; 32].into(), - node_url: "http://localhost:8080".parse().unwrap(), - auth: Some(BasicAuth { - username: "user".to_owned(), - password: None, - }), - }, - indexer_rpc_url: "ws://localhost:8779".parse().unwrap(), - } - } - - async fn components_for_tests() -> ( - JsonHandlerWithMockClients, - Vec, - NSSATransaction, - ) { - let config = sequencer_config_for_tests(); - - let (mut sequencer_core, mempool_handle) = - SequencerCoreWithMockClients::start_from_config(config).await; - let initial_accounts = sequencer_core.sequencer_config().initial_accounts.clone(); - - let signing_key = nssa::PrivateKey::try_new([1; 32]).unwrap(); - let balance_to_move = 10; - let tx = common::test_utils::create_transaction_native_token_transfer( - AccountId::from_str( - &[ - 148, 179, 206, 253, 199, 51, 82, 86, 232, 2, 152, 122, 80, 243, 54, 207, 237, - 112, 83, 153, 44, 59, 204, 49, 128, 84, 160, 227, 216, 149, 97, 102, - ] - .to_base58(), - ) - .unwrap(), - 0, - AccountId::from_str(&[2; 32].to_base58()).unwrap(), - balance_to_move, - &signing_key, - ); - - mempool_handle - 
.push(tx.clone()) - .await - .expect("Mempool is closed, this is a bug"); - - sequencer_core - .produce_new_block_with_mempool_transactions() - .unwrap(); - - let max_block_size = - usize::try_from(sequencer_core.sequencer_config().max_block_size.as_u64()) - .expect("`max_block_size` is expected to fit in usize"); - let sequencer_core = Arc::new(Mutex::new(sequencer_core)); - - ( - JsonHandlerWithMockClients { - sequencer_state: sequencer_core, - mempool_handle, - max_block_size, - }, - initial_accounts, - tx, - ) - } - - async fn call_rpc_handler_with_json( - handler: JsonHandlerWithMockClients, - request_json: Value, - ) -> Value { - use actix_web::{App, test, web}; - - let app = test::init_service(App::new().app_data(web::Data::new(handler)).route( - "/", - web::post().to(rpc_handler::), - )) - .await; - - let req = test::TestRequest::post() - .uri("/") - .set_json(request_json) - .to_request(); - - let resp = test::call_service(&app, req).await; - let body = test::read_body(resp).await; - - serde_json::from_slice(&body).unwrap() - } - - #[actix_web::test] - async fn get_account_balance_for_non_existent_account() { - let (json_handler, _, _) = components_for_tests().await; - let request = serde_json::json!({ - "jsonrpc": "2.0", - "method": "get_account_balance", - "params": { "account_id": "11".repeat(16) }, - "id": 1 - }); - let expected_response = serde_json::json!({ - "id": 1, - "jsonrpc": "2.0", - "result": { - "balance": 0 - } - }); - - let response = call_rpc_handler_with_json(json_handler, request).await; - - assert_eq!(response, expected_response); - } - - #[actix_web::test] - async fn get_account_balance_for_invalid_base58() { - let (json_handler, _, _) = components_for_tests().await; - let request = serde_json::json!({ - "jsonrpc": "2.0", - "method": "get_account_balance", - "params": { "account_id": "not_a_valid_base58" }, - "id": 1 - }); - let expected_response = serde_json::json!({ - "jsonrpc": "2.0", - "id": 1, - "error": { - "cause": { - "info": { 
- "error_message": "Failed parsing args: invalid base58: InvalidBase58Character('_', 3)" - }, - "name": "PARSE_ERROR" - }, - "code": -32700, - "data": "Failed parsing args: invalid base58: InvalidBase58Character('_', 3)", - "message": "Parse error", - "name": "REQUEST_VALIDATION_ERROR" - }, - }); - let response = call_rpc_handler_with_json(json_handler, request).await; - - assert_eq!(response, expected_response); - } - - #[actix_web::test] - async fn get_account_balance_for_invalid_length() { - let (json_handler, _, _) = components_for_tests().await; - let request = serde_json::json!({ - "jsonrpc": "2.0", - "method": "get_account_balance", - "params": { "account_id": "cafecafe" }, - "id": 1 - }); - let expected_response = serde_json::json!({ - "jsonrpc": "2.0", - "id": 1, - "error": { - "cause": { - "info": { - "error_message": "Failed parsing args: invalid length: expected 32 bytes, got 6" - }, - "name": "PARSE_ERROR" - }, - "code": -32700, - "data": "Failed parsing args: invalid length: expected 32 bytes, got 6", - "message": "Parse error", - "name": "REQUEST_VALIDATION_ERROR" - }, - }); - let response = call_rpc_handler_with_json(json_handler, request).await; - - assert_eq!(response, expected_response); - } - - #[actix_web::test] - async fn get_account_balance_for_existing_account() { - let (json_handler, initial_accounts, _) = components_for_tests().await; - - let acc1_id = initial_accounts[0].account_id; - - let request = serde_json::json!({ - "jsonrpc": "2.0", - "method": "get_account_balance", - "params": { "account_id": acc1_id }, - "id": 1 - }); - let expected_response = serde_json::json!({ - "id": 1, - "jsonrpc": "2.0", - "result": { - "balance": 10000 - 10 - } - }); - - let response = call_rpc_handler_with_json(json_handler, request).await; - - assert_eq!(response, expected_response); - } - - #[actix_web::test] - async fn get_accounts_nonces_for_non_existent_account() { - let (json_handler, _, _) = components_for_tests().await; - let request = 
serde_json::json!({ - "jsonrpc": "2.0", - "method": "get_accounts_nonces", - "params": { "account_ids": ["11".repeat(16)] }, - "id": 1 - }); - let expected_response = serde_json::json!({ - "id": 1, - "jsonrpc": "2.0", - "result": { - "nonces": [ 0 ] - } - }); - - let response = call_rpc_handler_with_json(json_handler, request).await; - - assert_eq!(response, expected_response); - } - - #[actix_web::test] - async fn get_accounts_nonces_for_existent_account() { - let (json_handler, initial_accounts, _) = components_for_tests().await; - - let acc1_id = initial_accounts[0].account_id; - let acc2_id = initial_accounts[1].account_id; - - let request = serde_json::json!({ - "jsonrpc": "2.0", - "method": "get_accounts_nonces", - "params": { "account_ids": [acc1_id, acc2_id] }, - "id": 1 - }); - let expected_response = serde_json::json!({ - "id": 1, - "jsonrpc": "2.0", - "result": { - "nonces": [ 1, 0 ] - } - }); - - let response = call_rpc_handler_with_json(json_handler, request).await; - - assert_eq!(response, expected_response); - } - - #[actix_web::test] - async fn get_account_data_for_non_existent_account() { - let (json_handler, _, _) = components_for_tests().await; - let request = serde_json::json!({ - "jsonrpc": "2.0", - "method": "get_account", - "params": { "account_id": "11".repeat(16) }, - "id": 1 - }); - let expected_response = serde_json::json!({ - "id": 1, - "jsonrpc": "2.0", - "result": { - "account": { - "balance": 0, - "nonce": 0, - "program_owner": [ 0, 0, 0, 0, 0, 0, 0, 0], - "data": [], - } - } - }); - - let response = call_rpc_handler_with_json(json_handler, request).await; - - assert_eq!(response, expected_response); - } - - #[actix_web::test] - async fn get_transaction_by_hash_for_non_existent_hash() { - let (json_handler, _, _) = components_for_tests().await; - let request = serde_json::json!({ - "jsonrpc": "2.0", - "method": "get_transaction_by_hash", - "params": { "hash": "cafe".repeat(16) }, - "id": 1 - }); - let expected_response = 
serde_json::json!({ - "id": 1, - "jsonrpc": "2.0", - "result": { - "transaction": null - } - }); - - let response = call_rpc_handler_with_json(json_handler, request).await; - - assert_eq!(response, expected_response); - } - - #[actix_web::test] - async fn get_transaction_by_hash_for_invalid_hex() { - let (json_handler, _, _) = components_for_tests().await; - let request = serde_json::json!({ - "jsonrpc": "2.0", - "method": "get_transaction_by_hash", - "params": { "hash": "not_a_valid_hex" }, - "id": 1 - }); - let expected_response = serde_json::json!({ - "jsonrpc": "2.0", - "id": 1, - "error": { - "cause": { - "info": { - "error_message": "Failed parsing args: Odd number of digits" - }, - "name": "PARSE_ERROR" - }, - "code": -32700, - "data": "Failed parsing args: Odd number of digits", - "message": "Parse error", - "name": "REQUEST_VALIDATION_ERROR" - }, - }); - - let response = call_rpc_handler_with_json(json_handler, request).await; - - assert_eq!(response, expected_response); - } - - #[actix_web::test] - async fn get_transaction_by_hash_for_invalid_length() { - let (json_handler, _, _) = components_for_tests().await; - let request = serde_json::json!({ - "jsonrpc": "2.0", - "method": "get_transaction_by_hash", - "params": { "hash": "cafecafe" }, - "id": 1 - }); - let expected_response = serde_json::json!({ - "jsonrpc": "2.0", - "id": 1, - "error": { - "cause": { - "info": { - "error_message": "Failed parsing args: Invalid string length" - }, - "name": "PARSE_ERROR" - }, - "code": -32700, - "data": "Failed parsing args: Invalid string length", - "message": "Parse error", - "name": "REQUEST_VALIDATION_ERROR" - } - }); - - let response = call_rpc_handler_with_json(json_handler, request).await; - - assert_eq!(response, expected_response); - } - - #[actix_web::test] - async fn get_transaction_by_hash_for_existing_transaction() { - let (json_handler, _, tx) = components_for_tests().await; - let tx_hash_hex = hex::encode(tx.hash()); - let expected_base64_encoded = 
general_purpose::STANDARD.encode(borsh::to_vec(&tx).unwrap()); - - let request = serde_json::json!({ - "jsonrpc": "2.0", - "method": "get_transaction_by_hash", - "params": { "hash": tx_hash_hex}, - "id": 1 - }); - - let expected_response = serde_json::json!({ - "id": 1, - "jsonrpc": "2.0", - "result": { - "transaction": expected_base64_encoded, - } - }); - let response = call_rpc_handler_with_json(json_handler, request).await; - - assert_eq!(response, expected_response); - } -} diff --git a/sequencer_rpc/src/types/err_rpc.rs b/sequencer_rpc/src/types/err_rpc.rs deleted file mode 100644 index 4cb75606..00000000 --- a/sequencer_rpc/src/types/err_rpc.rs +++ /dev/null @@ -1,49 +0,0 @@ -use common::{ - rpc_primitives::errors::{RpcError, RpcParseError}, - transaction::TransactionMalformationError, -}; - -macro_rules! standard_rpc_err_kind { - ($type_name:path) => { - impl RpcErrKind for $type_name { - fn into_rpc_err(self) -> RpcError { - self.into() - } - } - }; -} - -pub struct RpcErr(pub RpcError); - -pub type RpcErrInternal = anyhow::Error; - -pub trait RpcErrKind: 'static { - fn into_rpc_err(self) -> RpcError; -} - -impl From for RpcErr { - fn from(e: T) -> Self { - Self(e.into_rpc_err()) - } -} - -standard_rpc_err_kind!(RpcError); -standard_rpc_err_kind!(RpcParseError); - -impl RpcErrKind for serde_json::Error { - fn into_rpc_err(self) -> RpcError { - RpcError::serialization_error(&self.to_string()) - } -} - -impl RpcErrKind for RpcErrInternal { - fn into_rpc_err(self) -> RpcError { - RpcError::new_internal_error(None, &format!("{self:#?}")) - } -} - -impl RpcErrKind for TransactionMalformationError { - fn into_rpc_err(self) -> RpcError { - RpcError::invalid_params(Some(serde_json::to_value(self).unwrap())) - } -} diff --git a/sequencer_rpc/src/types/mod.rs b/sequencer_rpc/src/types/mod.rs deleted file mode 100644 index 0b78fea1..00000000 --- a/sequencer_rpc/src/types/mod.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod err_rpc; diff --git 
a/sequencer_runner/docker-compose.yml b/sequencer_runner/docker-compose.yml deleted file mode 100644 index 5301962c..00000000 --- a/sequencer_runner/docker-compose.yml +++ /dev/null @@ -1,14 +0,0 @@ -services: - sequencer_runner: - image: lssa/sequencer_runner - build: - context: .. - dockerfile: sequencer_runner/Dockerfile - container_name: sequencer_runner - ports: - - "3040:3040" - volumes: - # Mount configuration folder - - ./configs/docker:/etc/sequencer_runner - # Mount data folder - - ./data:/var/lib/sequencer_runner diff --git a/sequencer_runner/docker-entrypoint.sh b/sequencer_runner/docker-entrypoint.sh deleted file mode 100644 index fb117131..00000000 --- a/sequencer_runner/docker-entrypoint.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/sh - -# This is an entrypoint script for the sequencer_runner Docker container, -# it's not meant to be executed outside of the container. - -set -e - -CONFIG="/etc/sequencer_runner/sequencer_config.json" - -# Check config file exists -if [ ! -f "$CONFIG" ]; then - echo "Config file not found: $CONFIG" >&2 - exit 1 -fi - -# Parse home dir -HOME_DIR=$(jq -r '.home' "$CONFIG") - -if [ -z "$HOME_DIR" ] || [ "$HOME_DIR" = "null" ]; then - echo "'home' key missing in config" >&2 - exit 1 -fi - -# Give permissions to the data directory and switch to non-root user -if [ "$(id -u)" = "0" ]; then - mkdir -p "$HOME_DIR" - chown -R sequencer_user:sequencer_user "$HOME_DIR" - exec gosu sequencer_user "$@" -fi diff --git a/sequencer_runner/src/main.rs b/sequencer_runner/src/main.rs deleted file mode 100644 index 3bf4ee2d..00000000 --- a/sequencer_runner/src/main.rs +++ /dev/null @@ -1,16 +0,0 @@ -use anyhow::Result; -use sequencer_runner::main_runner; - -pub const NUM_THREADS: usize = 4; - -// TODO: Why it requires config as a directory and not as a file? 
-fn main() -> Result<()> { - actix::System::with_tokio_rt(|| { - tokio::runtime::Builder::new_multi_thread() - .worker_threads(NUM_THREADS) - .enable_all() - .build() - .unwrap() - }) - .block_on(main_runner()) -} diff --git a/storage/src/cells/mod.rs b/storage/src/cells/mod.rs new file mode 100644 index 00000000..76c1ff8c --- /dev/null +++ b/storage/src/cells/mod.rs @@ -0,0 +1,96 @@ +use std::sync::Arc; + +use borsh::{BorshDeserialize, BorshSerialize}; +use rocksdb::{BoundColumnFamily, DBWithThreadMode, MultiThreaded, WriteBatch}; + +use crate::{DbResult, error::DbError}; + +pub mod shared_cells; + +pub trait SimpleStorableCell { + const CF_NAME: &'static str; + const CELL_NAME: &'static str; + type KeyParams; + + fn key_constructor(_params: Self::KeyParams) -> DbResult> { + borsh::to_vec(&Self::CELL_NAME).map_err(|err| { + DbError::borsh_cast_message( + err, + Some(format!("Failed to serialize {:?}", Self::CELL_NAME)), + ) + }) + } + + fn column_ref(db: &DBWithThreadMode) -> Arc> { + db.cf_handle(Self::CF_NAME) + .unwrap_or_else(|| panic!("Column family {:?} must be present", Self::CF_NAME)) + } +} + +pub trait SimpleReadableCell: SimpleStorableCell + BorshDeserialize { + fn get(db: &DBWithThreadMode, params: Self::KeyParams) -> DbResult { + let res = Self::get_opt(db, params)?; + + res.ok_or_else(|| DbError::db_interaction_error(format!("{:?} not found", Self::CELL_NAME))) + } + + fn get_opt( + db: &DBWithThreadMode, + params: Self::KeyParams, + ) -> DbResult> { + let cf_ref = Self::column_ref(db); + let res = db + .get_cf(&cf_ref, Self::key_constructor(params)?) 
+ .map_err(|rerr| { + DbError::rocksdb_cast_message( + rerr, + Some(format!("Failed to read {:?}", Self::CELL_NAME)), + ) + })?; + + res.map(|data| { + borsh::from_slice::(&data).map_err(|err| { + DbError::borsh_cast_message( + err, + Some(format!("Failed to deserialize {:?}", Self::CELL_NAME)), + ) + }) + }) + .transpose() + } +} + +pub trait SimpleWritableCell: SimpleStorableCell + BorshSerialize { + fn value_constructor(&self) -> DbResult>; + + fn put(&self, db: &DBWithThreadMode, params: Self::KeyParams) -> DbResult<()> { + let cf_ref = Self::column_ref(db); + db.put_cf( + &cf_ref, + Self::key_constructor(params)?, + self.value_constructor()?, + ) + .map_err(|rerr| { + DbError::rocksdb_cast_message( + rerr, + Some(format!("Failed to write {:?}", Self::CELL_NAME)), + ) + })?; + Ok(()) + } + + fn put_batch( + &self, + db: &DBWithThreadMode, + params: Self::KeyParams, + write_batch: &mut WriteBatch, + ) -> DbResult<()> { + let cf_ref = Self::column_ref(db); + write_batch.put_cf( + &cf_ref, + Self::key_constructor(params)?, + self.value_constructor()?, + ); + Ok(()) + } +} diff --git a/storage/src/cells/shared_cells.rs b/storage/src/cells/shared_cells.rs new file mode 100644 index 00000000..2a76edf3 --- /dev/null +++ b/storage/src/cells/shared_cells.rs @@ -0,0 +1,89 @@ +use borsh::{BorshDeserialize, BorshSerialize}; +use common::block::Block; + +use crate::{ + BLOCK_CELL_NAME, CF_BLOCK_NAME, CF_META_NAME, DB_META_FIRST_BLOCK_IN_DB_KEY, + DB_META_FIRST_BLOCK_SET_KEY, DB_META_LAST_BLOCK_IN_DB_KEY, DbResult, + cells::{SimpleReadableCell, SimpleStorableCell, SimpleWritableCell}, + error::DbError, +}; + +#[derive(Debug, BorshSerialize, BorshDeserialize)] +pub struct LastBlockCell(pub u64); + +impl SimpleStorableCell for LastBlockCell { + type KeyParams = (); + + const CELL_NAME: &'static str = DB_META_LAST_BLOCK_IN_DB_KEY; + const CF_NAME: &'static str = CF_META_NAME; +} + +impl SimpleReadableCell for LastBlockCell {} + +impl SimpleWritableCell for LastBlockCell { + fn 
value_constructor(&self) -> DbResult> { + borsh::to_vec(&self).map_err(|err| { + DbError::borsh_cast_message(err, Some("Failed to serialize last block id".to_owned())) + }) + } +} + +#[derive(Debug, BorshSerialize, BorshDeserialize)] +pub struct FirstBlockSetCell(pub bool); + +impl SimpleStorableCell for FirstBlockSetCell { + type KeyParams = (); + + const CELL_NAME: &'static str = DB_META_FIRST_BLOCK_SET_KEY; + const CF_NAME: &'static str = CF_META_NAME; +} + +impl SimpleReadableCell for FirstBlockSetCell {} + +impl SimpleWritableCell for FirstBlockSetCell { + fn value_constructor(&self) -> DbResult> { + borsh::to_vec(&self).map_err(|err| { + DbError::borsh_cast_message( + err, + Some("Failed to serialize first block set flag".to_owned()), + ) + }) + } +} + +#[derive(Debug, BorshSerialize, BorshDeserialize)] +pub struct FirstBlockCell(pub u64); + +impl SimpleStorableCell for FirstBlockCell { + type KeyParams = (); + + const CELL_NAME: &'static str = DB_META_FIRST_BLOCK_IN_DB_KEY; + const CF_NAME: &'static str = CF_META_NAME; +} + +impl SimpleReadableCell for FirstBlockCell {} + +#[derive(Debug, BorshSerialize, BorshDeserialize)] +pub struct BlockCell(pub Block); + +impl SimpleStorableCell for BlockCell { + type KeyParams = u64; + + const CELL_NAME: &'static str = BLOCK_CELL_NAME; + const CF_NAME: &'static str = CF_BLOCK_NAME; + + fn key_constructor(params: Self::KeyParams) -> DbResult> { + // ToDo: Replace with increasing ordering serialization + borsh::to_vec(¶ms).map_err(|err| { + DbError::borsh_cast_message( + err, + Some(format!( + "Failed to serialize {:?} key params", + Self::CELL_NAME + )), + ) + }) + } +} + +impl SimpleReadableCell for BlockCell {} diff --git a/storage/src/indexer.rs b/storage/src/indexer.rs deleted file mode 100644 index 534a1c0b..00000000 --- a/storage/src/indexer.rs +++ /dev/null @@ -1,1346 +0,0 @@ -use std::{collections::HashMap, path::Path, sync::Arc}; - -use common::{ - block::{Block, BlockId}, - transaction::NSSATransaction, -}; 
-use nssa::V02State; -use rocksdb::{ - BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options, WriteBatch, -}; - -use crate::error::DbError; - -/// Maximal size of stored blocks in base. -/// -/// Used to control db size. -/// -/// Currently effectively unbounded. -pub const BUFF_SIZE_ROCKSDB: usize = usize::MAX; - -/// Size of stored blocks cache in memory. -/// -/// Keeping small to not run out of memory. -pub const CACHE_SIZE: usize = 1000; - -/// Key base for storing metainformation about id of first block in db. -pub const DB_META_FIRST_BLOCK_IN_DB_KEY: &str = "first_block_in_db"; -/// Key base for storing metainformation about id of last current block in db. -pub const DB_META_LAST_BLOCK_IN_DB_KEY: &str = "last_block_in_db"; -/// Key base for storing metainformation about id of last observed L1 lib header in db. -pub const DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY: &str = - "last_observed_l1_lib_header_in_db"; -/// Key base for storing metainformation which describe if first block has been set. -pub const DB_META_FIRST_BLOCK_SET_KEY: &str = "first_block_set"; -/// Key base for storing metainformation about the last breakpoint. -pub const DB_META_LAST_BREAKPOINT_ID: &str = "last_breakpoint_id"; - -/// Interval between state breakpoints. -pub const BREAKPOINT_INTERVAL: u8 = 100; - -/// Name of block column family. -pub const CF_BLOCK_NAME: &str = "cf_block"; -/// Name of meta column family. -pub const CF_META_NAME: &str = "cf_meta"; -/// Name of breakpoint column family. -pub const CF_BREAKPOINT_NAME: &str = "cf_breakpoint"; -/// Name of hash to id map column family. -pub const CF_HASH_TO_ID: &str = "cf_hash_to_id"; -/// Name of tx hash to id map column family. -pub const CF_TX_TO_ID: &str = "cf_tx_to_id"; -/// Name of account meta column family. -pub const CF_ACC_META: &str = "cf_acc_meta"; -/// Name of account id to tx hash map column family. 
-pub const CF_ACC_TO_TX: &str = "cf_acc_to_tx"; - -pub type DbResult = Result; - -pub struct RocksDBIO { - pub db: DBWithThreadMode, -} - -impl RocksDBIO { - pub fn open_or_create( - path: &Path, - genesis_block: &Block, - initial_state: &V02State, - ) -> DbResult { - let mut cf_opts = Options::default(); - cf_opts.set_max_write_buffer_number(16); - // ToDo: Add more column families for different data - let cfb = ColumnFamilyDescriptor::new(CF_BLOCK_NAME, cf_opts.clone()); - let cfmeta = ColumnFamilyDescriptor::new(CF_META_NAME, cf_opts.clone()); - let cfbreakpoint = ColumnFamilyDescriptor::new(CF_BREAKPOINT_NAME, cf_opts.clone()); - let cfhti = ColumnFamilyDescriptor::new(CF_HASH_TO_ID, cf_opts.clone()); - let cftti = ColumnFamilyDescriptor::new(CF_TX_TO_ID, cf_opts.clone()); - let cfameta = ColumnFamilyDescriptor::new(CF_ACC_META, cf_opts.clone()); - let cfatt = ColumnFamilyDescriptor::new(CF_ACC_TO_TX, cf_opts.clone()); - - let mut db_opts = Options::default(); - db_opts.create_missing_column_families(true); - db_opts.create_if_missing(true); - let db = DBWithThreadMode::::open_cf_descriptors( - &db_opts, - path, - vec![cfb, cfmeta, cfbreakpoint, cfhti, cftti, cfameta, cfatt], - ) - .map_err(|err| DbError::RocksDbError { - error: err, - additional_info: Some("Failed to open or create DB".to_owned()), - })?; - - let dbio = Self { db }; - - let is_start_set = dbio.get_meta_is_first_block_set()?; - if !is_start_set { - let block_id = genesis_block.header.block_id; - dbio.put_meta_last_block_in_db(block_id)?; - dbio.put_meta_first_block_in_db(genesis_block)?; - dbio.put_meta_is_first_block_set()?; - - // First breakpoint setup - dbio.put_breakpoint(0, initial_state)?; - dbio.put_meta_last_breakpoint_id(0)?; - } - - Ok(dbio) - } - - pub fn destroy(path: &Path) -> DbResult<()> { - let mut cf_opts = Options::default(); - cf_opts.set_max_write_buffer_number(16); - // ToDo: Add more column families for different data - let _cfb = 
ColumnFamilyDescriptor::new(CF_BLOCK_NAME, cf_opts.clone()); - let _cfmeta = ColumnFamilyDescriptor::new(CF_META_NAME, cf_opts.clone()); - let _cfsnapshot = ColumnFamilyDescriptor::new(CF_BREAKPOINT_NAME, cf_opts.clone()); - let _cfhti = ColumnFamilyDescriptor::new(CF_HASH_TO_ID, cf_opts.clone()); - let _cftti = ColumnFamilyDescriptor::new(CF_TX_TO_ID, cf_opts.clone()); - let _cfameta = ColumnFamilyDescriptor::new(CF_ACC_META, cf_opts.clone()); - let _cfatt = ColumnFamilyDescriptor::new(CF_ACC_TO_TX, cf_opts.clone()); - - let mut db_opts = Options::default(); - db_opts.create_missing_column_families(true); - db_opts.create_if_missing(true); - DBWithThreadMode::::destroy(&db_opts, path) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None)) - } - - // Columns - - pub fn meta_column(&self) -> Arc> { - self.db.cf_handle(CF_META_NAME).unwrap() - } - - pub fn block_column(&self) -> Arc> { - self.db.cf_handle(CF_BLOCK_NAME).unwrap() - } - - pub fn breakpoint_column(&self) -> Arc> { - self.db.cf_handle(CF_BREAKPOINT_NAME).unwrap() - } - - pub fn hash_to_id_column(&self) -> Arc> { - self.db.cf_handle(CF_HASH_TO_ID).unwrap() - } - - pub fn tx_hash_to_id_column(&self) -> Arc> { - self.db.cf_handle(CF_TX_TO_ID).unwrap() - } - - pub fn account_id_to_tx_hash_column(&self) -> Arc> { - self.db.cf_handle(CF_ACC_TO_TX).unwrap() - } - - pub fn account_meta_column(&self) -> Arc> { - self.db.cf_handle(CF_ACC_META).unwrap() - } - - // Meta - - pub fn get_meta_first_block_in_db(&self) -> DbResult { - let cf_meta = self.meta_column(); - let res = self - .db - .get_cf( - &cf_meta, - borsh::to_vec(&DB_META_FIRST_BLOCK_IN_DB_KEY).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize DB_META_FIRST_BLOCK_IN_DB_KEY".to_owned()), - ) - })?, - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - - if let Some(data) = res { - Ok(borsh::from_slice::(&data).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to deserialize first 
block".to_owned()), - ) - })?) - } else { - Err(DbError::db_interaction_error( - "First block not found".to_owned(), - )) - } - } - - pub fn get_meta_last_block_in_db(&self) -> DbResult { - let cf_meta = self.meta_column(); - let res = self - .db - .get_cf( - &cf_meta, - borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_owned()), - ) - })?, - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - - if let Some(data) = res { - Ok(borsh::from_slice::(&data).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to deserialize last block".to_owned()), - ) - })?) - } else { - Err(DbError::db_interaction_error( - "Last block not found".to_owned(), - )) - } - } - - pub fn get_meta_last_observed_l1_lib_header_in_db(&self) -> DbResult> { - let cf_meta = self.meta_column(); - let res = self - .db - .get_cf( - &cf_meta, - borsh::to_vec(&DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY).map_err( - |err| { - DbError::borsh_cast_message( - err, - Some( - "Failed to serialize DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY".to_owned(), - ), - ) - }, - )?, - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - - res.map(|data| { - borsh::from_slice::<[u8; 32]>(&data).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to deserialize last l1 lib header".to_owned()), - ) - }) - }) - .transpose() - } - - pub fn get_meta_is_first_block_set(&self) -> DbResult { - let cf_meta = self.meta_column(); - let res = self - .db - .get_cf( - &cf_meta, - borsh::to_vec(&DB_META_FIRST_BLOCK_SET_KEY).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize DB_META_FIRST_BLOCK_SET_KEY".to_owned()), - ) - })?, - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - - Ok(res.is_some()) - } - - pub fn get_meta_last_breakpoint_id(&self) -> DbResult { - let cf_meta = self.meta_column(); - let res = 
self - .db - .get_cf( - &cf_meta, - borsh::to_vec(&DB_META_LAST_BREAKPOINT_ID).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize DB_META_LAST_BREAKPOINT_ID".to_owned()), - ) - })?, - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - - if let Some(data) = res { - Ok(borsh::from_slice::(&data).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to deserialize last breakpoint id".to_owned()), - ) - })?) - } else { - Err(DbError::db_interaction_error( - "Last breakpoint id not found".to_owned(), - )) - } - } - - pub fn put_meta_first_block_in_db(&self, block: &Block) -> DbResult<()> { - let cf_meta = self.meta_column(); - self.db - .put_cf( - &cf_meta, - borsh::to_vec(&DB_META_FIRST_BLOCK_IN_DB_KEY).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize DB_META_FIRST_BLOCK_IN_DB_KEY".to_owned()), - ) - })?, - borsh::to_vec(&block.header.block_id).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize first block id".to_owned()), - ) - })?, - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - - self.put_block(block, [0; 32])?; - Ok(()) - } - - pub fn put_meta_last_block_in_db(&self, block_id: u64) -> DbResult<()> { - let cf_meta = self.meta_column(); - self.db - .put_cf( - &cf_meta, - borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_owned()), - ) - })?, - borsh::to_vec(&block_id).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize last block id".to_owned()), - ) - })?, - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - Ok(()) - } - - pub fn put_meta_last_observed_l1_lib_header_in_db( - &self, - l1_lib_header: [u8; 32], - ) -> DbResult<()> { - let cf_meta = self.meta_column(); - self.db - .put_cf( - &cf_meta, - 
borsh::to_vec(&DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY).map_err( - |err| { - DbError::borsh_cast_message( - err, - Some( - "Failed to serialize DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY".to_owned(), - ), - ) - }, - )?, - borsh::to_vec(&l1_lib_header).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize last l1 block header".to_owned()), - ) - })?, - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - Ok(()) - } - - pub fn put_meta_last_breakpoint_id(&self, br_id: u64) -> DbResult<()> { - let cf_meta = self.meta_column(); - self.db - .put_cf( - &cf_meta, - borsh::to_vec(&DB_META_LAST_BREAKPOINT_ID).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize DB_META_LAST_BREAKPOINT_ID".to_owned()), - ) - })?, - borsh::to_vec(&br_id).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize last block id".to_owned()), - ) - })?, - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - Ok(()) - } - - pub fn put_meta_is_first_block_set(&self) -> DbResult<()> { - let cf_meta = self.meta_column(); - self.db - .put_cf( - &cf_meta, - borsh::to_vec(&DB_META_FIRST_BLOCK_SET_KEY).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize DB_META_FIRST_BLOCK_SET_KEY".to_owned()), - ) - })?, - [1_u8; 1], - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - Ok(()) - } - - // Block - - pub fn put_block(&self, block: &Block, l1_lib_header: [u8; 32]) -> DbResult<()> { - let cf_block = self.block_column(); - let cf_hti = self.hash_to_id_column(); - let cf_tti: Arc> = self.tx_hash_to_id_column(); - - // ToDo: rewrite this with write batching - - self.db - .put_cf( - &cf_block, - borsh::to_vec(&block.header.block_id).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize block id".to_owned()), - ) - })?, - borsh::to_vec(&block).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to 
serialize block data".to_owned()), - ) - })?, - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - - let last_curr_block = self.get_meta_last_block_in_db()?; - - if block.header.block_id > last_curr_block { - self.put_meta_last_block_in_db(block.header.block_id)?; - self.put_meta_last_observed_l1_lib_header_in_db(l1_lib_header)?; - } - - self.db - .put_cf( - &cf_hti, - borsh::to_vec(&block.header.hash).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize block hash".to_owned()), - ) - })?, - borsh::to_vec(&block.header.block_id).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize block id".to_owned()), - ) - })?, - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - - let mut acc_to_tx_map: HashMap<[u8; 32], Vec<[u8; 32]>> = HashMap::new(); - - for tx in &block.body.transactions { - let tx_hash = tx.hash(); - - self.db - .put_cf( - &cf_tti, - borsh::to_vec(&tx_hash).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize tx hash".to_owned()), - ) - })?, - borsh::to_vec(&block.header.block_id).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize block id".to_owned()), - ) - })?, - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - - let acc_ids = tx - .affected_public_account_ids() - .into_iter() - .map(nssa::AccountId::into_value) - .collect::>(); - - for acc_id in acc_ids { - acc_to_tx_map - .entry(acc_id) - .and_modify(|tx_hashes| tx_hashes.push(tx_hash.into())) - .or_insert_with(|| vec![tx_hash.into()]); - } - } - - #[expect( - clippy::iter_over_hash_type, - reason = "RocksDB will keep ordering persistent" - )] - for (acc_id, tx_hashes) in acc_to_tx_map { - self.put_account_transactions(acc_id, &tx_hashes)?; - } - - if block - .header - .block_id - .is_multiple_of(u64::from(BREAKPOINT_INTERVAL)) - { - self.put_next_breakpoint()?; - } - - Ok(()) - } - - pub fn get_block(&self, block_id: u64) -> DbResult { 
- let cf_block = self.block_column(); - let res = self - .db - .get_cf( - &cf_block, - borsh::to_vec(&block_id).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize block id".to_owned()), - ) - })?, - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - - if let Some(data) = res { - Ok(borsh::from_slice::(&data).map_err(|serr| { - DbError::borsh_cast_message( - serr, - Some("Failed to deserialize block data".to_owned()), - ) - })?) - } else { - Err(DbError::db_interaction_error( - "Block on this id not found".to_owned(), - )) - } - } - - pub fn get_block_batch(&self, before: Option, limit: u64) -> DbResult> { - let cf_block = self.block_column(); - let mut block_batch = vec![]; - - // Determine the starting block ID - let start_block_id = if let Some(before_id) = before { - before_id.saturating_sub(1) - } else { - // Get the latest block ID - self.get_meta_last_block_in_db()? - }; - - // ToDo: Multi get this - - for i in 0..limit { - let block_id = start_block_id.saturating_sub(i); - if block_id == 0 { - break; - } - - let res = self - .db - .get_cf( - &cf_block, - borsh::to_vec(&block_id).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize block id".to_owned()), - ) - })?, - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - - let block = if let Some(data) = res { - Ok(borsh::from_slice::(&data).map_err(|serr| { - DbError::borsh_cast_message( - serr, - Some("Failed to deserialize block data".to_owned()), - ) - })?) 
- } else { - // Block not found, assuming that previous one was the last - break; - }?; - - block_batch.push(block); - } - - Ok(block_batch) - } - - // State - - pub fn put_breakpoint(&self, br_id: u64, breakpoint: &V02State) -> DbResult<()> { - let cf_br = self.breakpoint_column(); - - self.db - .put_cf( - &cf_br, - borsh::to_vec(&br_id).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize breakpoint id".to_owned()), - ) - })?, - borsh::to_vec(&breakpoint).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize breakpoint data".to_owned()), - ) - })?, - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None)) - } - - pub fn get_breakpoint(&self, br_id: u64) -> DbResult { - let cf_br = self.breakpoint_column(); - let res = self - .db - .get_cf( - &cf_br, - borsh::to_vec(&br_id).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize breakpoint id".to_owned()), - ) - })?, - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - - if let Some(data) = res { - Ok(borsh::from_slice::(&data).map_err(|serr| { - DbError::borsh_cast_message( - serr, - Some("Failed to deserialize breakpoint data".to_owned()), - ) - })?) - } else { - Err(DbError::db_interaction_error( - "Breakpoint on this id not found".to_owned(), - )) - } - } - - pub fn calculate_state_for_id(&self, block_id: u64) -> DbResult { - let last_block = self.get_meta_last_block_in_db()?; - - if block_id <= last_block { - let br_id = closest_breakpoint_id(block_id); - let mut breakpoint = self.get_breakpoint(br_id)?; - - // ToDo: update it to handle any genesis id - // right now works correctly only if genesis_id < BREAKPOINT_INTERVAL - let start = if br_id != 0 { - u64::from(BREAKPOINT_INTERVAL) - .checked_mul(br_id) - .expect("Reached maximum breakpoint id") - } else { - self.get_meta_first_block_in_db()? 
- }; - - for id in start..=block_id { - let block = self.get_block(id)?; - - for transaction in block.body.transactions { - transaction - .transaction_stateless_check() - .map_err(|err| { - DbError::db_interaction_error(format!( - "transaction pre check failed with err {err:?}" - )) - })? - .execute_check_on_state(&mut breakpoint) - .map_err(|err| { - DbError::db_interaction_error(format!( - "transaction execution failed with err {err:?}" - )) - })?; - } - } - - Ok(breakpoint) - } else { - Err(DbError::db_interaction_error( - "Block on this id not found".to_owned(), - )) - } - } - - pub fn final_state(&self) -> DbResult { - self.calculate_state_for_id(self.get_meta_last_block_in_db()?) - } - - pub fn put_next_breakpoint(&self) -> DbResult<()> { - let last_block = self.get_meta_last_block_in_db()?; - let next_breakpoint_id = self - .get_meta_last_breakpoint_id()? - .checked_add(1) - .expect("Reached maximum breakpoint id"); - let block_to_break_id = next_breakpoint_id - .checked_mul(u64::from(BREAKPOINT_INTERVAL)) - .expect("Reached maximum breakpoint id"); - - if block_to_break_id <= last_block { - let next_breakpoint = self.calculate_state_for_id(block_to_break_id)?; - - self.put_breakpoint(next_breakpoint_id, &next_breakpoint)?; - self.put_meta_last_breakpoint_id(next_breakpoint_id) - } else { - Err(DbError::db_interaction_error( - "Breakpoint not yet achieved".to_owned(), - )) - } - } - - // Mappings - - pub fn get_block_id_by_hash(&self, hash: [u8; 32]) -> DbResult { - let cf_hti = self.hash_to_id_column(); - let res = self - .db - .get_cf( - &cf_hti, - borsh::to_vec(&hash).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize block hash".to_owned()), - ) - })?, - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - - if let Some(data) = res { - Ok(borsh::from_slice::(&data).map_err(|serr| { - DbError::borsh_cast_message(serr, Some("Failed to deserialize block id".to_owned())) - })?) 
- } else { - Err(DbError::db_interaction_error( - "Block on this hash not found".to_owned(), - )) - } - } - - pub fn get_block_id_by_tx_hash(&self, tx_hash: [u8; 32]) -> DbResult { - let cf_tti = self.tx_hash_to_id_column(); - let res = self - .db - .get_cf( - &cf_tti, - borsh::to_vec(&tx_hash).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize transaction hash".to_owned()), - ) - })?, - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - - if let Some(data) = res { - Ok(borsh::from_slice::(&data).map_err(|serr| { - DbError::borsh_cast_message(serr, Some("Failed to deserialize block id".to_owned())) - })?) - } else { - Err(DbError::db_interaction_error( - "Block for this tx hash not found".to_owned(), - )) - } - } - - // Accounts meta - - fn update_acc_meta_batch( - &self, - acc_id: [u8; 32], - num_tx: u64, - write_batch: &mut WriteBatch, - ) -> DbResult<()> { - let cf_ameta = self.account_meta_column(); - - write_batch.put_cf( - &cf_ameta, - borsh::to_vec(&acc_id).map_err(|err| { - DbError::borsh_cast_message(err, Some("Failed to serialize account id".to_owned())) - })?, - borsh::to_vec(&num_tx).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize acc metadata".to_owned()), - ) - })?, - ); - - Ok(()) - } - - fn get_acc_meta_num_tx(&self, acc_id: [u8; 32]) -> DbResult> { - let cf_ameta = self.account_meta_column(); - let res = self.db.get_cf(&cf_ameta, acc_id).map_err(|rerr| { - DbError::rocksdb_cast_message(rerr, Some("Failed to read from acc meta cf".to_owned())) - })?; - - res.map(|data| { - borsh::from_slice::(&data).map_err(|serr| { - DbError::borsh_cast_message(serr, Some("Failed to deserialize num tx".to_owned())) - }) - }) - .transpose() - } - - // Account - - pub fn put_account_transactions( - &self, - acc_id: [u8; 32], - tx_hashes: &[[u8; 32]], - ) -> DbResult<()> { - let acc_num_tx = self.get_acc_meta_num_tx(acc_id)?.unwrap_or(0); - let cf_att = 
self.account_id_to_tx_hash_column(); - let mut write_batch = WriteBatch::new(); - - for (tx_id, tx_hash) in tx_hashes.iter().enumerate() { - let put_id = acc_num_tx - .checked_add( - u64::try_from(tx_id) - .expect("Transaction number for account expected to fit in u64"), - ) - .expect("Reached maximum number of transactions for account"); - - let mut prefix = borsh::to_vec(&acc_id).map_err(|berr| { - DbError::borsh_cast_message(berr, Some("Failed to serialize account id".to_owned())) - })?; - let suffix = borsh::to_vec(&put_id).map_err(|berr| { - DbError::borsh_cast_message(berr, Some("Failed to serialize tx id".to_owned())) - })?; - - prefix.extend_from_slice(&suffix); - - write_batch.put_cf( - &cf_att, - prefix, - borsh::to_vec(tx_hash).map_err(|berr| { - DbError::borsh_cast_message( - berr, - Some("Failed to serialize tx hash".to_owned()), - ) - })?, - ); - } - - self.update_acc_meta_batch( - acc_id, - acc_num_tx - .checked_add( - u64::try_from(tx_hashes.len()) - .expect("Number of transactions expected to fit in u64"), - ) - .expect("Reached maximum number of transactions for account"), - &mut write_batch, - )?; - - self.db.write(write_batch).map_err(|rerr| { - DbError::rocksdb_cast_message(rerr, Some("Failed to write batch".to_owned())) - }) - } - - fn get_acc_transaction_hashes( - &self, - acc_id: [u8; 32], - offset: u64, - limit: u64, - ) -> DbResult> { - let cf_att = self.account_id_to_tx_hash_column(); - let mut tx_batch = vec![]; - - // ToDo: Multi get this - - for tx_id in offset..(offset.saturating_add(limit)) { - let mut prefix = borsh::to_vec(&acc_id).map_err(|berr| { - DbError::borsh_cast_message(berr, Some("Failed to serialize account id".to_owned())) - })?; - let suffix = borsh::to_vec(&tx_id).map_err(|berr| { - DbError::borsh_cast_message(berr, Some("Failed to serialize tx id".to_owned())) - })?; - - prefix.extend_from_slice(&suffix); - - let res = self - .db - .get_cf(&cf_att, prefix) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; 
- - let tx_hash = if let Some(data) = res { - Ok(borsh::from_slice::<[u8; 32]>(&data).map_err(|serr| { - DbError::borsh_cast_message( - serr, - Some("Failed to deserialize tx_hash".to_owned()), - ) - })?) - } else { - // Tx hash not found, assuming that previous one was the last - break; - }?; - - tx_batch.push(tx_hash); - } - - Ok(tx_batch) - } - - pub fn get_acc_transactions( - &self, - acc_id: [u8; 32], - offset: u64, - limit: u64, - ) -> DbResult> { - let mut tx_batch = vec![]; - - for tx_hash in self.get_acc_transaction_hashes(acc_id, offset, limit)? { - let block_id = self.get_block_id_by_tx_hash(tx_hash)?; - let block = self.get_block(block_id)?; - - let transaction = block - .body - .transactions - .iter() - .find(|tx| tx.hash().0 == tx_hash) - .ok_or_else(|| { - DbError::db_interaction_error(format!( - "Missing transaction in block {} with hash {:#?}", - block.header.block_id, tx_hash - )) - })?; - - tx_batch.push(transaction.clone()); - } - - Ok(tx_batch) - } -} - -fn closest_breakpoint_id(block_id: u64) -> u64 { - block_id - .saturating_sub(1) - .checked_div(u64::from(BREAKPOINT_INTERVAL)) - .expect("Breakpoint interval is not zero") -} - -#[cfg(test)] -mod tests { - #![expect(clippy::shadow_unrelated, reason = "We don't care about it in tests")] - - use nssa::AccountId; - use tempfile::tempdir; - - use super::*; - - fn genesis_block() -> Block { - common::test_utils::produce_dummy_block(1, None, vec![]) - } - - fn acc1() -> AccountId { - AccountId::new([ - 148, 179, 206, 253, 199, 51, 82, 86, 232, 2, 152, 122, 80, 243, 54, 207, 237, 112, 83, - 153, 44, 59, 204, 49, 128, 84, 160, 227, 216, 149, 97, 102, - ]) - } - - fn acc2() -> AccountId { - AccountId::new([ - 30, 145, 107, 3, 207, 73, 192, 230, 160, 63, 238, 207, 18, 69, 54, 216, 103, 244, 92, - 94, 124, 248, 42, 16, 141, 19, 119, 18, 14, 226, 140, 204, - ]) - } - - fn acc1_sign_key() -> nssa::PrivateKey { - nssa::PrivateKey::try_new([1; 32]).unwrap() - } - - fn acc2_sign_key() -> nssa::PrivateKey { - 
nssa::PrivateKey::try_new([2; 32]).unwrap() - } - - fn initial_state() -> V02State { - nssa::V02State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]) - } - - fn transfer(amount: u128, nonce: u128, direction: bool) -> NSSATransaction { - let from; - let to; - let sign_key = if direction { - from = acc1(); - to = acc2(); - acc1_sign_key() - } else { - from = acc2(); - to = acc1(); - acc2_sign_key() - }; - - common::test_utils::create_transaction_native_token_transfer( - from, nonce, to, amount, &sign_key, - ) - } - - #[test] - fn start_db() { - let temp_dir = tempdir().unwrap(); - let temdir_path = temp_dir.path(); - - let dbio = - RocksDBIO::open_or_create(temdir_path, &genesis_block(), &initial_state()).unwrap(); - - let last_id = dbio.get_meta_last_block_in_db().unwrap(); - let first_id = dbio.get_meta_first_block_in_db().unwrap(); - let is_first_set = dbio.get_meta_is_first_block_set().unwrap(); - let last_br_id = dbio.get_meta_last_breakpoint_id().unwrap(); - let last_block = dbio.get_block(1).unwrap(); - let breakpoint = dbio.get_breakpoint(0).unwrap(); - let final_state = dbio.final_state().unwrap(); - - assert_eq!(last_id, 1); - assert_eq!(first_id, 1); - assert!(is_first_set); - assert_eq!(last_br_id, 0); - assert_eq!(last_block.header.hash, genesis_block().header.hash); - assert_eq!( - breakpoint.get_account_by_id(acc1()), - final_state.get_account_by_id(acc1()) - ); - assert_eq!( - breakpoint.get_account_by_id(acc2()), - final_state.get_account_by_id(acc2()) - ); - } - - #[test] - fn one_block_insertion() { - let temp_dir = tempdir().unwrap(); - let temdir_path = temp_dir.path(); - - let dbio = - RocksDBIO::open_or_create(temdir_path, &genesis_block(), &initial_state()).unwrap(); - - let prev_hash = genesis_block().header.hash; - let transfer_tx = transfer(1, 0, true); - let block = common::test_utils::produce_dummy_block(2, Some(prev_hash), vec![transfer_tx]); - - dbio.put_block(&block, [1; 32]).unwrap(); - - let last_id = 
dbio.get_meta_last_block_in_db().unwrap(); - let first_id = dbio.get_meta_first_block_in_db().unwrap(); - let is_first_set = dbio.get_meta_is_first_block_set().unwrap(); - let last_br_id = dbio.get_meta_last_breakpoint_id().unwrap(); - let last_block = dbio.get_block(last_id).unwrap(); - let breakpoint = dbio.get_breakpoint(0).unwrap(); - let final_state = dbio.final_state().unwrap(); - - assert_eq!(last_id, 2); - assert_eq!(first_id, 1); - assert!(is_first_set); - assert_eq!(last_br_id, 0); - assert_ne!(last_block.header.hash, genesis_block().header.hash); - assert_eq!( - breakpoint.get_account_by_id(acc1()).balance - - final_state.get_account_by_id(acc1()).balance, - 1 - ); - assert_eq!( - final_state.get_account_by_id(acc2()).balance - - breakpoint.get_account_by_id(acc2()).balance, - 1 - ); - } - - #[test] - fn new_breakpoint() { - let temp_dir = tempdir().unwrap(); - let temdir_path = temp_dir.path(); - - let dbio = - RocksDBIO::open_or_create(temdir_path, &genesis_block(), &initial_state()).unwrap(); - - for i in 1..BREAKPOINT_INTERVAL { - let last_id = dbio.get_meta_last_block_in_db().unwrap(); - let last_block = dbio.get_block(last_id).unwrap(); - - let prev_hash = last_block.header.hash; - let transfer_tx = transfer(1, u128::from(i - 1), true); - let block = common::test_utils::produce_dummy_block( - u64::from(i + 1), - Some(prev_hash), - vec![transfer_tx], - ); - dbio.put_block(&block, [i; 32]).unwrap(); - } - - let last_id = dbio.get_meta_last_block_in_db().unwrap(); - let first_id = dbio.get_meta_first_block_in_db().unwrap(); - let is_first_set = dbio.get_meta_is_first_block_set().unwrap(); - let last_br_id = dbio.get_meta_last_breakpoint_id().unwrap(); - let last_block = dbio.get_block(last_id).unwrap(); - let prev_breakpoint = dbio.get_breakpoint(0).unwrap(); - let breakpoint = dbio.get_breakpoint(1).unwrap(); - let final_state = dbio.final_state().unwrap(); - - assert_eq!(last_id, 100); - assert_eq!(first_id, 1); - assert!(is_first_set); - 
assert_eq!(last_br_id, 1); - assert_ne!(last_block.header.hash, genesis_block().header.hash); - assert_eq!( - prev_breakpoint.get_account_by_id(acc1()).balance - - final_state.get_account_by_id(acc1()).balance, - 99 - ); - assert_eq!( - final_state.get_account_by_id(acc2()).balance - - prev_breakpoint.get_account_by_id(acc2()).balance, - 99 - ); - assert_eq!( - breakpoint.get_account_by_id(acc1()), - final_state.get_account_by_id(acc1()) - ); - assert_eq!( - breakpoint.get_account_by_id(acc2()), - final_state.get_account_by_id(acc2()) - ); - } - - #[test] - fn simple_maps() { - let temp_dir = tempdir().unwrap(); - let temdir_path = temp_dir.path(); - - let dbio = - RocksDBIO::open_or_create(temdir_path, &genesis_block(), &initial_state()).unwrap(); - - let last_id = dbio.get_meta_last_block_in_db().unwrap(); - let last_block = dbio.get_block(last_id).unwrap(); - - let prev_hash = last_block.header.hash; - let transfer_tx = transfer(1, 0, true); - let block = common::test_utils::produce_dummy_block(2, Some(prev_hash), vec![transfer_tx]); - - let control_hash1 = block.header.hash; - - dbio.put_block(&block, [1; 32]).unwrap(); - - let last_id = dbio.get_meta_last_block_in_db().unwrap(); - let last_block = dbio.get_block(last_id).unwrap(); - - let prev_hash = last_block.header.hash; - let transfer_tx = transfer(1, 1, true); - let block = common::test_utils::produce_dummy_block(3, Some(prev_hash), vec![transfer_tx]); - - let control_hash2 = block.header.hash; - - dbio.put_block(&block, [2; 32]).unwrap(); - - let last_id = dbio.get_meta_last_block_in_db().unwrap(); - let last_block = dbio.get_block(last_id).unwrap(); - - let prev_hash = last_block.header.hash; - let transfer_tx = transfer(1, 2, true); - - let control_tx_hash1 = transfer_tx.hash(); - - let block = common::test_utils::produce_dummy_block(4, Some(prev_hash), vec![transfer_tx]); - dbio.put_block(&block, [3; 32]).unwrap(); - - let last_id = dbio.get_meta_last_block_in_db().unwrap(); - let last_block = 
dbio.get_block(last_id).unwrap(); - - let prev_hash = last_block.header.hash; - let transfer_tx = transfer(1, 3, true); - - let control_tx_hash2 = transfer_tx.hash(); - - let block = common::test_utils::produce_dummy_block(5, Some(prev_hash), vec![transfer_tx]); - dbio.put_block(&block, [4; 32]).unwrap(); - - let control_block_id1 = dbio.get_block_id_by_hash(control_hash1.0).unwrap(); - let control_block_id2 = dbio.get_block_id_by_hash(control_hash2.0).unwrap(); - let control_block_id3 = dbio.get_block_id_by_tx_hash(control_tx_hash1.0).unwrap(); - let control_block_id4 = dbio.get_block_id_by_tx_hash(control_tx_hash2.0).unwrap(); - - assert_eq!(control_block_id1, 2); - assert_eq!(control_block_id2, 3); - assert_eq!(control_block_id3, 4); - assert_eq!(control_block_id4, 5); - } - - #[test] - fn block_batch() { - let temp_dir = tempdir().unwrap(); - let temdir_path = temp_dir.path(); - - let mut block_res = vec![]; - - let dbio = - RocksDBIO::open_or_create(temdir_path, &genesis_block(), &initial_state()).unwrap(); - - let last_id = dbio.get_meta_last_block_in_db().unwrap(); - let last_block = dbio.get_block(last_id).unwrap(); - - let prev_hash = last_block.header.hash; - let transfer_tx = transfer(1, 0, true); - let block = common::test_utils::produce_dummy_block(2, Some(prev_hash), vec![transfer_tx]); - - block_res.push(block.clone()); - dbio.put_block(&block, [1; 32]).unwrap(); - - let last_id = dbio.get_meta_last_block_in_db().unwrap(); - let last_block = dbio.get_block(last_id).unwrap(); - - let prev_hash = last_block.header.hash; - let transfer_tx = transfer(1, 1, true); - let block = common::test_utils::produce_dummy_block(3, Some(prev_hash), vec![transfer_tx]); - - block_res.push(block.clone()); - dbio.put_block(&block, [2; 32]).unwrap(); - - let last_id = dbio.get_meta_last_block_in_db().unwrap(); - let last_block = dbio.get_block(last_id).unwrap(); - - let prev_hash = last_block.header.hash; - let transfer_tx = transfer(1, 2, true); - - let block = 
common::test_utils::produce_dummy_block(4, Some(prev_hash), vec![transfer_tx]); - block_res.push(block.clone()); - dbio.put_block(&block, [3; 32]).unwrap(); - - let last_id = dbio.get_meta_last_block_in_db().unwrap(); - let last_block = dbio.get_block(last_id).unwrap(); - - let prev_hash = last_block.header.hash; - let transfer_tx = transfer(1, 3, true); - - let block = common::test_utils::produce_dummy_block(5, Some(prev_hash), vec![transfer_tx]); - block_res.push(block.clone()); - dbio.put_block(&block, [4; 32]).unwrap(); - - let block_hashes_mem: Vec<[u8; 32]> = - block_res.into_iter().map(|bl| bl.header.hash.0).collect(); - - // Get blocks before ID 6 (i.e., starting from 5 going backwards), limit 4 - // This should return blocks 5, 4, 3, 2 in descending order - let mut batch_res = dbio.get_block_batch(Some(6), 4).unwrap(); - batch_res.reverse(); // Reverse to match ascending order for comparison - - let block_hashes_db: Vec<[u8; 32]> = - batch_res.into_iter().map(|bl| bl.header.hash.0).collect(); - - assert_eq!(block_hashes_mem, block_hashes_db); - - let block_hashes_mem_limited = &block_hashes_mem[1..]; - - // Get blocks before ID 6, limit 3 - // This should return blocks 5, 4, 3 in descending order - let mut batch_res_limited = dbio.get_block_batch(Some(6), 3).unwrap(); - batch_res_limited.reverse(); // Reverse to match ascending order for comparison - - let block_hashes_db_limited: Vec<[u8; 32]> = batch_res_limited - .into_iter() - .map(|bl| bl.header.hash.0) - .collect(); - - assert_eq!(block_hashes_mem_limited, block_hashes_db_limited.as_slice()); - } - - #[test] - fn account_map() { - let temp_dir = tempdir().unwrap(); - let temdir_path = temp_dir.path(); - - let mut tx_hash_res = vec![]; - - let dbio = - RocksDBIO::open_or_create(temdir_path, &genesis_block(), &initial_state()).unwrap(); - - let last_id = dbio.get_meta_last_block_in_db().unwrap(); - let last_block = dbio.get_block(last_id).unwrap(); - - let prev_hash = last_block.header.hash; - let 
transfer_tx = transfer(1, 0, true); - - tx_hash_res.push(transfer_tx.hash().0); - - let block = common::test_utils::produce_dummy_block(2, Some(prev_hash), vec![transfer_tx]); - - dbio.put_block(&block, [1; 32]).unwrap(); - - let last_id = dbio.get_meta_last_block_in_db().unwrap(); - let last_block = dbio.get_block(last_id).unwrap(); - - let prev_hash = last_block.header.hash; - let transfer_tx = transfer(1, 1, true); - - tx_hash_res.push(transfer_tx.hash().0); - - let block = common::test_utils::produce_dummy_block(3, Some(prev_hash), vec![transfer_tx]); - - dbio.put_block(&block, [2; 32]).unwrap(); - - let last_id = dbio.get_meta_last_block_in_db().unwrap(); - let last_block = dbio.get_block(last_id).unwrap(); - - let prev_hash = last_block.header.hash; - let transfer_tx = transfer(1, 2, true); - - tx_hash_res.push(transfer_tx.hash().0); - - let block = common::test_utils::produce_dummy_block(4, Some(prev_hash), vec![transfer_tx]); - - dbio.put_block(&block, [3; 32]).unwrap(); - - let last_id = dbio.get_meta_last_block_in_db().unwrap(); - let last_block = dbio.get_block(last_id).unwrap(); - - let prev_hash = last_block.header.hash; - let transfer_tx = transfer(1, 3, true); - - tx_hash_res.push(transfer_tx.hash().0); - - let block = common::test_utils::produce_dummy_block(5, Some(prev_hash), vec![transfer_tx]); - - dbio.put_block(&block, [4; 32]).unwrap(); - - let acc1_tx = dbio.get_acc_transactions(*acc1().value(), 0, 4).unwrap(); - let acc1_tx_hashes: Vec<[u8; 32]> = acc1_tx.into_iter().map(|tx| tx.hash().0).collect(); - - assert_eq!(acc1_tx_hashes, tx_hash_res); - - let acc1_tx_limited = dbio.get_acc_transactions(*acc1().value(), 1, 4).unwrap(); - let acc1_tx_limited_hashes: Vec<[u8; 32]> = - acc1_tx_limited.into_iter().map(|tx| tx.hash().0).collect(); - - assert_eq!(acc1_tx_limited_hashes.as_slice(), &tx_hash_res[1..]); - } -} diff --git a/storage/src/indexer/indexer_cells.rs b/storage/src/indexer/indexer_cells.rs new file mode 100644 index 00000000..76a2c035 
--- /dev/null +++ b/storage/src/indexer/indexer_cells.rs @@ -0,0 +1,230 @@ +use borsh::{BorshDeserialize, BorshSerialize}; +use nssa::V03State; + +use crate::{ + CF_META_NAME, DbResult, + cells::{SimpleReadableCell, SimpleStorableCell, SimpleWritableCell}, + error::DbError, + indexer::{ + ACC_NUM_CELL_NAME, BLOCK_HASH_CELL_NAME, BREAKPOINT_CELL_NAME, CF_ACC_META, + CF_BREAKPOINT_NAME, CF_HASH_TO_ID, CF_TX_TO_ID, DB_META_LAST_BREAKPOINT_ID, + DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY, TX_HASH_CELL_NAME, + }, +}; + +#[derive(Debug, BorshSerialize, BorshDeserialize)] +pub struct LastObservedL1LibHeaderCell(pub [u8; 32]); + +impl SimpleStorableCell for LastObservedL1LibHeaderCell { + type KeyParams = (); + + const CELL_NAME: &'static str = DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY; + const CF_NAME: &'static str = CF_META_NAME; +} + +impl SimpleReadableCell for LastObservedL1LibHeaderCell {} + +impl SimpleWritableCell for LastObservedL1LibHeaderCell { + fn value_constructor(&self) -> DbResult> { + borsh::to_vec(&self).map_err(|err| { + DbError::borsh_cast_message( + err, + Some("Failed to serialize last observed l1 header".to_owned()), + ) + }) + } +} + +#[derive(Debug, BorshSerialize, BorshDeserialize)] +pub struct LastBreakpointIdCell(pub u64); + +impl SimpleStorableCell for LastBreakpointIdCell { + type KeyParams = (); + + const CELL_NAME: &'static str = DB_META_LAST_BREAKPOINT_ID; + const CF_NAME: &'static str = CF_META_NAME; +} + +impl SimpleReadableCell for LastBreakpointIdCell {} + +impl SimpleWritableCell for LastBreakpointIdCell { + fn value_constructor(&self) -> DbResult> { + borsh::to_vec(&self).map_err(|err| { + DbError::borsh_cast_message( + err, + Some("Failed to serialize last breakpoint id".to_owned()), + ) + }) + } +} + +#[derive(BorshDeserialize)] +pub struct BreakpointCellOwned(pub V03State); + +impl SimpleStorableCell for BreakpointCellOwned { + type KeyParams = u64; + + const CELL_NAME: &'static str = BREAKPOINT_CELL_NAME; + const 
CF_NAME: &'static str = CF_BREAKPOINT_NAME; + + fn key_constructor(params: Self::KeyParams) -> DbResult> { + borsh::to_vec(¶ms).map_err(|err| { + DbError::borsh_cast_message( + err, + Some(format!( + "Failed to serialize {:?} key params", + Self::CELL_NAME + )), + ) + }) + } +} + +impl SimpleReadableCell for BreakpointCellOwned {} + +#[derive(BorshSerialize)] +pub struct BreakpointCellRef<'state>(pub &'state V03State); + +impl SimpleStorableCell for BreakpointCellRef<'_> { + type KeyParams = u64; + + const CELL_NAME: &'static str = BREAKPOINT_CELL_NAME; + const CF_NAME: &'static str = CF_BREAKPOINT_NAME; + + fn key_constructor(params: Self::KeyParams) -> DbResult> { + borsh::to_vec(¶ms).map_err(|err| { + DbError::borsh_cast_message( + err, + Some(format!( + "Failed to serialize {:?} key params", + Self::CELL_NAME + )), + ) + }) + } +} + +impl SimpleWritableCell for BreakpointCellRef<'_> { + fn value_constructor(&self) -> DbResult> { + borsh::to_vec(&self).map_err(|err| { + DbError::borsh_cast_message(err, Some("Failed to serialize breakpoint".to_owned())) + }) + } +} + +#[derive(Debug, BorshSerialize, BorshDeserialize)] +pub struct BlockHashToBlockIdMapCell(pub u64); + +impl SimpleStorableCell for BlockHashToBlockIdMapCell { + type KeyParams = [u8; 32]; + + const CELL_NAME: &'static str = BLOCK_HASH_CELL_NAME; + const CF_NAME: &'static str = CF_HASH_TO_ID; + + fn key_constructor(params: Self::KeyParams) -> DbResult> { + borsh::to_vec(¶ms).map_err(|err| { + DbError::borsh_cast_message( + err, + Some(format!( + "Failed to serialize {:?} key params", + Self::CELL_NAME + )), + ) + }) + } +} + +impl SimpleReadableCell for BlockHashToBlockIdMapCell {} + +impl SimpleWritableCell for BlockHashToBlockIdMapCell { + fn value_constructor(&self) -> DbResult> { + borsh::to_vec(&self).map_err(|err| { + DbError::borsh_cast_message(err, Some("Failed to serialize block id".to_owned())) + }) + } +} + +#[derive(Debug, BorshSerialize, BorshDeserialize)] +pub struct 
TxHashToBlockIdMapCell(pub u64); + +impl SimpleStorableCell for TxHashToBlockIdMapCell { + type KeyParams = [u8; 32]; + + const CELL_NAME: &'static str = TX_HASH_CELL_NAME; + const CF_NAME: &'static str = CF_TX_TO_ID; + + fn key_constructor(params: Self::KeyParams) -> DbResult> { + borsh::to_vec(¶ms).map_err(|err| { + DbError::borsh_cast_message( + err, + Some(format!( + "Failed to serialize {:?} key params", + Self::CELL_NAME + )), + ) + }) + } +} + +impl SimpleReadableCell for TxHashToBlockIdMapCell {} + +impl SimpleWritableCell for TxHashToBlockIdMapCell { + fn value_constructor(&self) -> DbResult> { + borsh::to_vec(&self).map_err(|err| { + DbError::borsh_cast_message(err, Some("Failed to serialize block id".to_owned())) + }) + } +} + +#[derive(Debug, BorshSerialize, BorshDeserialize)] +pub struct AccNumTxCell(pub u64); + +impl SimpleStorableCell for AccNumTxCell { + type KeyParams = [u8; 32]; + + const CELL_NAME: &'static str = ACC_NUM_CELL_NAME; + const CF_NAME: &'static str = CF_ACC_META; + + fn key_constructor(params: Self::KeyParams) -> DbResult> { + borsh::to_vec(¶ms).map_err(|err| { + DbError::borsh_cast_message( + err, + Some(format!( + "Failed to serialize {:?} key params", + Self::CELL_NAME + )), + ) + }) + } +} + +impl SimpleReadableCell for AccNumTxCell {} + +impl SimpleWritableCell for AccNumTxCell { + fn value_constructor(&self) -> DbResult> { + borsh::to_vec(&self).map_err(|err| { + DbError::borsh_cast_message( + err, + Some("Failed to serialize number of transactions".to_owned()), + ) + }) + } +} + +#[cfg(test)] +mod uniform_tests { + use crate::{ + cells::SimpleStorableCell as _, + indexer::indexer_cells::{BreakpointCellOwned, BreakpointCellRef}, + }; + + #[test] + fn breakpoint_ref_and_owned_is_aligned() { + assert_eq!(BreakpointCellRef::CELL_NAME, BreakpointCellOwned::CELL_NAME); + assert_eq!(BreakpointCellRef::CF_NAME, BreakpointCellOwned::CF_NAME); + assert_eq!( + BreakpointCellRef::key_constructor(1000).unwrap(), + 
BreakpointCellOwned::key_constructor(1000).unwrap() + ); + } +} diff --git a/storage/src/indexer/mod.rs b/storage/src/indexer/mod.rs new file mode 100644 index 00000000..7ca89a56 --- /dev/null +++ b/storage/src/indexer/mod.rs @@ -0,0 +1,681 @@ +use std::{path::Path, sync::Arc}; + +use common::block::Block; +use nssa::V03State; +use rocksdb::{ + BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options, +}; + +use crate::{BREAKPOINT_INTERVAL, CF_BLOCK_NAME, CF_META_NAME, DBIO, DbResult, error::DbError}; + +pub mod indexer_cells; +pub mod read_multiple; +pub mod read_once; +pub mod write_atomic; +pub mod write_non_atomic; + +/// Key base for storing metainformation about id of last observed L1 lib header in db. +pub const DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY: &str = + "last_observed_l1_lib_header_in_db"; +/// Key base for storing metainformation about the last breakpoint. +pub const DB_META_LAST_BREAKPOINT_ID: &str = "last_breakpoint_id"; + +/// Cell name for a breakpoint. +pub const BREAKPOINT_CELL_NAME: &str = "breakpoint"; +/// Cell name for a block hash to block id map. +pub const BLOCK_HASH_CELL_NAME: &str = "block hash"; +/// Cell name for a tx hash to block id map. +pub const TX_HASH_CELL_NAME: &str = "tx hash"; +/// Cell name for a account number of transactions. +pub const ACC_NUM_CELL_NAME: &str = "acc id"; + +/// Name of breakpoint column family. +pub const CF_BREAKPOINT_NAME: &str = "cf_breakpoint"; +/// Name of hash to id map column family. +pub const CF_HASH_TO_ID: &str = "cf_hash_to_id"; +/// Name of tx hash to id map column family. +pub const CF_TX_TO_ID: &str = "cf_tx_to_id"; +/// Name of account meta column family. +pub const CF_ACC_META: &str = "cf_acc_meta"; +/// Name of account id to tx hash map column family. 
+pub const CF_ACC_TO_TX: &str = "cf_acc_to_tx"; + +pub struct RocksDBIO { + pub db: DBWithThreadMode, +} + +impl DBIO for RocksDBIO { + fn db(&self) -> &DBWithThreadMode { + &self.db + } +} + +impl RocksDBIO { + pub fn open_or_create( + path: &Path, + genesis_block: &Block, + initial_state: &V03State, + ) -> DbResult { + let mut cf_opts = Options::default(); + cf_opts.set_max_write_buffer_number(16); + // ToDo: Add more column families for different data + let cfb = ColumnFamilyDescriptor::new(CF_BLOCK_NAME, cf_opts.clone()); + let cfmeta = ColumnFamilyDescriptor::new(CF_META_NAME, cf_opts.clone()); + let cfbreakpoint = ColumnFamilyDescriptor::new(CF_BREAKPOINT_NAME, cf_opts.clone()); + let cfhti = ColumnFamilyDescriptor::new(CF_HASH_TO_ID, cf_opts.clone()); + let cftti = ColumnFamilyDescriptor::new(CF_TX_TO_ID, cf_opts.clone()); + let cfameta = ColumnFamilyDescriptor::new(CF_ACC_META, cf_opts.clone()); + let cfatt = ColumnFamilyDescriptor::new(CF_ACC_TO_TX, cf_opts.clone()); + + let mut db_opts = Options::default(); + db_opts.create_missing_column_families(true); + db_opts.create_if_missing(true); + let db = DBWithThreadMode::::open_cf_descriptors( + &db_opts, + path, + vec![cfb, cfmeta, cfbreakpoint, cfhti, cftti, cfameta, cfatt], + ) + .map_err(|err| DbError::RocksDbError { + error: err, + additional_info: Some("Failed to open or create DB".to_owned()), + })?; + + let dbio = Self { db }; + + let is_start_set = dbio.get_meta_is_first_block_set()?; + if !is_start_set { + let block_id = genesis_block.header.block_id; + dbio.put_meta_last_block_in_db(block_id)?; + dbio.put_meta_first_block_in_db_batch(genesis_block)?; + dbio.put_meta_is_first_block_set()?; + + // First breakpoint setup + dbio.put_breakpoint(0, initial_state)?; + dbio.put_meta_last_breakpoint_id(0)?; + } + + Ok(dbio) + } + + pub fn destroy(path: &Path) -> DbResult<()> { + let db_opts = Options::default(); + DBWithThreadMode::::destroy(&db_opts, path) + .map_err(|rerr| 
DbError::rocksdb_cast_message(rerr, None)) + } + + // Columns + + pub fn meta_column(&self) -> Arc> { + self.db + .cf_handle(CF_META_NAME) + .expect("Meta column should exist") + } + + pub fn block_column(&self) -> Arc> { + self.db + .cf_handle(CF_BLOCK_NAME) + .expect("Block column should exist") + } + + pub fn breakpoint_column(&self) -> Arc> { + self.db + .cf_handle(CF_BREAKPOINT_NAME) + .expect("Breakpoint column should exist") + } + + pub fn hash_to_id_column(&self) -> Arc> { + self.db + .cf_handle(CF_HASH_TO_ID) + .expect("Hash to id map column should exist") + } + + pub fn tx_hash_to_id_column(&self) -> Arc> { + self.db + .cf_handle(CF_TX_TO_ID) + .expect("Tx hash to id map column should exist") + } + + pub fn account_id_to_tx_hash_column(&self) -> Arc> { + self.db + .cf_handle(CF_ACC_TO_TX) + .expect("Account id to tx map column should exist") + } + + pub fn account_meta_column(&self) -> Arc> { + self.db + .cf_handle(CF_ACC_META) + .expect("Account meta column should exist") + } + + // State + + pub fn calculate_state_for_id(&self, block_id: u64) -> DbResult { + let last_block = self.get_meta_last_block_in_db()?; + + if block_id <= last_block { + let br_id = closest_breakpoint_id(block_id); + let mut breakpoint = self.get_breakpoint(br_id)?; + + // ToDo: update it to handle any genesis id + // right now works correctly only if genesis_id < BREAKPOINT_INTERVAL + let start = if br_id != 0 { + u64::from(BREAKPOINT_INTERVAL) + .checked_mul(br_id) + .expect("Reached maximum breakpoint id") + } else { + self.get_meta_first_block_in_db()? + }; + + for block in self.get_block_batch_seq( + start.checked_add(1).expect("Will be lesser that u64::MAX")..=block_id, + )? { + for transaction in block.body.transactions { + transaction + .transaction_stateless_check() + .map_err(|err| { + DbError::db_interaction_error(format!( + "transaction pre check failed with err {err:?}" + )) + })? 
+ .execute_check_on_state( + &mut breakpoint, + block.header.block_id, + block.header.timestamp, + ) + .map_err(|err| { + DbError::db_interaction_error(format!( + "transaction execution failed with err {err:?}" + )) + })?; + } + } + + Ok(breakpoint) + } else { + Err(DbError::db_interaction_error( + "Block on this id not found".to_owned(), + )) + } + } + + pub fn final_state(&self) -> DbResult { + self.calculate_state_for_id(self.get_meta_last_block_in_db()?) + } +} + +fn closest_breakpoint_id(block_id: u64) -> u64 { + block_id + .saturating_sub(1) + .checked_div(u64::from(BREAKPOINT_INTERVAL)) + .expect("Breakpoint interval is not zero") +} + +#[expect(clippy::shadow_unrelated, reason = "Fine for tests")] +#[cfg(test)] +mod tests { + use nssa::{AccountId, PublicKey}; + use tempfile::tempdir; + + use super::*; + + fn genesis_block() -> Block { + common::test_utils::produce_dummy_block(1, None, vec![]) + } + + fn acc1_sign_key() -> nssa::PrivateKey { + nssa::PrivateKey::try_new([1; 32]).unwrap() + } + + fn acc2_sign_key() -> nssa::PrivateKey { + nssa::PrivateKey::try_new([2; 32]).unwrap() + } + + fn acc1() -> AccountId { + AccountId::from(&PublicKey::new_from_private_key(&acc1_sign_key())) + } + + fn acc2() -> AccountId { + AccountId::from(&PublicKey::new_from_private_key(&acc2_sign_key())) + } + + #[test] + fn start_db() { + let temp_dir = tempdir().unwrap(); + let temdir_path = temp_dir.path(); + + let dbio = RocksDBIO::open_or_create( + temdir_path, + &genesis_block(), + &nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[], 0), + ) + .unwrap(); + + let last_id = dbio.get_meta_last_block_in_db().unwrap(); + let first_id = dbio.get_meta_first_block_in_db().unwrap(); + let is_first_set = dbio.get_meta_is_first_block_set().unwrap(); + let last_observed_l1_header = dbio.get_meta_last_observed_l1_lib_header_in_db().unwrap(); + let last_br_id = dbio.get_meta_last_breakpoint_id().unwrap(); + let last_block = 
dbio.get_block(1).unwrap().unwrap(); + let breakpoint = dbio.get_breakpoint(0).unwrap(); + let final_state = dbio.final_state().unwrap(); + + assert_eq!(last_id, 1); + assert_eq!(first_id, 1); + assert_eq!(last_observed_l1_header, None); + assert!(is_first_set); + assert_eq!(last_br_id, 0); + assert_eq!(last_block.header.hash, genesis_block().header.hash); + assert_eq!( + breakpoint.get_account_by_id(acc1()), + final_state.get_account_by_id(acc1()) + ); + assert_eq!( + breakpoint.get_account_by_id(acc2()), + final_state.get_account_by_id(acc2()) + ); + } + + #[test] + fn one_block_insertion() { + let temp_dir = tempdir().unwrap(); + let temdir_path = temp_dir.path(); + + let dbio = RocksDBIO::open_or_create( + temdir_path, + &genesis_block(), + &nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[], 0), + ) + .unwrap(); + + let prev_hash = genesis_block().header.hash; + let from = acc1(); + let to = acc2(); + let sign_key = acc1_sign_key(); + + let transfer_tx = + common::test_utils::create_transaction_native_token_transfer(from, 0, to, 1, &sign_key); + let block = common::test_utils::produce_dummy_block(2, Some(prev_hash), vec![transfer_tx]); + + dbio.put_block(&block, [1; 32]).unwrap(); + + let last_id = dbio.get_meta_last_block_in_db().unwrap(); + let first_id = dbio.get_meta_first_block_in_db().unwrap(); + let last_observed_l1_header = dbio + .get_meta_last_observed_l1_lib_header_in_db() + .unwrap() + .unwrap(); + let is_first_set = dbio.get_meta_is_first_block_set().unwrap(); + let last_br_id = dbio.get_meta_last_breakpoint_id().unwrap(); + let last_block = dbio.get_block(last_id).unwrap().unwrap(); + let breakpoint = dbio.get_breakpoint(0).unwrap(); + let final_state = dbio.final_state().unwrap(); + + assert_eq!(last_id, 2); + assert_eq!(first_id, 1); + assert_eq!(last_observed_l1_header, [1; 32]); + assert!(is_first_set); + assert_eq!(last_br_id, 0); + assert_ne!(last_block.header.hash, genesis_block().header.hash); + assert_eq!( 
+ breakpoint.get_account_by_id(acc1()).balance + - final_state.get_account_by_id(acc1()).balance, + 1 + ); + assert_eq!( + final_state.get_account_by_id(acc2()).balance + - breakpoint.get_account_by_id(acc2()).balance, + 1 + ); + } + + #[test] + fn new_breakpoint() { + let temp_dir = tempdir().unwrap(); + let temdir_path = temp_dir.path(); + + let dbio = RocksDBIO::open_or_create( + temdir_path, + &genesis_block(), + &nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[], 0), + ) + .unwrap(); + + let from = acc1(); + let to = acc2(); + let sign_key = acc1_sign_key(); + + for i in 1..=BREAKPOINT_INTERVAL { + let last_id = dbio.get_meta_last_block_in_db().unwrap(); + let last_block = dbio.get_block(last_id).unwrap().unwrap(); + + let prev_hash = last_block.header.hash; + + let transfer_tx = common::test_utils::create_transaction_native_token_transfer( + from, + (i - 1).into(), + to, + 1, + &sign_key, + ); + let block = common::test_utils::produce_dummy_block( + (i + 1).into(), + Some(prev_hash), + vec![transfer_tx], + ); + dbio.put_block(&block, [i; 32]).unwrap(); + } + + let last_id = dbio.get_meta_last_block_in_db().unwrap(); + let first_id = dbio.get_meta_first_block_in_db().unwrap(); + let is_first_set = dbio.get_meta_is_first_block_set().unwrap(); + let last_br_id = dbio.get_meta_last_breakpoint_id().unwrap(); + let last_block = dbio.get_block(last_id).unwrap().unwrap(); + let prev_breakpoint = dbio.get_breakpoint(0).unwrap(); + let breakpoint = dbio.get_breakpoint(1).unwrap(); + let final_state = dbio.final_state().unwrap(); + + assert_eq!(last_id, 101); + assert_eq!(first_id, 1); + assert!(is_first_set); + assert_eq!(last_br_id, 1); + assert_ne!(last_block.header.hash, genesis_block().header.hash); + assert_eq!( + prev_breakpoint.get_account_by_id(acc1()).balance + - final_state.get_account_by_id(acc1()).balance, + 100 + ); + assert_eq!( + final_state.get_account_by_id(acc2()).balance + - 
prev_breakpoint.get_account_by_id(acc2()).balance, + 100 + ); + assert_eq!( + breakpoint.get_account_by_id(acc1()).balance + - final_state.get_account_by_id(acc1()).balance, + 1 + ); + assert_eq!( + final_state.get_account_by_id(acc2()).balance + - breakpoint.get_account_by_id(acc2()).balance, + 1 + ); + } + + #[test] + fn simple_maps() { + let temp_dir = tempdir().unwrap(); + let temdir_path = temp_dir.path(); + + let dbio = RocksDBIO::open_or_create( + temdir_path, + &genesis_block(), + &nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[], 0), + ) + .unwrap(); + + let from = acc1(); + let to = acc2(); + let sign_key = acc1_sign_key(); + + let last_id = dbio.get_meta_last_block_in_db().unwrap(); + let last_block = dbio.get_block(last_id).unwrap().unwrap(); + + let prev_hash = last_block.header.hash; + let transfer_tx = + common::test_utils::create_transaction_native_token_transfer(from, 0, to, 1, &sign_key); + let block = common::test_utils::produce_dummy_block(2, Some(prev_hash), vec![transfer_tx]); + + let control_hash1 = block.header.hash; + + dbio.put_block(&block, [1; 32]).unwrap(); + + let last_id = dbio.get_meta_last_block_in_db().unwrap(); + let last_block = dbio.get_block(last_id).unwrap().unwrap(); + + let prev_hash = last_block.header.hash; + let transfer_tx = + common::test_utils::create_transaction_native_token_transfer(from, 1, to, 1, &sign_key); + let block = common::test_utils::produce_dummy_block(3, Some(prev_hash), vec![transfer_tx]); + + let control_hash2 = block.header.hash; + + dbio.put_block(&block, [2; 32]).unwrap(); + + let last_id = dbio.get_meta_last_block_in_db().unwrap(); + let last_block = dbio.get_block(last_id).unwrap().unwrap(); + + let prev_hash = last_block.header.hash; + let transfer_tx = + common::test_utils::create_transaction_native_token_transfer(from, 2, to, 1, &sign_key); + + let control_tx_hash1 = transfer_tx.hash(); + + let block = common::test_utils::produce_dummy_block(4, Some(prev_hash), 
vec![transfer_tx]); + dbio.put_block(&block, [3; 32]).unwrap(); + + let last_id = dbio.get_meta_last_block_in_db().unwrap(); + let last_block = dbio.get_block(last_id).unwrap().unwrap(); + + let prev_hash = last_block.header.hash; + let transfer_tx = + common::test_utils::create_transaction_native_token_transfer(from, 3, to, 1, &sign_key); + + let control_tx_hash2 = transfer_tx.hash(); + + let block = common::test_utils::produce_dummy_block(5, Some(prev_hash), vec![transfer_tx]); + dbio.put_block(&block, [4; 32]).unwrap(); + + let control_block_id1 = dbio.get_block_id_by_hash(control_hash1.0).unwrap().unwrap(); + let control_block_id2 = dbio.get_block_id_by_hash(control_hash2.0).unwrap().unwrap(); + let control_block_id3 = dbio + .get_block_id_by_tx_hash(control_tx_hash1.0) + .unwrap() + .unwrap(); + let control_block_id4 = dbio + .get_block_id_by_tx_hash(control_tx_hash2.0) + .unwrap() + .unwrap(); + + assert_eq!(control_block_id1, 2); + assert_eq!(control_block_id2, 3); + assert_eq!(control_block_id3, 4); + assert_eq!(control_block_id4, 5); + } + + #[test] + fn block_batch() { + let temp_dir = tempdir().unwrap(); + let temdir_path = temp_dir.path(); + + let mut block_res = vec![]; + + let dbio = RocksDBIO::open_or_create( + temdir_path, + &genesis_block(), + &nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[], 0), + ) + .unwrap(); + + let from = acc1(); + let to = acc2(); + let sign_key = acc1_sign_key(); + + let last_id = dbio.get_meta_last_block_in_db().unwrap(); + let last_block = dbio.get_block(last_id).unwrap().unwrap(); + + let prev_hash = last_block.header.hash; + let transfer_tx = + common::test_utils::create_transaction_native_token_transfer(from, 0, to, 1, &sign_key); + let block = common::test_utils::produce_dummy_block(2, Some(prev_hash), vec![transfer_tx]); + + block_res.push(block.clone()); + dbio.put_block(&block, [1; 32]).unwrap(); + + let last_id = dbio.get_meta_last_block_in_db().unwrap(); + let last_block = 
dbio.get_block(last_id).unwrap().unwrap(); + + let prev_hash = last_block.header.hash; + let transfer_tx = + common::test_utils::create_transaction_native_token_transfer(from, 1, to, 1, &sign_key); + let block = common::test_utils::produce_dummy_block(3, Some(prev_hash), vec![transfer_tx]); + + block_res.push(block.clone()); + dbio.put_block(&block, [2; 32]).unwrap(); + + let last_id = dbio.get_meta_last_block_in_db().unwrap(); + let last_block = dbio.get_block(last_id).unwrap().unwrap(); + + let prev_hash = last_block.header.hash; + let transfer_tx = + common::test_utils::create_transaction_native_token_transfer(from, 2, to, 1, &sign_key); + + let block = common::test_utils::produce_dummy_block(4, Some(prev_hash), vec![transfer_tx]); + block_res.push(block.clone()); + dbio.put_block(&block, [3; 32]).unwrap(); + + let last_id = dbio.get_meta_last_block_in_db().unwrap(); + let last_block = dbio.get_block(last_id).unwrap().unwrap(); + + let prev_hash = last_block.header.hash; + let transfer_tx = + common::test_utils::create_transaction_native_token_transfer(from, 3, to, 1, &sign_key); + + let block = common::test_utils::produce_dummy_block(5, Some(prev_hash), vec![transfer_tx]); + block_res.push(block.clone()); + dbio.put_block(&block, [4; 32]).unwrap(); + + let block_hashes_mem: Vec<[u8; 32]> = + block_res.into_iter().map(|bl| bl.header.hash.0).collect(); + + // Get blocks before ID 6 (i.e., starting from 5 going backwards), limit 4 + // This should return blocks 5, 4, 3, 2 in descending order + let mut batch_res = dbio.get_block_batch(Some(6), 4).unwrap(); + batch_res.reverse(); // Reverse to match ascending order for comparison + + let block_hashes_db: Vec<[u8; 32]> = + batch_res.into_iter().map(|bl| bl.header.hash.0).collect(); + + assert_eq!(block_hashes_mem, block_hashes_db); + + let block_hashes_mem_limited = &block_hashes_mem[1..]; + + // Get blocks before ID 6, limit 3 + // This should return blocks 5, 4, 3 in descending order + let mut batch_res_limited = 
dbio.get_block_batch(Some(6), 3).unwrap(); + batch_res_limited.reverse(); // Reverse to match ascending order for comparison + + let block_hashes_db_limited: Vec<[u8; 32]> = batch_res_limited + .into_iter() + .map(|bl| bl.header.hash.0) + .collect(); + + assert_eq!(block_hashes_mem_limited, block_hashes_db_limited.as_slice()); + + let block_batch_seq = dbio.get_block_batch_seq(1..=5).unwrap(); + let block_batch_ids = block_batch_seq + .into_iter() + .map(|block| block.header.block_id) + .collect::>(); + + assert_eq!(block_batch_ids, vec![1, 2, 3, 4, 5]); + } + + #[test] + fn account_map() { + let temp_dir = tempdir().unwrap(); + let temdir_path = temp_dir.path(); + + let dbio = RocksDBIO::open_or_create( + temdir_path, + &genesis_block(), + &nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[], 0), + ) + .unwrap(); + + let from = acc1(); + let to = acc2(); + let sign_key = acc1_sign_key(); + + let mut tx_hash_res = vec![]; + + let last_id = dbio.get_meta_last_block_in_db().unwrap(); + let last_block = dbio.get_block(last_id).unwrap().unwrap(); + + let prev_hash = last_block.header.hash; + let transfer_tx1 = + common::test_utils::create_transaction_native_token_transfer(from, 0, to, 1, &sign_key); + let transfer_tx2 = + common::test_utils::create_transaction_native_token_transfer(from, 1, to, 1, &sign_key); + tx_hash_res.push(transfer_tx1.hash().0); + tx_hash_res.push(transfer_tx2.hash().0); + + let block = common::test_utils::produce_dummy_block( + 2, + Some(prev_hash), + vec![transfer_tx1, transfer_tx2], + ); + + dbio.put_block(&block, [1; 32]).unwrap(); + + let last_id = dbio.get_meta_last_block_in_db().unwrap(); + let last_block = dbio.get_block(last_id).unwrap().unwrap(); + + let prev_hash = last_block.header.hash; + let transfer_tx1 = + common::test_utils::create_transaction_native_token_transfer(from, 2, to, 1, &sign_key); + let transfer_tx2 = + common::test_utils::create_transaction_native_token_transfer(from, 3, to, 1, 
&sign_key); + tx_hash_res.push(transfer_tx1.hash().0); + tx_hash_res.push(transfer_tx2.hash().0); + + let block = common::test_utils::produce_dummy_block( + 3, + Some(prev_hash), + vec![transfer_tx1, transfer_tx2], + ); + + dbio.put_block(&block, [2; 32]).unwrap(); + + let last_id = dbio.get_meta_last_block_in_db().unwrap(); + let last_block = dbio.get_block(last_id).unwrap().unwrap(); + + let prev_hash = last_block.header.hash; + let transfer_tx1 = + common::test_utils::create_transaction_native_token_transfer(from, 4, to, 1, &sign_key); + let transfer_tx2 = + common::test_utils::create_transaction_native_token_transfer(from, 5, to, 1, &sign_key); + tx_hash_res.push(transfer_tx1.hash().0); + tx_hash_res.push(transfer_tx2.hash().0); + + let block = common::test_utils::produce_dummy_block( + 4, + Some(prev_hash), + vec![transfer_tx1, transfer_tx2], + ); + + dbio.put_block(&block, [3; 32]).unwrap(); + + let last_id = dbio.get_meta_last_block_in_db().unwrap(); + let last_block = dbio.get_block(last_id).unwrap().unwrap(); + + let prev_hash = last_block.header.hash; + let transfer_tx = + common::test_utils::create_transaction_native_token_transfer(from, 6, to, 1, &sign_key); + tx_hash_res.push(transfer_tx.hash().0); + + let block = common::test_utils::produce_dummy_block(5, Some(prev_hash), vec![transfer_tx]); + + dbio.put_block(&block, [4; 32]).unwrap(); + + let acc1_tx = dbio.get_acc_transactions(*acc1().value(), 0, 7).unwrap(); + let acc1_tx_hashes: Vec<[u8; 32]> = acc1_tx.into_iter().map(|tx| tx.hash().0).collect(); + + assert_eq!(acc1_tx_hashes, tx_hash_res); + + let acc1_tx_limited = dbio.get_acc_transactions(*acc1().value(), 1, 4).unwrap(); + let acc1_tx_limited_hashes: Vec<[u8; 32]> = + acc1_tx_limited.into_iter().map(|tx| tx.hash().0).collect(); + + assert_eq!(acc1_tx_limited_hashes.as_slice(), &tx_hash_res[1..5]); + } +} diff --git a/storage/src/indexer/read_multiple.rs b/storage/src/indexer/read_multiple.rs new file mode 100644 index 00000000..866fc7b0 --- 
/dev/null +++ b/storage/src/indexer/read_multiple.rs @@ -0,0 +1,209 @@ +use common::transaction::NSSATransaction; + +use super::{Block, DbError, DbResult, RocksDBIO}; + +#[expect(clippy::multiple_inherent_impl, reason = "Readability")] +impl RocksDBIO { + pub fn get_block_batch(&self, before: Option, limit: u64) -> DbResult> { + let mut seq = vec![]; + + // Determine the starting block ID + let start_block_id = if let Some(before_id) = before { + before_id.saturating_sub(1) + } else { + // Get the latest block ID + self.get_meta_last_block_in_db()? + }; + + for i in 0..limit { + let block_id = start_block_id.saturating_sub(i); + if block_id == 0 { + break; + } + seq.push(block_id); + } + + self.get_block_batch_seq(seq.into_iter()) + } + + /// Get block batch from a sequence. + /// + /// Currently assumes non-decreasing sequence. + /// + /// `ToDo`: Add suport of arbitrary sequences. + pub fn get_block_batch_seq(&self, seq: impl Iterator) -> DbResult> { + let cf_block = self.block_column(); + + // Keys setup + let mut keys = vec![]; + for block_id in seq { + keys.push(( + &cf_block, + borsh::to_vec(&block_id).map_err(|err| { + DbError::borsh_cast_message( + err, + Some("Failed to serialize block id".to_owned()), + ) + })?, + )); + } + + let multi_get_res = self.db.multi_get_cf(keys); + + // Keys parsing + let mut block_batch = vec![]; + for res in multi_get_res { + let res = res.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; + + let block = if let Some(data) = res { + Ok(borsh::from_slice::(&data).map_err(|serr| { + DbError::borsh_cast_message( + serr, + Some("Failed to deserialize block data".to_owned()), + ) + })?) + } else { + // Block not found, assuming that previous one was the last + break; + }?; + + block_batch.push(block); + } + + Ok(block_batch) + } + + /// Get block ids by txs. + /// + /// `ToDo`: There may be multiple transactions in one block + /// so this method can take redundant reads. + /// Need to update signature and implementation. 
+ fn get_block_ids_by_tx_vec(&self, tx_vec: &[[u8; 32]]) -> DbResult> { + let cf_tti = self.tx_hash_to_id_column(); + + // Keys setup + let mut keys = vec![]; + for tx_hash in tx_vec { + keys.push(( + &cf_tti, + borsh::to_vec(tx_hash).map_err(|err| { + DbError::borsh_cast_message(err, Some("Failed to serialize tx_hash".to_owned())) + })?, + )); + } + + let multi_get_res = self.db.multi_get_cf(keys); + + // Keys parsing + let mut block_id_batch = vec![]; + for res in multi_get_res { + let res = res + .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))? + .ok_or_else(|| { + DbError::db_interaction_error( + "Tx to block id mapping do not contain transaction from vec".to_owned(), + ) + })?; + + let block_id = { + Ok(borsh::from_slice::(&res).map_err(|serr| { + DbError::borsh_cast_message( + serr, + Some("Failed to deserialize block id".to_owned()), + ) + })?) + }?; + + block_id_batch.push(block_id); + } + + Ok(block_id_batch) + } + + // Account + + pub(crate) fn get_acc_transaction_hashes( + &self, + acc_id: [u8; 32], + offset: u64, + limit: u64, + ) -> DbResult> { + let cf_att = self.account_id_to_tx_hash_column(); + let mut tx_batch = vec![]; + + // Keys preparation + let mut keys = vec![]; + for tx_id in offset + ..offset + .checked_add(limit) + .expect("Transaction limit should be lesser than u64::MAX") + { + let mut prefix = borsh::to_vec(&acc_id).map_err(|berr| { + DbError::borsh_cast_message(berr, Some("Failed to serialize account id".to_owned())) + })?; + let suffix = borsh::to_vec(&tx_id).map_err(|berr| { + DbError::borsh_cast_message(berr, Some("Failed to serialize tx id".to_owned())) + })?; + + prefix.extend_from_slice(&suffix); + + keys.push((&cf_att, prefix)); + } + + let multi_get_res = self.db.multi_get_cf(keys); + + for res in multi_get_res { + let res = res.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; + + let tx_hash = if let Some(data) = res { + Ok(borsh::from_slice::<[u8; 32]>(&data).map_err(|serr| { + 
DbError::borsh_cast_message( + serr, + Some("Failed to deserialize tx_hash".to_owned()), + ) + })?) + } else { + // Tx hash not found, assuming that previous one was the last + break; + }?; + + tx_batch.push(tx_hash); + } + + Ok(tx_batch) + } + + pub fn get_acc_transactions( + &self, + acc_id: [u8; 32], + offset: u64, + limit: u64, + ) -> DbResult> { + let mut tx_batch = vec![]; + + let tx_hashes = self.get_acc_transaction_hashes(acc_id, offset, limit)?; + + let associated_blocks_multi_get = self + .get_block_batch_seq(self.get_block_ids_by_tx_vec(&tx_hashes)?.into_iter())? + .into_iter() + .zip(tx_hashes); + + for (block, tx_hash) in associated_blocks_multi_get { + let transaction = block + .body + .transactions + .iter() + .find(|tx| tx.hash().0 == tx_hash) + .ok_or_else(|| { + DbError::db_interaction_error(format!( + "Missing transaction in block {} with hash {:#?}", + block.header.block_id, tx_hash + )) + })?; + + tx_batch.push(transaction.clone()); + } + + Ok(tx_batch) + } +} diff --git a/storage/src/indexer/read_once.rs b/storage/src/indexer/read_once.rs new file mode 100644 index 00000000..b1ae0ada --- /dev/null +++ b/storage/src/indexer/read_once.rs @@ -0,0 +1,67 @@ +use super::{Block, DbResult, RocksDBIO, V03State}; +use crate::{ + DBIO as _, + cells::shared_cells::{BlockCell, FirstBlockCell, FirstBlockSetCell, LastBlockCell}, + indexer::indexer_cells::{ + AccNumTxCell, BlockHashToBlockIdMapCell, BreakpointCellOwned, LastBreakpointIdCell, + LastObservedL1LibHeaderCell, TxHashToBlockIdMapCell, + }, +}; + +#[expect(clippy::multiple_inherent_impl, reason = "Readability")] +impl RocksDBIO { + // Meta + + pub fn get_meta_first_block_in_db(&self) -> DbResult { + self.get::(()).map(|cell| cell.0) + } + + pub fn get_meta_last_block_in_db(&self) -> DbResult { + self.get::(()).map(|cell| cell.0) + } + + pub fn get_meta_last_observed_l1_lib_header_in_db(&self) -> DbResult> { + self.get_opt::(()) + .map(|opt| opt.map(|val| val.0)) + } + + pub fn 
get_meta_is_first_block_set(&self) -> DbResult { + Ok(self.get_opt::(())?.is_some()) + } + + pub fn get_meta_last_breakpoint_id(&self) -> DbResult { + self.get::(()).map(|cell| cell.0) + } + + // Block + + pub fn get_block(&self, block_id: u64) -> DbResult> { + self.get_opt::(block_id) + .map(|opt| opt.map(|val| val.0)) + } + + // State + + pub fn get_breakpoint(&self, br_id: u64) -> DbResult { + self.get::(br_id).map(|cell| cell.0) + } + + // Mappings + + pub fn get_block_id_by_hash(&self, hash: [u8; 32]) -> DbResult> { + self.get_opt::(hash) + .map(|opt| opt.map(|cell| cell.0)) + } + + pub fn get_block_id_by_tx_hash(&self, tx_hash: [u8; 32]) -> DbResult> { + self.get_opt::(tx_hash) + .map(|opt| opt.map(|cell| cell.0)) + } + + // Accounts meta + + pub(crate) fn get_acc_meta_num_tx(&self, acc_id: [u8; 32]) -> DbResult> { + self.get_opt::(acc_id) + .map(|opt| opt.map(|cell| cell.0)) + } +} diff --git a/storage/src/indexer/write_atomic.rs b/storage/src/indexer/write_atomic.rs new file mode 100644 index 00000000..9b661f3b --- /dev/null +++ b/storage/src/indexer/write_atomic.rs @@ -0,0 +1,273 @@ +use std::collections::HashMap; + +use rocksdb::WriteBatch; + +use super::{BREAKPOINT_INTERVAL, Block, DbError, DbResult, RocksDBIO}; +use crate::{ + DB_META_FIRST_BLOCK_IN_DB_KEY, DBIO as _, + cells::shared_cells::{FirstBlockSetCell, LastBlockCell}, + indexer::indexer_cells::{ + AccNumTxCell, BlockHashToBlockIdMapCell, LastBreakpointIdCell, LastObservedL1LibHeaderCell, + TxHashToBlockIdMapCell, + }, +}; + +#[expect(clippy::multiple_inherent_impl, reason = "Readability")] +impl RocksDBIO { + // Accounts meta + + pub(crate) fn update_acc_meta_batch( + &self, + acc_id: [u8; 32], + num_tx: u64, + write_batch: &mut WriteBatch, + ) -> DbResult<()> { + self.put_batch(&AccNumTxCell(num_tx), acc_id, write_batch) + } + + // Mappings + + pub fn put_block_id_by_hash_batch( + &self, + hash: [u8; 32], + block_id: u64, + write_batch: &mut WriteBatch, + ) -> DbResult<()> { + 
self.put_batch(&BlockHashToBlockIdMapCell(block_id), hash, write_batch) + } + + pub fn put_block_id_by_tx_hash_batch( + &self, + tx_hash: [u8; 32], + block_id: u64, + write_batch: &mut WriteBatch, + ) -> DbResult<()> { + self.put_batch(&TxHashToBlockIdMapCell(block_id), tx_hash, write_batch) + } + + // Account + + pub fn put_account_transactions( + &self, + acc_id: [u8; 32], + tx_hashes: &[[u8; 32]], + ) -> DbResult<()> { + let acc_num_tx = self.get_acc_meta_num_tx(acc_id)?.unwrap_or(0); + let cf_att = self.account_id_to_tx_hash_column(); + let mut write_batch = WriteBatch::new(); + + for (tx_id, tx_hash) in tx_hashes.iter().enumerate() { + let put_id = acc_num_tx + .checked_add(tx_id.try_into().expect("Must fit into u64")) + .expect("Tx count should be lesser that u64::MAX"); + + let mut prefix = borsh::to_vec(&acc_id).map_err(|berr| { + DbError::borsh_cast_message(berr, Some("Failed to serialize account id".to_owned())) + })?; + let suffix = borsh::to_vec(&put_id).map_err(|berr| { + DbError::borsh_cast_message(berr, Some("Failed to serialize tx id".to_owned())) + })?; + + prefix.extend_from_slice(&suffix); + + write_batch.put_cf( + &cf_att, + prefix, + borsh::to_vec(tx_hash).map_err(|berr| { + DbError::borsh_cast_message( + berr, + Some("Failed to serialize tx hash".to_owned()), + ) + })?, + ); + } + + self.update_acc_meta_batch( + acc_id, + acc_num_tx + .checked_add(tx_hashes.len().try_into().expect("Must fit into u64")) + .expect("Tx count should be lesser that u64::MAX"), + &mut write_batch, + )?; + + self.db.write(write_batch).map_err(|rerr| { + DbError::rocksdb_cast_message(rerr, Some("Failed to write batch".to_owned())) + }) + } + + pub fn put_account_transactions_dependant( + &self, + acc_id: [u8; 32], + tx_hashes: &[[u8; 32]], + write_batch: &mut WriteBatch, + ) -> DbResult<()> { + let acc_num_tx = self.get_acc_meta_num_tx(acc_id)?.unwrap_or(0); + let cf_att = self.account_id_to_tx_hash_column(); + + for (tx_id, tx_hash) in tx_hashes.iter().enumerate() { 
+ let put_id = acc_num_tx + .checked_add(tx_id.try_into().expect("Must fit into u64")) + .expect("Tx count should be lesser that u64::MAX"); + + let mut prefix = borsh::to_vec(&acc_id).map_err(|berr| { + DbError::borsh_cast_message(berr, Some("Failed to serialize account id".to_owned())) + })?; + let suffix = borsh::to_vec(&put_id).map_err(|berr| { + DbError::borsh_cast_message(berr, Some("Failed to serialize tx id".to_owned())) + })?; + + prefix.extend_from_slice(&suffix); + + write_batch.put_cf( + &cf_att, + prefix, + borsh::to_vec(tx_hash).map_err(|berr| { + DbError::borsh_cast_message( + berr, + Some("Failed to serialize tx hash".to_owned()), + ) + })?, + ); + } + + self.update_acc_meta_batch( + acc_id, + acc_num_tx + .checked_add(tx_hashes.len().try_into().expect("Must fit into u64")) + .expect("Tx count should be lesser that u64::MAX"), + write_batch, + )?; + + Ok(()) + } + + // Meta + + pub fn put_meta_first_block_in_db_batch(&self, block: &Block) -> DbResult<()> { + let cf_meta = self.meta_column(); + self.db + .put_cf( + &cf_meta, + borsh::to_vec(&DB_META_FIRST_BLOCK_IN_DB_KEY).map_err(|err| { + DbError::borsh_cast_message( + err, + Some("Failed to serialize DB_META_FIRST_BLOCK_IN_DB_KEY".to_owned()), + ) + })?, + borsh::to_vec(&block.header.block_id).map_err(|err| { + DbError::borsh_cast_message( + err, + Some("Failed to serialize first block id".to_owned()), + ) + })?, + ) + .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; + + self.put_block(block, [0; 32])?; + Ok(()) + } + + pub fn put_meta_last_block_in_db_batch( + &self, + block_id: u64, + write_batch: &mut WriteBatch, + ) -> DbResult<()> { + self.put_batch(&LastBlockCell(block_id), (), write_batch) + } + + pub fn put_meta_last_observed_l1_lib_header_in_db_batch( + &self, + l1_lib_header: [u8; 32], + write_batch: &mut WriteBatch, + ) -> DbResult<()> { + self.put_batch(&LastObservedL1LibHeaderCell(l1_lib_header), (), write_batch) + } + + pub fn put_meta_last_breakpoint_id_batch( + &self, + 
br_id: u64, + write_batch: &mut WriteBatch, + ) -> DbResult<()> { + self.put_batch(&LastBreakpointIdCell(br_id), (), write_batch) + } + + pub fn put_meta_is_first_block_set_batch(&self, write_batch: &mut WriteBatch) -> DbResult<()> { + self.put_batch(&FirstBlockSetCell(true), (), write_batch) + } + + // Block + + pub fn put_block(&self, block: &Block, l1_lib_header: [u8; 32]) -> DbResult<()> { + let cf_block = self.block_column(); + let last_curr_block = self.get_meta_last_block_in_db()?; + let mut write_batch = WriteBatch::default(); + + write_batch.put_cf( + &cf_block, + borsh::to_vec(&block.header.block_id).map_err(|err| { + DbError::borsh_cast_message(err, Some("Failed to serialize block id".to_owned())) + })?, + borsh::to_vec(block).map_err(|err| { + DbError::borsh_cast_message(err, Some("Failed to serialize block data".to_owned())) + })?, + ); + + if block.header.block_id > last_curr_block { + self.put_meta_last_block_in_db_batch(block.header.block_id, &mut write_batch)?; + self.put_meta_last_observed_l1_lib_header_in_db_batch(l1_lib_header, &mut write_batch)?; + } + + self.put_block_id_by_hash_batch( + block.header.hash.into(), + block.header.block_id, + &mut write_batch, + )?; + + let mut acc_to_tx_map: HashMap<[u8; 32], Vec<[u8; 32]>> = HashMap::new(); + + for tx in &block.body.transactions { + let tx_hash = tx.hash(); + + self.put_block_id_by_tx_hash_batch( + tx_hash.into(), + block.header.block_id, + &mut write_batch, + )?; + + let acc_ids = tx + .affected_public_account_ids() + .into_iter() + .map(nssa::AccountId::into_value) + .collect::>(); + + for acc_id in acc_ids { + acc_to_tx_map + .entry(acc_id) + .and_modify(|tx_hashes| tx_hashes.push(tx_hash.into())) + .or_insert_with(|| vec![tx_hash.into()]); + } + } + + #[expect( + clippy::iter_over_hash_type, + reason = "RocksDB will keep ordering persistent" + )] + for (acc_id, tx_hashes) in acc_to_tx_map { + self.put_account_transactions_dependant(acc_id, &tx_hashes, &mut write_batch)?; + } + + 
self.db.write(write_batch).map_err(|rerr| { + DbError::rocksdb_cast_message(rerr, Some("Failed to write batch".to_owned())) + })?; + + if block + .header + .block_id + .is_multiple_of(BREAKPOINT_INTERVAL.into()) + { + self.put_next_breakpoint()?; + } + + Ok(()) + } +} diff --git a/storage/src/indexer/write_non_atomic.rs b/storage/src/indexer/write_non_atomic.rs new file mode 100644 index 00000000..62b466a2 --- /dev/null +++ b/storage/src/indexer/write_non_atomic.rs @@ -0,0 +1,60 @@ +use super::{BREAKPOINT_INTERVAL, DbError, DbResult, RocksDBIO, V03State}; +use crate::{ + DBIO as _, + cells::shared_cells::{FirstBlockSetCell, LastBlockCell}, + indexer::indexer_cells::{ + BreakpointCellRef, LastBreakpointIdCell, LastObservedL1LibHeaderCell, + }, +}; + +#[expect(clippy::multiple_inherent_impl, reason = "Readability")] +impl RocksDBIO { + // Meta + + pub fn put_meta_last_block_in_db(&self, block_id: u64) -> DbResult<()> { + self.put(&LastBlockCell(block_id), ()) + } + + pub fn put_meta_last_observed_l1_lib_header_in_db( + &self, + l1_lib_header: [u8; 32], + ) -> DbResult<()> { + self.put(&LastObservedL1LibHeaderCell(l1_lib_header), ()) + } + + pub fn put_meta_last_breakpoint_id(&self, br_id: u64) -> DbResult<()> { + self.put(&LastBreakpointIdCell(br_id), ()) + } + + pub fn put_meta_is_first_block_set(&self) -> DbResult<()> { + self.put(&FirstBlockSetCell(true), ()) + } + + // State + + pub fn put_breakpoint(&self, br_id: u64, breakpoint: &V03State) -> DbResult<()> { + self.put(&BreakpointCellRef(breakpoint), br_id) + } + + pub fn put_next_breakpoint(&self) -> DbResult<()> { + let last_block = self.get_meta_last_block_in_db()?; + let next_breakpoint_id = self + .get_meta_last_breakpoint_id()? 
+ .checked_add(1) + .expect("Breakpoint Id will be lesser than u64::MAX"); + let block_to_break_id = next_breakpoint_id + .checked_mul(u64::from(BREAKPOINT_INTERVAL)) + .expect("Reached maximum breakpoint id"); + + if block_to_break_id <= last_block { + let next_breakpoint = self.calculate_state_for_id(block_to_break_id)?; + + self.put_breakpoint(next_breakpoint_id, &next_breakpoint)?; + self.put_meta_last_breakpoint_id(next_breakpoint_id) + } else { + Err(DbError::db_interaction_error( + "Breakpoint not yet achieved".to_owned(), + )) + } + } +} diff --git a/storage/src/lib.rs b/storage/src/lib.rs index 05c4a374..2edb0ee3 100644 --- a/storage/src/lib.rs +++ b/storage/src/lib.rs @@ -1,3 +1,69 @@ +use rocksdb::{DBWithThreadMode, MultiThreaded, WriteBatch}; + +use crate::{ + cells::{SimpleReadableCell, SimpleWritableCell}, + error::DbError, +}; + +pub mod cells; pub mod error; pub mod indexer; pub mod sequencer; + +/// Maximal size of stored blocks in base. +/// +/// Used to control db size. +/// +/// Currently effectively unbounded. +pub const BUFF_SIZE_ROCKSDB: usize = usize::MAX; + +/// Size of stored blocks cache in memory. +/// +/// Keeping small to not run out of memory. +pub const CACHE_SIZE: usize = 1000; + +/// Key base for storing metainformation which describe if first block has been set. +pub const DB_META_FIRST_BLOCK_SET_KEY: &str = "first_block_set"; +/// Key base for storing metainformation about id of first block in db. +pub const DB_META_FIRST_BLOCK_IN_DB_KEY: &str = "first_block_in_db"; +/// Key base for storing metainformation about id of last current block in db. +pub const DB_META_LAST_BLOCK_IN_DB_KEY: &str = "last_block_in_db"; + +/// Cell name for a block. +pub const BLOCK_CELL_NAME: &str = "block"; + +/// Interval between state breakpoints. +pub const BREAKPOINT_INTERVAL: u8 = 100; + +/// Name of block column family. +pub const CF_BLOCK_NAME: &str = "cf_block"; +/// Name of meta column family. 
+pub const CF_META_NAME: &str = "cf_meta"; + +pub type DbResult = Result; + +/// Minimal requirements for DB IO. +pub trait DBIO { + fn db(&self) -> &DBWithThreadMode; + + fn get(&self, params: T::KeyParams) -> DbResult { + T::get(self.db(), params) + } + + fn get_opt(&self, params: T::KeyParams) -> DbResult> { + T::get_opt(self.db(), params) + } + + fn put(&self, cell: &T, params: T::KeyParams) -> DbResult<()> { + cell.put(self.db(), params) + } + + fn put_batch( + &self, + cell: &T, + params: T::KeyParams, + write_batch: &mut WriteBatch, + ) -> DbResult<()> { + cell.put_batch(self.db(), params, write_batch) + } +} diff --git a/storage/src/sequencer.rs b/storage/src/sequencer.rs deleted file mode 100644 index 8d072a52..00000000 --- a/storage/src/sequencer.rs +++ /dev/null @@ -1,596 +0,0 @@ -use std::{path::Path, sync::Arc}; - -use common::block::{BedrockStatus, Block, BlockMeta, MantleMsgId}; -use nssa::V02State; -use rocksdb::{ - BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options, WriteBatch, -}; - -use crate::error::DbError; - -/// Maximal size of stored blocks in base. -/// -/// Used to control db size. -/// -/// Currently effectively unbounded. -pub const BUFF_SIZE_ROCKSDB: usize = usize::MAX; - -/// Size of stored blocks cache in memory. -/// -/// Keeping small to not run out of memory. -pub const CACHE_SIZE: usize = 1000; - -/// Key base for storing metainformation about id of first block in db. -pub const DB_META_FIRST_BLOCK_IN_DB_KEY: &str = "first_block_in_db"; -/// Key base for storing metainformation about id of last current block in db. -pub const DB_META_LAST_BLOCK_IN_DB_KEY: &str = "last_block_in_db"; -/// Key base for storing metainformation which describe if first block has been set. -pub const DB_META_FIRST_BLOCK_SET_KEY: &str = "first_block_set"; -/// Key base for storing metainformation about the last finalized block on Bedrock. 
-pub const DB_META_LAST_FINALIZED_BLOCK_ID: &str = "last_finalized_block_id"; -/// Key base for storing metainformation about the latest block meta. -pub const DB_META_LATEST_BLOCK_META_KEY: &str = "latest_block_meta"; - -/// Key base for storing the NSSA state. -pub const DB_NSSA_STATE_KEY: &str = "nssa_state"; - -/// Name of block column family. -pub const CF_BLOCK_NAME: &str = "cf_block"; -/// Name of meta column family. -pub const CF_META_NAME: &str = "cf_meta"; -/// Name of state column family. -pub const CF_NSSA_STATE_NAME: &str = "cf_nssa_state"; - -pub type DbResult = Result; - -pub struct RocksDBIO { - pub db: DBWithThreadMode, -} - -impl RocksDBIO { - pub fn open_or_create( - path: &Path, - genesis_block: &Block, - genesis_msg_id: MantleMsgId, - ) -> DbResult { - let mut cf_opts = Options::default(); - cf_opts.set_max_write_buffer_number(16); - // ToDo: Add more column families for different data - let cfb = ColumnFamilyDescriptor::new(CF_BLOCK_NAME, cf_opts.clone()); - let cfmeta = ColumnFamilyDescriptor::new(CF_META_NAME, cf_opts.clone()); - let cfstate = ColumnFamilyDescriptor::new(CF_NSSA_STATE_NAME, cf_opts.clone()); - - let mut db_opts = Options::default(); - db_opts.create_missing_column_families(true); - db_opts.create_if_missing(true); - let db = DBWithThreadMode::::open_cf_descriptors( - &db_opts, - path, - vec![cfb, cfmeta, cfstate], - ) - .map_err(|err| DbError::RocksDbError { - error: err, - additional_info: Some("Failed to open or create DB".to_owned()), - })?; - - let dbio = Self { db }; - - let is_start_set = dbio.get_meta_is_first_block_set()?; - if !is_start_set { - let block_id = genesis_block.header.block_id; - dbio.put_meta_first_block_in_db(genesis_block, genesis_msg_id)?; - dbio.put_meta_is_first_block_set()?; - dbio.put_meta_last_block_in_db(block_id)?; - dbio.put_meta_last_finalized_block_id(None)?; - dbio.put_meta_latest_block_meta(&BlockMeta { - id: genesis_block.header.block_id, - hash: genesis_block.header.hash, - msg_id: 
genesis_msg_id, - })?; - } - - Ok(dbio) - } - - pub fn destroy(path: &Path) -> DbResult<()> { - let mut cf_opts = Options::default(); - cf_opts.set_max_write_buffer_number(16); - // ToDo: Add more column families for different data - let _cfb = ColumnFamilyDescriptor::new(CF_BLOCK_NAME, cf_opts.clone()); - let _cfmeta = ColumnFamilyDescriptor::new(CF_META_NAME, cf_opts.clone()); - let _cfstate = ColumnFamilyDescriptor::new(CF_NSSA_STATE_NAME, cf_opts.clone()); - - let mut db_opts = Options::default(); - db_opts.create_missing_column_families(true); - db_opts.create_if_missing(true); - DBWithThreadMode::::destroy(&db_opts, path) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None)) - } - - pub fn meta_column(&self) -> Arc> { - self.db.cf_handle(CF_META_NAME).unwrap() - } - - pub fn block_column(&self) -> Arc> { - self.db.cf_handle(CF_BLOCK_NAME).unwrap() - } - - pub fn nssa_state_column(&self) -> Arc> { - self.db.cf_handle(CF_NSSA_STATE_NAME).unwrap() - } - - pub fn get_meta_first_block_in_db(&self) -> DbResult { - let cf_meta = self.meta_column(); - let res = self - .db - .get_cf( - &cf_meta, - borsh::to_vec(&DB_META_FIRST_BLOCK_IN_DB_KEY).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize DB_META_FIRST_BLOCK_IN_DB_KEY".to_owned()), - ) - })?, - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - - if let Some(data) = res { - Ok(borsh::from_slice::(&data).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to deserialize first block".to_owned()), - ) - })?) 
- } else { - Err(DbError::db_interaction_error( - "First block not found".to_owned(), - )) - } - } - - pub fn get_meta_last_block_in_db(&self) -> DbResult { - let cf_meta = self.meta_column(); - let res = self - .db - .get_cf( - &cf_meta, - borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_owned()), - ) - })?, - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - - if let Some(data) = res { - Ok(borsh::from_slice::(&data).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to deserialize last block".to_owned()), - ) - })?) - } else { - Err(DbError::db_interaction_error( - "Last block not found".to_owned(), - )) - } - } - - pub fn get_meta_is_first_block_set(&self) -> DbResult { - let cf_meta = self.meta_column(); - let res = self - .db - .get_cf( - &cf_meta, - borsh::to_vec(&DB_META_FIRST_BLOCK_SET_KEY).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize DB_META_FIRST_BLOCK_SET_KEY".to_owned()), - ) - })?, - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - - Ok(res.is_some()) - } - - pub fn put_nssa_state_in_db(&self, state: &V02State, batch: &mut WriteBatch) -> DbResult<()> { - let cf_nssa_state = self.nssa_state_column(); - batch.put_cf( - &cf_nssa_state, - borsh::to_vec(&DB_NSSA_STATE_KEY).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize DB_NSSA_STATE_KEY".to_owned()), - ) - })?, - borsh::to_vec(state).map_err(|err| { - DbError::borsh_cast_message(err, Some("Failed to serialize NSSA state".to_owned())) - })?, - ); - - Ok(()) - } - - pub fn put_meta_first_block_in_db(&self, block: &Block, msg_id: MantleMsgId) -> DbResult<()> { - let cf_meta = self.meta_column(); - self.db - .put_cf( - &cf_meta, - borsh::to_vec(&DB_META_FIRST_BLOCK_IN_DB_KEY).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize 
DB_META_FIRST_BLOCK_IN_DB_KEY".to_owned()), - ) - })?, - borsh::to_vec(&block.header.block_id).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize first block id".to_owned()), - ) - })?, - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - - let mut batch = WriteBatch::default(); - self.put_block(block, msg_id, true, &mut batch)?; - self.db.write(batch).map_err(|rerr| { - DbError::rocksdb_cast_message( - rerr, - Some("Failed to write first block in db".to_owned()), - ) - })?; - - Ok(()) - } - - pub fn put_meta_last_block_in_db(&self, block_id: u64) -> DbResult<()> { - let cf_meta = self.meta_column(); - self.db - .put_cf( - &cf_meta, - borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_owned()), - ) - })?, - borsh::to_vec(&block_id).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize last block id".to_owned()), - ) - })?, - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - Ok(()) - } - - fn put_meta_last_block_in_db_batch( - &self, - block_id: u64, - batch: &mut WriteBatch, - ) -> DbResult<()> { - let cf_meta = self.meta_column(); - batch.put_cf( - &cf_meta, - borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_owned()), - ) - })?, - borsh::to_vec(&block_id).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize last block id".to_owned()), - ) - })?, - ); - Ok(()) - } - - pub fn put_meta_last_finalized_block_id(&self, block_id: Option) -> DbResult<()> { - let cf_meta = self.meta_column(); - self.db - .put_cf( - &cf_meta, - borsh::to_vec(&DB_META_LAST_FINALIZED_BLOCK_ID).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize DB_META_LAST_FINALIZED_BLOCK_ID".to_owned()), - ) - })?, - 
borsh::to_vec(&block_id).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize last block id".to_owned()), - ) - })?, - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - Ok(()) - } - - pub fn put_meta_is_first_block_set(&self) -> DbResult<()> { - let cf_meta = self.meta_column(); - self.db - .put_cf( - &cf_meta, - borsh::to_vec(&DB_META_FIRST_BLOCK_SET_KEY).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize DB_META_FIRST_BLOCK_SET_KEY".to_owned()), - ) - })?, - [1_u8; 1], - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - Ok(()) - } - - fn put_meta_latest_block_meta(&self, block_meta: &BlockMeta) -> DbResult<()> { - let cf_meta = self.meta_column(); - self.db - .put_cf( - &cf_meta, - borsh::to_vec(&DB_META_LATEST_BLOCK_META_KEY).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize DB_META_LATEST_BLOCK_META_KEY".to_owned()), - ) - })?, - borsh::to_vec(&block_meta).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize latest block meta".to_owned()), - ) - })?, - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - Ok(()) - } - - fn put_meta_latest_block_meta_batch( - &self, - block_meta: &BlockMeta, - batch: &mut WriteBatch, - ) -> DbResult<()> { - let cf_meta = self.meta_column(); - batch.put_cf( - &cf_meta, - borsh::to_vec(&DB_META_LATEST_BLOCK_META_KEY).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize DB_META_LATEST_BLOCK_META_KEY".to_owned()), - ) - })?, - borsh::to_vec(&block_meta).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize latest block meta".to_owned()), - ) - })?, - ); - Ok(()) - } - - pub fn latest_block_meta(&self) -> DbResult { - let cf_meta = self.meta_column(); - let res = self - .db - .get_cf( - &cf_meta, - borsh::to_vec(&DB_META_LATEST_BLOCK_META_KEY).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed 
to serialize DB_META_LATEST_BLOCK_META_KEY".to_owned()), - ) - })?, - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - - if let Some(data) = res { - Ok(borsh::from_slice::(&data).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to deserialize latest block meta".to_owned()), - ) - })?) - } else { - Err(DbError::db_interaction_error( - "Latest block meta not found".to_owned(), - )) - } - } - - pub fn put_block( - &self, - block: &Block, - msg_id: MantleMsgId, - first: bool, - batch: &mut WriteBatch, - ) -> DbResult<()> { - let cf_block = self.block_column(); - - if !first { - let last_curr_block = self.get_meta_last_block_in_db()?; - - if block.header.block_id > last_curr_block { - self.put_meta_last_block_in_db_batch(block.header.block_id, batch)?; - self.put_meta_latest_block_meta_batch( - &BlockMeta { - id: block.header.block_id, - hash: block.header.hash, - msg_id, - }, - batch, - )?; - } - } - - batch.put_cf( - &cf_block, - borsh::to_vec(&block.header.block_id).map_err(|err| { - DbError::borsh_cast_message(err, Some("Failed to serialize block id".to_owned())) - })?, - borsh::to_vec(block).map_err(|err| { - DbError::borsh_cast_message(err, Some("Failed to serialize block data".to_owned())) - })?, - ); - Ok(()) - } - - pub fn get_block(&self, block_id: u64) -> DbResult { - let cf_block = self.block_column(); - let res = self - .db - .get_cf( - &cf_block, - borsh::to_vec(&block_id).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize block id".to_owned()), - ) - })?, - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - - if let Some(data) = res { - Ok(borsh::from_slice::(&data).map_err(|serr| { - DbError::borsh_cast_message( - serr, - Some("Failed to deserialize block data".to_owned()), - ) - })?) 
- } else { - Err(DbError::db_interaction_error( - "Block on this id not found".to_owned(), - )) - } - } - - pub fn get_nssa_state(&self) -> DbResult { - let cf_nssa_state = self.nssa_state_column(); - let res = self - .db - .get_cf( - &cf_nssa_state, - borsh::to_vec(&DB_NSSA_STATE_KEY).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize block id".to_owned()), - ) - })?, - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - - if let Some(data) = res { - Ok(borsh::from_slice::(&data).map_err(|serr| { - DbError::borsh_cast_message( - serr, - Some("Failed to deserialize block data".to_owned()), - ) - })?) - } else { - Err(DbError::db_interaction_error( - "Block on this id not found".to_owned(), - )) - } - } - - pub fn delete_block(&self, block_id: u64) -> DbResult<()> { - let cf_block = self.block_column(); - let key = borsh::to_vec(&block_id).map_err(|err| { - DbError::borsh_cast_message(err, Some("Failed to serialize block id".to_owned())) - })?; - - if self - .db - .get_cf(&cf_block, &key) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))? 
- .is_none() - { - return Err(DbError::db_interaction_error( - "Block on this id not found".to_owned(), - )); - } - - self.db - .delete_cf(&cf_block, key) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - - Ok(()) - } - - pub fn mark_block_as_finalized(&self, block_id: u64) -> DbResult<()> { - let mut block = self.get_block(block_id)?; - block.bedrock_status = BedrockStatus::Finalized; - - let cf_block = self.block_column(); - self.db - .put_cf( - &cf_block, - borsh::to_vec(&block_id).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize block id".to_owned()), - ) - })?, - borsh::to_vec(&block).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize block data".to_owned()), - ) - })?, - ) - .map_err(|rerr| { - DbError::rocksdb_cast_message( - rerr, - Some(format!("Failed to mark block {block_id} as finalized")), - ) - })?; - - Ok(()) - } - - pub fn get_all_blocks(&self) -> impl Iterator> { - let cf_block = self.block_column(); - self.db - .iterator_cf(&cf_block, rocksdb::IteratorMode::Start) - .map(|res| { - let (_key, value) = res.map_err(|rerr| { - DbError::rocksdb_cast_message( - rerr, - Some("Failed to get key value pair".to_owned()), - ) - })?; - - borsh::from_slice::(&value).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to deserialize block data".to_owned()), - ) - }) - }) - } - - pub fn atomic_update( - &self, - block: &Block, - msg_id: MantleMsgId, - state: &V02State, - ) -> DbResult<()> { - let block_id = block.header.block_id; - let mut batch = WriteBatch::default(); - self.put_block(block, msg_id, false, &mut batch)?; - self.put_nssa_state_in_db(state, &mut batch)?; - self.db.write(batch).map_err(|rerr| { - DbError::rocksdb_cast_message( - rerr, - Some(format!("Failed to udpate db with block {block_id}")), - ) - }) - } -} diff --git a/storage/src/sequencer/mod.rs b/storage/src/sequencer/mod.rs new file mode 100644 index 00000000..508f6c29 --- /dev/null +++ 
b/storage/src/sequencer/mod.rs @@ -0,0 +1,349 @@ +use std::{path::Path, sync::Arc}; + +use common::block::{BedrockStatus, Block, BlockMeta, MantleMsgId}; +use nssa::V03State; +use rocksdb::{ + BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options, WriteBatch, +}; + +use crate::{ + CF_BLOCK_NAME, CF_META_NAME, DB_META_FIRST_BLOCK_IN_DB_KEY, DBIO, DbResult, + cells::shared_cells::{BlockCell, FirstBlockCell, FirstBlockSetCell, LastBlockCell}, + error::DbError, + sequencer::sequencer_cells::{ + LastFinalizedBlockIdCell, LatestBlockMetaCellOwned, LatestBlockMetaCellRef, + NSSAStateCellOwned, NSSAStateCellRef, + }, +}; + +pub mod sequencer_cells; + +/// Key base for storing metainformation about the last finalized block on Bedrock. +pub const DB_META_LAST_FINALIZED_BLOCK_ID: &str = "last_finalized_block_id"; +/// Key base for storing metainformation about the latest block meta. +pub const DB_META_LATEST_BLOCK_META_KEY: &str = "latest_block_meta"; + +/// Key base for storing the NSSA state. +pub const DB_NSSA_STATE_KEY: &str = "nssa_state"; + +/// Name of state column family. 
+pub const CF_NSSA_STATE_NAME: &str = "cf_nssa_state"; + +pub struct RocksDBIO { + pub db: DBWithThreadMode, +} + +impl DBIO for RocksDBIO { + fn db(&self) -> &DBWithThreadMode { + &self.db + } +} + +impl RocksDBIO { + pub fn open_or_create( + path: &Path, + genesis_block: &Block, + genesis_msg_id: MantleMsgId, + ) -> DbResult { + let mut cf_opts = Options::default(); + cf_opts.set_max_write_buffer_number(16); + // ToDo: Add more column families for different data + let cfb = ColumnFamilyDescriptor::new(CF_BLOCK_NAME, cf_opts.clone()); + let cfmeta = ColumnFamilyDescriptor::new(CF_META_NAME, cf_opts.clone()); + let cfstate = ColumnFamilyDescriptor::new(CF_NSSA_STATE_NAME, cf_opts.clone()); + + let mut db_opts = Options::default(); + db_opts.create_missing_column_families(true); + db_opts.create_if_missing(true); + let db = DBWithThreadMode::::open_cf_descriptors( + &db_opts, + path, + vec![cfb, cfmeta, cfstate], + ) + .map_err(|err| DbError::RocksDbError { + error: err, + additional_info: Some("Failed to open or create DB".to_owned()), + })?; + + let dbio = Self { db }; + + let is_start_set = dbio.get_meta_is_first_block_set()?; + if !is_start_set { + let block_id = genesis_block.header.block_id; + dbio.put_meta_first_block_in_db(genesis_block, genesis_msg_id)?; + dbio.put_meta_is_first_block_set()?; + dbio.put_meta_last_block_in_db(block_id)?; + dbio.put_meta_last_finalized_block_id(None)?; + dbio.put_meta_latest_block_meta(&BlockMeta { + id: genesis_block.header.block_id, + hash: genesis_block.header.hash, + msg_id: genesis_msg_id, + })?; + } + + Ok(dbio) + } + + pub fn destroy(path: &Path) -> DbResult<()> { + let mut cf_opts = Options::default(); + cf_opts.set_max_write_buffer_number(16); + // ToDo: Add more column families for different data + let _cfb = ColumnFamilyDescriptor::new(CF_BLOCK_NAME, cf_opts.clone()); + let _cfmeta = ColumnFamilyDescriptor::new(CF_META_NAME, cf_opts.clone()); + let _cfstate = ColumnFamilyDescriptor::new(CF_NSSA_STATE_NAME, 
cf_opts.clone()); + + let mut db_opts = Options::default(); + db_opts.create_missing_column_families(true); + db_opts.create_if_missing(true); + DBWithThreadMode::::destroy(&db_opts, path) + .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None)) + } + + // Columns + + pub fn meta_column(&self) -> Arc> { + self.db + .cf_handle(CF_META_NAME) + .expect("Meta column should exist") + } + + pub fn block_column(&self) -> Arc> { + self.db + .cf_handle(CF_BLOCK_NAME) + .expect("Block column should exist") + } + + pub fn nssa_state_column(&self) -> Arc> { + self.db + .cf_handle(CF_NSSA_STATE_NAME) + .expect("State should exist") + } + + // Meta + + pub fn get_meta_first_block_in_db(&self) -> DbResult { + self.get::(()).map(|cell| cell.0) + } + + pub fn get_meta_last_block_in_db(&self) -> DbResult { + self.get::(()).map(|cell| cell.0) + } + + pub fn get_meta_is_first_block_set(&self) -> DbResult { + Ok(self.get_opt::(())?.is_some()) + } + + pub fn put_nssa_state_in_db(&self, state: &V03State, batch: &mut WriteBatch) -> DbResult<()> { + self.put_batch(&NSSAStateCellRef(state), (), batch) + } + + pub fn put_meta_first_block_in_db(&self, block: &Block, msg_id: MantleMsgId) -> DbResult<()> { + let cf_meta = self.meta_column(); + self.db + .put_cf( + &cf_meta, + borsh::to_vec(&DB_META_FIRST_BLOCK_IN_DB_KEY).map_err(|err| { + DbError::borsh_cast_message( + err, + Some("Failed to serialize DB_META_FIRST_BLOCK_IN_DB_KEY".to_owned()), + ) + })?, + borsh::to_vec(&block.header.block_id).map_err(|err| { + DbError::borsh_cast_message( + err, + Some("Failed to serialize first block id".to_owned()), + ) + })?, + ) + .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; + + let mut batch = WriteBatch::default(); + self.put_block(block, msg_id, true, &mut batch)?; + self.db.write(batch).map_err(|rerr| { + DbError::rocksdb_cast_message( + rerr, + Some("Failed to write first block in db".to_owned()), + ) + })?; + + Ok(()) + } + + pub fn put_meta_last_block_in_db(&self, block_id: u64) 
-> DbResult<()> { + self.put(&LastBlockCell(block_id), ()) + } + + fn put_meta_last_block_in_db_batch( + &self, + block_id: u64, + batch: &mut WriteBatch, + ) -> DbResult<()> { + self.put_batch(&LastBlockCell(block_id), (), batch) + } + + pub fn put_meta_last_finalized_block_id(&self, block_id: Option) -> DbResult<()> { + self.put(&LastFinalizedBlockIdCell(block_id), ()) + } + + pub fn put_meta_is_first_block_set(&self) -> DbResult<()> { + self.put(&FirstBlockSetCell(true), ()) + } + + fn put_meta_latest_block_meta(&self, block_meta: &BlockMeta) -> DbResult<()> { + self.put(&LatestBlockMetaCellRef(block_meta), ()) + } + + fn put_meta_latest_block_meta_batch( + &self, + block_meta: &BlockMeta, + batch: &mut WriteBatch, + ) -> DbResult<()> { + self.put_batch(&LatestBlockMetaCellRef(block_meta), (), batch) + } + + pub fn latest_block_meta(&self) -> DbResult { + self.get::(()).map(|val| val.0) + } + + pub fn put_block( + &self, + block: &Block, + msg_id: MantleMsgId, + first: bool, + batch: &mut WriteBatch, + ) -> DbResult<()> { + let cf_block = self.block_column(); + + if !first { + let last_curr_block = self.get_meta_last_block_in_db()?; + + if block.header.block_id > last_curr_block { + self.put_meta_last_block_in_db_batch(block.header.block_id, batch)?; + self.put_meta_latest_block_meta_batch( + &BlockMeta { + id: block.header.block_id, + hash: block.header.hash, + msg_id, + }, + batch, + )?; + } + } + + batch.put_cf( + &cf_block, + borsh::to_vec(&block.header.block_id).map_err(|err| { + DbError::borsh_cast_message(err, Some("Failed to serialize block id".to_owned())) + })?, + borsh::to_vec(block).map_err(|err| { + DbError::borsh_cast_message(err, Some("Failed to serialize block data".to_owned())) + })?, + ); + Ok(()) + } + + pub fn get_block(&self, block_id: u64) -> DbResult> { + self.get_opt::(block_id) + .map(|opt| opt.map(|val| val.0)) + } + + pub fn get_nssa_state(&self) -> DbResult { + self.get::(()).map(|val| val.0) + } + + pub fn delete_block(&self, 
block_id: u64) -> DbResult<()> { + let cf_block = self.block_column(); + let key = borsh::to_vec(&block_id).map_err(|err| { + DbError::borsh_cast_message(err, Some("Failed to serialize block id".to_owned())) + })?; + + if self + .db + .get_cf(&cf_block, &key) + .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))? + .is_none() + { + return Err(DbError::db_interaction_error(format!( + "Block with id {block_id} not found" + ))); + } + + self.db + .delete_cf(&cf_block, key) + .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; + + Ok(()) + } + + pub fn mark_block_as_finalized(&self, block_id: u64) -> DbResult<()> { + let mut block = self.get_block(block_id)?.ok_or_else(|| { + DbError::db_interaction_error(format!("Block with id {block_id} not found")) + })?; + block.bedrock_status = BedrockStatus::Finalized; + + let cf_block = self.block_column(); + self.db + .put_cf( + &cf_block, + borsh::to_vec(&block_id).map_err(|err| { + DbError::borsh_cast_message( + err, + Some("Failed to serialize block id".to_owned()), + ) + })?, + borsh::to_vec(&block).map_err(|err| { + DbError::borsh_cast_message( + err, + Some("Failed to serialize block data".to_owned()), + ) + })?, + ) + .map_err(|rerr| { + DbError::rocksdb_cast_message( + rerr, + Some(format!("Failed to mark block {block_id} as finalized")), + ) + })?; + + Ok(()) + } + + pub fn get_all_blocks(&self) -> impl Iterator> { + let cf_block = self.block_column(); + self.db + .iterator_cf(&cf_block, rocksdb::IteratorMode::Start) + .map(|res| { + let (_key, value) = res.map_err(|rerr| { + DbError::rocksdb_cast_message( + rerr, + Some("Failed to get key value pair".to_owned()), + ) + })?; + + borsh::from_slice::(&value).map_err(|err| { + DbError::borsh_cast_message( + err, + Some("Failed to deserialize block data".to_owned()), + ) + }) + }) + } + + pub fn atomic_update( + &self, + block: &Block, + msg_id: MantleMsgId, + state: &V03State, + ) -> DbResult<()> { + let block_id = block.header.block_id; + let mut batch = 
WriteBatch::default(); + self.put_block(block, msg_id, false, &mut batch)?; + self.put_nssa_state_in_db(state, &mut batch)?; + self.db.write(batch).map_err(|rerr| { + DbError::rocksdb_cast_message( + rerr, + Some(format!("Failed to udpate db with block {block_id}")), + ) + }) + } +} diff --git a/storage/src/sequencer/sequencer_cells.rs b/storage/src/sequencer/sequencer_cells.rs new file mode 100644 index 00000000..0ad092d7 --- /dev/null +++ b/storage/src/sequencer/sequencer_cells.rs @@ -0,0 +1,132 @@ +use borsh::{BorshDeserialize, BorshSerialize}; +use common::block::BlockMeta; +use nssa::V03State; + +use crate::{ + CF_META_NAME, DbResult, + cells::{SimpleReadableCell, SimpleStorableCell, SimpleWritableCell}, + error::DbError, + sequencer::{ + CF_NSSA_STATE_NAME, DB_META_LAST_FINALIZED_BLOCK_ID, DB_META_LATEST_BLOCK_META_KEY, + DB_NSSA_STATE_KEY, + }, +}; + +#[derive(BorshDeserialize)] +pub struct NSSAStateCellOwned(pub V03State); + +impl SimpleStorableCell for NSSAStateCellOwned { + type KeyParams = (); + + const CELL_NAME: &'static str = DB_NSSA_STATE_KEY; + const CF_NAME: &'static str = CF_NSSA_STATE_NAME; +} + +impl SimpleReadableCell for NSSAStateCellOwned {} + +#[derive(BorshSerialize)] +pub struct NSSAStateCellRef<'state>(pub &'state V03State); + +impl SimpleStorableCell for NSSAStateCellRef<'_> { + type KeyParams = (); + + const CELL_NAME: &'static str = DB_NSSA_STATE_KEY; + const CF_NAME: &'static str = CF_NSSA_STATE_NAME; +} + +impl SimpleWritableCell for NSSAStateCellRef<'_> { + fn value_constructor(&self) -> DbResult> { + borsh::to_vec(&self).map_err(|err| { + DbError::borsh_cast_message(err, Some("Failed to serialize last state".to_owned())) + }) + } +} + +#[derive(Debug, BorshSerialize, BorshDeserialize)] +pub struct LastFinalizedBlockIdCell(pub Option); + +impl SimpleStorableCell for LastFinalizedBlockIdCell { + type KeyParams = (); + + const CELL_NAME: &'static str = DB_META_LAST_FINALIZED_BLOCK_ID; + const CF_NAME: &'static str = CF_META_NAME; +} + 
+impl SimpleReadableCell for LastFinalizedBlockIdCell {} + +impl SimpleWritableCell for LastFinalizedBlockIdCell { + fn value_constructor(&self) -> DbResult> { + borsh::to_vec(&self).map_err(|err| { + DbError::borsh_cast_message( + err, + Some("Failed to serialize last finalized block id".to_owned()), + ) + }) + } +} + +#[derive(BorshDeserialize)] +pub struct LatestBlockMetaCellOwned(pub BlockMeta); + +impl SimpleStorableCell for LatestBlockMetaCellOwned { + type KeyParams = (); + + const CELL_NAME: &'static str = DB_META_LATEST_BLOCK_META_KEY; + const CF_NAME: &'static str = CF_META_NAME; +} + +impl SimpleReadableCell for LatestBlockMetaCellOwned {} + +#[derive(BorshSerialize)] +pub struct LatestBlockMetaCellRef<'blockmeta>(pub &'blockmeta BlockMeta); + +impl SimpleStorableCell for LatestBlockMetaCellRef<'_> { + type KeyParams = (); + + const CELL_NAME: &'static str = DB_META_LATEST_BLOCK_META_KEY; + const CF_NAME: &'static str = CF_META_NAME; +} + +impl SimpleWritableCell for LatestBlockMetaCellRef<'_> { + fn value_constructor(&self) -> DbResult> { + borsh::to_vec(&self).map_err(|err| { + DbError::borsh_cast_message(err, Some("Failed to serialize last block meta".to_owned())) + }) + } +} + +#[cfg(test)] +mod uniform_tests { + use crate::{ + cells::SimpleStorableCell as _, + sequencer::sequencer_cells::{ + LatestBlockMetaCellOwned, LatestBlockMetaCellRef, NSSAStateCellOwned, NSSAStateCellRef, + }, + }; + + #[test] + fn state_ref_and_owned_is_aligned() { + assert_eq!(NSSAStateCellRef::CELL_NAME, NSSAStateCellOwned::CELL_NAME); + assert_eq!(NSSAStateCellRef::CF_NAME, NSSAStateCellOwned::CF_NAME); + assert_eq!( + NSSAStateCellRef::key_constructor(()).unwrap(), + NSSAStateCellOwned::key_constructor(()).unwrap() + ); + } + + #[test] + fn block_meta_ref_and_owned_is_aligned() { + assert_eq!( + LatestBlockMetaCellRef::CELL_NAME, + LatestBlockMetaCellOwned::CELL_NAME + ); + assert_eq!( + LatestBlockMetaCellRef::CF_NAME, + LatestBlockMetaCellOwned::CF_NAME + ); + 
assert_eq!( + LatestBlockMetaCellRef::key_constructor(()).unwrap(), + LatestBlockMetaCellOwned::key_constructor(()).unwrap() + ); + } +} diff --git a/test_program_methods/guest/Cargo.toml b/test_program_methods/guest/Cargo.toml index 1ca958b3..46edeb61 100644 --- a/test_program_methods/guest/Cargo.toml +++ b/test_program_methods/guest/Cargo.toml @@ -9,5 +9,7 @@ workspace = true [dependencies] nssa_core.workspace = true +clock_core.workspace = true risc0-zkvm.workspace = true +serde = { workspace = true, default-features = false } diff --git a/test_program_methods/guest/src/bin/burner.rs b/test_program_methods/guest/src/bin/burner.rs index a2256aa3..02be2d38 100644 --- a/test_program_methods/guest/src/bin/burner.rs +++ b/test_program_methods/guest/src/bin/burner.rs @@ -1,10 +1,12 @@ -use nssa_core::program::{AccountPostState, ProgramInput, read_nssa_inputs, write_nssa_outputs}; +use nssa_core::program::{AccountPostState, ProgramInput, ProgramOutput, read_nssa_inputs}; type Instruction = u128; fn main() { let ( ProgramInput { + self_program_id, + caller_program_id, pre_states, instruction: balance_to_burn, }, @@ -19,9 +21,12 @@ fn main() { let mut account_post = account_pre.clone(); account_post.balance = account_post.balance.saturating_sub(balance_to_burn); - write_nssa_outputs( + ProgramOutput::new( + self_program_id, + caller_program_id, instruction_words, vec![pre], vec![AccountPostState::new(account_post)], - ); + ) + .write(); } diff --git a/test_program_methods/guest/src/bin/chain_caller.rs b/test_program_methods/guest/src/bin/chain_caller.rs index 7e67fa9b..5c124bed 100644 --- a/test_program_methods/guest/src/bin/chain_caller.rs +++ b/test_program_methods/guest/src/bin/chain_caller.rs @@ -1,6 +1,6 @@ use nssa_core::program::{ - AccountPostState, ChainedCall, PdaSeed, ProgramId, ProgramInput, read_nssa_inputs, - write_nssa_outputs_with_chained_call, + AccountPostState, ChainedCall, PdaSeed, ProgramId, ProgramInput, ProgramOutput, + read_nssa_inputs, }; use 
risc0_zkvm::serde::to_vec; @@ -13,6 +13,8 @@ type Instruction = (u128, ProgramId, u32, Option); fn main() { let ( ProgramInput { + self_program_id, + caller_program_id, pre_states, instruction: (balance, auth_transfer_id, num_chain_calls, pda_seed), }, @@ -38,7 +40,7 @@ fn main() { program_id: auth_transfer_id, instruction_data: instruction_data.clone(), pre_states: vec![running_sender_pre.clone(), running_recipient_pre.clone()], /* <- Account order permutation here */ - pda_seeds: pda_seed.iter().cloned().collect(), + pda_seeds: pda_seed.iter().copied().collect(), }; chained_calls.push(new_chained_call); @@ -54,13 +56,16 @@ fn main() { }; } - write_nssa_outputs_with_chained_call( + ProgramOutput::new( + self_program_id, + caller_program_id, instruction_words, vec![sender_pre.clone(), recipient_pre.clone()], vec![ AccountPostState::new(sender_pre.account), AccountPostState::new(recipient_pre.account), ], - chained_calls, - ); + ) + .with_chained_calls(chained_calls) + .write(); } diff --git a/test_program_methods/guest/src/bin/changer_claimer.rs b/test_program_methods/guest/src/bin/changer_claimer.rs index 37079737..6d2b51b4 100644 --- a/test_program_methods/guest/src/bin/changer_claimer.rs +++ b/test_program_methods/guest/src/bin/changer_claimer.rs @@ -1,4 +1,4 @@ -use nssa_core::program::{AccountPostState, ProgramInput, read_nssa_inputs, write_nssa_outputs}; +use nssa_core::program::{AccountPostState, Claim, ProgramInput, ProgramOutput, read_nssa_inputs}; type Instruction = (Option>, bool); @@ -6,6 +6,8 @@ type Instruction = (Option>, bool); fn main() { let ( ProgramInput { + self_program_id, + caller_program_id, pre_states, instruction: (data_opt, should_claim), }, @@ -28,10 +30,17 @@ fn main() { // Claim or not based on the boolean flag let post_state = if should_claim { - AccountPostState::new_claimed(account_post) + AccountPostState::new_claimed(account_post, Claim::Authorized) } else { AccountPostState::new(account_post) }; - 
write_nssa_outputs(instruction_words, vec![pre], vec![post_state]); + ProgramOutput::new( + self_program_id, + caller_program_id, + instruction_words, + vec![pre], + vec![post_state], + ) + .write(); } diff --git a/test_program_methods/guest/src/bin/claimer.rs b/test_program_methods/guest/src/bin/claimer.rs index 897ca6a6..a3a7fb19 100644 --- a/test_program_methods/guest/src/bin/claimer.rs +++ b/test_program_methods/guest/src/bin/claimer.rs @@ -1,10 +1,12 @@ -use nssa_core::program::{AccountPostState, ProgramInput, read_nssa_inputs, write_nssa_outputs}; +use nssa_core::program::{AccountPostState, Claim, ProgramInput, ProgramOutput, read_nssa_inputs}; type Instruction = (); fn main() { let ( ProgramInput { + self_program_id, + caller_program_id, pre_states, instruction: (), }, @@ -15,7 +17,14 @@ fn main() { return; }; - let account_post = AccountPostState::new_claimed(pre.account.clone()); + let account_post = AccountPostState::new_claimed(pre.account.clone(), Claim::Authorized); - write_nssa_outputs(instruction_words, vec![pre], vec![account_post]); + ProgramOutput::new( + self_program_id, + caller_program_id, + instruction_words, + vec![pre], + vec![account_post], + ) + .write(); } diff --git a/test_program_methods/guest/src/bin/clock_chain_caller.rs b/test_program_methods/guest/src/bin/clock_chain_caller.rs new file mode 100644 index 00000000..cdbe5214 --- /dev/null +++ b/test_program_methods/guest/src/bin/clock_chain_caller.rs @@ -0,0 +1,46 @@ +use nssa_core::{ + Timestamp, + program::{ + AccountPostState, ChainedCall, ProgramId, ProgramInput, ProgramOutput, read_nssa_inputs, + }, +}; +use risc0_zkvm::serde::to_vec; + +type Instruction = (ProgramId, Timestamp); // (clock_program_id, timestamp) + +/// A program that chain-calls the clock program with the clock accounts it received as pre-states. +/// Used in tests to verify that user transactions cannot modify clock accounts, even indirectly +/// via chain calls. 
+fn main() { + let ( + ProgramInput { + self_program_id, + caller_program_id, + pre_states, + instruction: (clock_program_id, timestamp), + }, + instruction_words, + ) = read_nssa_inputs::(); + + let post_states: Vec<_> = pre_states + .iter() + .map(|pre| AccountPostState::new(pre.account.clone())) + .collect(); + + let chained_call = ChainedCall { + program_id: clock_program_id, + instruction_data: to_vec(×tamp).unwrap(), + pre_states: pre_states.clone(), + pda_seeds: vec![], + }; + + ProgramOutput::new( + self_program_id, + caller_program_id, + instruction_words, + pre_states, + post_states, + ) + .with_chained_calls(vec![chained_call]) + .write(); +} diff --git a/test_program_methods/guest/src/bin/data_changer.rs b/test_program_methods/guest/src/bin/data_changer.rs index c689dce5..3969d7f6 100644 --- a/test_program_methods/guest/src/bin/data_changer.rs +++ b/test_program_methods/guest/src/bin/data_changer.rs @@ -1,4 +1,4 @@ -use nssa_core::program::{AccountPostState, ProgramInput, read_nssa_inputs, write_nssa_outputs}; +use nssa_core::program::{AccountPostState, Claim, ProgramInput, ProgramOutput, read_nssa_inputs}; type Instruction = Vec; @@ -6,6 +6,8 @@ type Instruction = Vec; fn main() { let ( ProgramInput { + self_program_id, + caller_program_id, pre_states, instruction: data, }, @@ -22,9 +24,15 @@ fn main() { .try_into() .expect("provided data should fit into data limit"); - write_nssa_outputs( + ProgramOutput::new( + self_program_id, + caller_program_id, instruction_words, vec![pre], - vec![AccountPostState::new_claimed(account_post)], - ); + vec![AccountPostState::new_claimed( + account_post, + Claim::Authorized, + )], + ) + .write(); } diff --git a/test_program_methods/guest/src/bin/extra_output.rs b/test_program_methods/guest/src/bin/extra_output.rs index 4d67df6e..3a5df556 100644 --- a/test_program_methods/guest/src/bin/extra_output.rs +++ b/test_program_methods/guest/src/bin/extra_output.rs @@ -1,12 +1,20 @@ use nssa_core::{ account::Account, - 
program::{AccountPostState, ProgramInput, read_nssa_inputs, write_nssa_outputs}, + program::{AccountPostState, ProgramInput, ProgramOutput, read_nssa_inputs}, }; type Instruction = (); fn main() { - let (ProgramInput { pre_states, .. }, instruction_words) = read_nssa_inputs::(); + let ( + ProgramInput { + self_program_id, + caller_program_id, + pre_states, + .. + }, + instruction_words, + ) = read_nssa_inputs::(); let Ok([pre]) = <[_; 1]>::try_from(pre_states) else { return; @@ -14,12 +22,15 @@ fn main() { let account_pre = pre.account.clone(); - write_nssa_outputs( + ProgramOutput::new( + self_program_id, + caller_program_id, instruction_words, vec![pre], vec![ AccountPostState::new(account_pre), AccountPostState::new(Account::default()), ], - ); + ) + .write(); } diff --git a/test_program_methods/guest/src/bin/flash_swap_callback.rs b/test_program_methods/guest/src/bin/flash_swap_callback.rs new file mode 100644 index 00000000..251833bb --- /dev/null +++ b/test_program_methods/guest/src/bin/flash_swap_callback.rs @@ -0,0 +1,94 @@ +//! Flash swap callback, the user logic step in the "prep → callback → assert" pattern. +//! +//! # Role +//! +//! This program is called as chained call 2 in the flash swap sequence: +//! 1. Token transfer out (vault → receiver) +//! 2. **This callback** (user logic) +//! 3. Invariant check (assert vault balance restored) +//! +//! In a real flash swap, this would contain the user's arbitrage or other logic. +//! In this test program, it is controlled by `return_funds`: +//! +//! - `return_funds = true`: emits a token transfer (receiver → vault) to return the funds. The +//! invariant check will pass and the transaction will succeed. +//! +//! - `return_funds = false`: emits no transfers. Funds stay with the receiver. The invariant check +//! will fail (vault balance < initial), causing full atomic rollback. This simulates a malicious +//! or buggy callback that does not repay the flash loan. +//! +//! 
# Note on `caller_program_id` +//! +//! This program does not enforce any access control on `caller_program_id`. +//! It is designed to be called by the flash swap initiator but could in principle be +//! called by any program. In production, a callback would typically verify the caller +//! if it needs to trust the context it is called from. + +use nssa_core::program::{ + AccountPostState, ChainedCall, PdaSeed, ProgramId, ProgramInput, ProgramOutput, + read_nssa_inputs, +}; +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize)] +pub struct CallbackInstruction { + /// If true, return the borrowed funds to the vault (happy path). + /// If false, keep the funds (simulates a malicious callback, triggers rollback). + pub return_funds: bool, + pub token_program_id: ProgramId, + pub amount: u128, +} + +fn main() { + let ( + ProgramInput { + self_program_id, + caller_program_id, // not enforced in this callback + pre_states, + instruction, + }, + instruction_words, + ) = read_nssa_inputs::(); + + // pre_states[0] = vault (after transfer out), pre_states[1] = receiver (after transfer out) + let Ok([vault_pre, receiver_pre]) = <[_; 2]>::try_from(pre_states) else { + panic!("Callback requires exactly 2 accounts: vault, receiver"); + }; + + let mut chained_calls = Vec::new(); + + if instruction.return_funds { + // Happy path: return the borrowed funds via a token transfer (receiver → vault). + // The receiver is a PDA of this callback program (seed = [1_u8; 32]). + // Mark the receiver as authorized since it will be PDA-authorized in this chained call. 
+ let mut receiver_authorized = receiver_pre.clone(); + receiver_authorized.is_authorized = true; + let transfer_instruction = risc0_zkvm::serde::to_vec(&instruction.amount) + .expect("transfer instruction serialization"); + + chained_calls.push(ChainedCall { + program_id: instruction.token_program_id, + pre_states: vec![receiver_authorized, vault_pre.clone()], + instruction_data: transfer_instruction, + pda_seeds: vec![PdaSeed::new([1_u8; 32])], + }); + } + // Malicious path (return_funds = false): emit no chained calls. + // The vault balance will not be restored, so the invariant check in the initiator + // will panic, rolling back the entire transaction including the initial transfer out. + + // The callback itself makes no direct state changes, accounts pass through unchanged. + // All mutations go through the token program via chained calls. + ProgramOutput::new( + self_program_id, + caller_program_id, + instruction_words, + vec![vault_pre.clone(), receiver_pre.clone()], + vec![ + AccountPostState::new(vault_pre.account), + AccountPostState::new(receiver_pre.account), + ], + ) + .with_chained_calls(chained_calls) + .write(); +} diff --git a/test_program_methods/guest/src/bin/flash_swap_initiator.rs b/test_program_methods/guest/src/bin/flash_swap_initiator.rs new file mode 100644 index 00000000..27d1f317 --- /dev/null +++ b/test_program_methods/guest/src/bin/flash_swap_initiator.rs @@ -0,0 +1,216 @@ +//! Flash swap initiator, demonstrates the "prep → callback → assert" pattern using +//! generalized multi tail-calls with `self_program_id` and `caller_program_id`. +//! +//! # Pattern +//! +//! A flash swap lets a program optimistically transfer tokens out, run arbitrary user +//! logic (the callback), then assert that invariants hold after the callback. The entire +//! sequence is a single atomic transaction: if any step fails, all state changes roll back. +//! +//! # How it works +//! +//! This program handles two instruction variants: +//! +//! 
- `Initiate` (external): the top-level entrypoint. Emits 3 chained calls: +//! 1. Token transfer out (vault → receiver) +//! 2. User callback (arbitrary logic, e.g. arbitrage) +//! 3. Self-call to `InvariantCheck` (using `self_program_id` to reference itself) +//! +//! - `InvariantCheck` (internal): enforces that the vault balance was restored after the callback. +//! Uses `caller_program_id == Some(self_program_id)` to prevent standalone calls (this is the +//! visibility enforcement mechanism). +//! +//! # What this demonstrates +//! +//! - `self_program_id`: enables a program to chain back to itself (step 3 above) +//! - `caller_program_id`: enables a program to restrict which callers can invoke an instruction +//! - Computed intermediate states: the initiator computes expected intermediate account states from +//! the `pre_states` and amount, keeping the instruction minimal. +//! - Atomic rollback: if the callback doesn't return funds, the invariant check fails, and all +//! state changes from steps 1 and 2 are rolled back automatically. +//! +//! # Tests +//! +//! See `nssa/src/state.rs` for integration tests: +//! - `flash_swap_successful`: full round-trip, funds returned, state unchanged +//! - `flash_swap_callback_keeps_funds_rollback`: callback keeps funds, full rollback +//! - `flash_swap_self_call_targets_correct_program`: zero-amount self-call isolation test +//! - `flash_swap_standalone_invariant_check_rejected`: `caller_program_id` access control + +use nssa_core::program::{ + AccountPostState, ChainedCall, PdaSeed, ProgramId, ProgramInput, ProgramOutput, + read_nssa_inputs, +}; +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize)] +pub enum FlashSwapInstruction { + /// External entrypoint: initiate a flash swap. + /// + /// Emits 3 chained calls: + /// 1. Token transfer (vault → receiver, `amount_out`) + /// 2. Callback (user logic, e.g. arbitrage) + /// 3. 
Self-call `InvariantCheck` (verify vault balance did not decrease) + /// + /// Intermediate account states are computed inside the program from `pre_states` and + /// `amount_out`. + Initiate { + token_program_id: ProgramId, + callback_program_id: ProgramId, + amount_out: u128, + callback_instruction_data: Vec<u32>, + }, + /// Internal: verify the vault invariant holds after callback execution. + /// + /// Access control: only callable as a chained call from this program itself. + /// This is enforced by checking `caller_program_id == Some(self_program_id)`. + /// Any attempt to call this instruction as a standalone top-level transaction + /// will be rejected because `caller_program_id` will be `None`. + InvariantCheck { min_vault_balance: u128 }, +} + +fn main() { + let ( + ProgramInput { + self_program_id, + caller_program_id, + pre_states, + instruction, + }, + instruction_words, + ) = read_nssa_inputs::<FlashSwapInstruction>(); + + match instruction { + FlashSwapInstruction::Initiate { + token_program_id, + callback_program_id, + amount_out, + callback_instruction_data, + } => { + let Ok([vault_pre, receiver_pre]) = <[_; 2]>::try_from(pre_states) else { + panic!("Initiate requires exactly 2 accounts: vault, receiver"); + }; + + // Capture initial vault balance, the invariant check will verify it is restored. + let min_vault_balance = vault_pre.account.balance; + + // Compute intermediate account states from pre_states and amount_out. 
+ let mut vault_after_transfer = vault_pre.clone(); + vault_after_transfer.account.balance = vault_pre + .account + .balance + .checked_sub(amount_out) + .expect("vault has insufficient balance for flash swap"); + + let mut receiver_after_transfer = receiver_pre.clone(); + receiver_after_transfer.account.balance = receiver_pre + .account + .balance + .checked_add(amount_out) + .expect("receiver balance overflow"); + + let mut vault_after_callback = vault_after_transfer.clone(); + vault_after_callback.account.balance = vault_after_transfer + .account + .balance + .checked_add(amount_out) + .expect("vault balance overflow after callback"); + + // Chained call 1: Token transfer (vault → receiver). + // The vault is a PDA of this initiator program (seed = [0_u8; 32]), so we provide + // the PDA seed to authorize the token program to debit the vault on our behalf. + // Mark the vault as authorized since it will be PDA-authorized in this chained call. + let mut vault_authorized = vault_pre.clone(); + vault_authorized.is_authorized = true; + let transfer_instruction = + risc0_zkvm::serde::to_vec(&amount_out).expect("transfer instruction serialization"); + let call_1 = ChainedCall { + program_id: token_program_id, + pre_states: vec![vault_authorized, receiver_pre.clone()], + instruction_data: transfer_instruction, + pda_seeds: vec![PdaSeed::new([0_u8; 32])], + }; + + // Chained call 2: User callback. + // Receives the post-transfer states as its pre_states. The callback may run + // arbitrary logic (arbitrage, etc.) and is expected to return funds to the vault. + let call_2 = ChainedCall { + program_id: callback_program_id, + pre_states: vec![vault_after_transfer, receiver_after_transfer], + instruction_data: callback_instruction_data, + pda_seeds: vec![], + }; + + // Chained call 3: Self-call to enforce the invariant. 
+ // Uses `self_program_id` to reference this program, the key feature that enables + // the "prep → callback → assert" pattern without a separate checker program. + // If the callback did not return funds, vault_after_callback.balance < + // min_vault_balance and this call will panic, rolling back the entire + // transaction. + let invariant_instruction = + risc0_zkvm::serde::to_vec(&FlashSwapInstruction::InvariantCheck { + min_vault_balance, + }) + .expect("invariant instruction serialization"); + let call_3 = ChainedCall { + program_id: self_program_id, // self-referential chained call + pre_states: vec![vault_after_callback], + instruction_data: invariant_instruction, + pda_seeds: vec![], + }; + + // The initiator itself makes no direct state changes. + // All mutations happen inside the chained calls (token transfers). + ProgramOutput::new( + self_program_id, + caller_program_id, + instruction_words, + vec![vault_pre.clone(), receiver_pre.clone()], + vec![ + AccountPostState::new(vault_pre.account), + AccountPostState::new(receiver_pre.account), + ], + ) + .with_chained_calls(vec![call_1, call_2, call_3]) + .write(); + } + + FlashSwapInstruction::InvariantCheck { min_vault_balance } => { + // Visibility enforcement: `InvariantCheck` is an internal instruction. + // It must only be called as a chained call from this program itself (via `Initiate`). + // When called as a top-level transaction, `caller_program_id` is `None` → panics. + // When called as a chained call from `Initiate`, `caller_program_id` is + // `Some(self_program_id)` → passes. + assert_eq!( + caller_program_id, + Some(self_program_id), + "InvariantCheck is an internal instruction: must be called by flash_swap_initiator \ + via a chained call", + ); + + let Ok([vault]) = <[_; 1]>::try_from(pre_states) else { + panic!("InvariantCheck requires exactly 1 account: vault"); + }; + + // The core invariant: vault balance must not have decreased. + // If the callback returned funds, this passes. 
If not, this panics and + // the entire transaction (including the prior token transfer) rolls back. + assert!( + vault.account.balance >= min_vault_balance, + "Flash swap invariant violated: vault balance {} < minimum {}", + vault.account.balance, + min_vault_balance + ); + + // Pass-through: no state changes in the invariant check step. + ProgramOutput::new( + self_program_id, + caller_program_id, + instruction_words, + vec![vault.clone()], + vec![AccountPostState::new(vault.account)], + ) + .write(); + } + } +} diff --git a/test_program_methods/guest/src/bin/malicious_authorization_changer.rs b/test_program_methods/guest/src/bin/malicious_authorization_changer.rs index 56ba7e72..f7aba4a0 100644 --- a/test_program_methods/guest/src/bin/malicious_authorization_changer.rs +++ b/test_program_methods/guest/src/bin/malicious_authorization_changer.rs @@ -1,8 +1,7 @@ use nssa_core::{ account::AccountWithMetadata, program::{ - AccountPostState, ChainedCall, ProgramId, ProgramInput, read_nssa_inputs, - write_nssa_outputs_with_chained_call, + AccountPostState, ChainedCall, ProgramId, ProgramInput, ProgramOutput, read_nssa_inputs, }, }; use risc0_zkvm::serde::to_vec; @@ -15,6 +14,8 @@ type Instruction = (u128, ProgramId); fn main() { let ( ProgramInput { + self_program_id, + caller_program_id, pre_states, instruction: (balance, transfer_program_id), }, @@ -40,13 +41,16 @@ fn main() { pda_seeds: vec![], }; - write_nssa_outputs_with_chained_call( + ProgramOutput::new( + self_program_id, + caller_program_id, instruction_words, vec![sender.clone(), receiver.clone()], vec![ AccountPostState::new(sender.account), AccountPostState::new(receiver.account), ], - vec![chained_call], - ); + ) + .with_chained_calls(vec![chained_call]) + .write(); } diff --git a/test_program_methods/guest/src/bin/malicious_caller_program_id.rs b/test_program_methods/guest/src/bin/malicious_caller_program_id.rs new file mode 100644 index 00000000..2326190e --- /dev/null +++ 
b/test_program_methods/guest/src/bin/malicious_caller_program_id.rs @@ -0,0 +1,34 @@ +use nssa_core::program::{ + AccountPostState, DEFAULT_PROGRAM_ID, ProgramInput, ProgramOutput, read_nssa_inputs, +}; + +type Instruction = (); + +fn main() { + let ( + ProgramInput { + self_program_id, + caller_program_id: _, // ignore the actual caller + pre_states, + instruction: (), + }, + instruction_words, + ) = read_nssa_inputs::<Instruction>(); + + let post_states = pre_states + .iter() + .map(|a| AccountPostState::new(a.account.clone())) + .collect(); + + // Deliberately output wrong caller_program_id. + // A real caller_program_id is None for a top-level call, so we spoof Some(DEFAULT_PROGRAM_ID) + // to simulate a program claiming it was invoked by another program when it was not. + ProgramOutput::new( + self_program_id, + Some(DEFAULT_PROGRAM_ID), // WRONG: should be None for a top-level call + instruction_words, + pre_states, + post_states, + ) + .write(); +} diff --git a/test_program_methods/guest/src/bin/malicious_self_program_id.rs b/test_program_methods/guest/src/bin/malicious_self_program_id.rs new file mode 100644 index 00000000..be447ab9 --- /dev/null +++ b/test_program_methods/guest/src/bin/malicious_self_program_id.rs @@ -0,0 +1,32 @@ +use nssa_core::program::{ + AccountPostState, DEFAULT_PROGRAM_ID, ProgramInput, ProgramOutput, read_nssa_inputs, +}; + +type Instruction = (); + +fn main() { + let ( + ProgramInput { + self_program_id: _, // ignore the correct ID + caller_program_id, + pre_states, + instruction: (), + }, + instruction_words, + ) = read_nssa_inputs::<Instruction>(); + + let post_states = pre_states + .iter() + .map(|a| AccountPostState::new(a.account.clone())) + .collect(); + + // Deliberately output wrong self_program_id + ProgramOutput::new( + DEFAULT_PROGRAM_ID, // WRONG: should be self_program_id + caller_program_id, + instruction_words, + pre_states, + post_states, + ) + .write(); +} diff --git a/test_program_methods/guest/src/bin/minter.rs 
b/test_program_methods/guest/src/bin/minter.rs index a602df56..1f31ca05 100644 --- a/test_program_methods/guest/src/bin/minter.rs +++ b/test_program_methods/guest/src/bin/minter.rs @@ -1,9 +1,17 @@ -use nssa_core::program::{AccountPostState, ProgramInput, read_nssa_inputs, write_nssa_outputs}; +use nssa_core::program::{AccountPostState, ProgramInput, ProgramOutput, read_nssa_inputs}; type Instruction = (); fn main() { - let (ProgramInput { pre_states, .. }, instruction_words) = read_nssa_inputs::(); + let ( + ProgramInput { + self_program_id, + caller_program_id, + pre_states, + .. + }, + instruction_words, + ) = read_nssa_inputs::(); let Ok([pre]) = <[_; 1]>::try_from(pre_states) else { return; @@ -16,9 +24,12 @@ fn main() { .checked_add(1) .expect("Balance overflow"); - write_nssa_outputs( + ProgramOutput::new( + self_program_id, + caller_program_id, instruction_words, vec![pre], vec![AccountPostState::new(account_post)], - ); + ) + .write(); } diff --git a/test_program_methods/guest/src/bin/missing_output.rs b/test_program_methods/guest/src/bin/missing_output.rs index 52ca6e2f..d7d2778d 100644 --- a/test_program_methods/guest/src/bin/missing_output.rs +++ b/test_program_methods/guest/src/bin/missing_output.rs @@ -1,9 +1,17 @@ -use nssa_core::program::{AccountPostState, ProgramInput, read_nssa_inputs, write_nssa_outputs}; +use nssa_core::program::{AccountPostState, ProgramInput, ProgramOutput, read_nssa_inputs}; type Instruction = (); fn main() { - let (ProgramInput { pre_states, .. }, instruction_words) = read_nssa_inputs::(); + let ( + ProgramInput { + self_program_id, + caller_program_id, + pre_states, + .. 
+ }, + instruction_words, + ) = read_nssa_inputs::(); let Ok([pre1, pre2]) = <[_; 2]>::try_from(pre_states) else { return; @@ -11,9 +19,12 @@ fn main() { let account_pre1 = pre1.account.clone(); - write_nssa_outputs( + ProgramOutput::new( + self_program_id, + caller_program_id, instruction_words, vec![pre1, pre2], vec![AccountPostState::new(account_pre1)], - ); + ) + .write(); } diff --git a/test_program_methods/guest/src/bin/modified_transfer.rs b/test_program_methods/guest/src/bin/modified_transfer.rs index 3aee3816..2c05921c 100644 --- a/test_program_methods/guest/src/bin/modified_transfer.rs +++ b/test_program_methods/guest/src/bin/modified_transfer.rs @@ -5,7 +5,7 @@ use nssa_core::{ account::{Account, AccountWithMetadata}, - program::{AccountPostState, ProgramInput, read_nssa_inputs, write_nssa_outputs}, + program::{AccountPostState, ProgramInput, ProgramOutput, read_nssa_inputs}, }; /// Initializes a default account under the ownership of this program. @@ -64,6 +64,8 @@ fn main() { // Read input accounts. let ( ProgramInput { + self_program_id, + caller_program_id, pre_states, instruction: balance_to_move, }, @@ -80,5 +82,12 @@ fn main() { } _ => panic!("invalid params"), }; - write_nssa_outputs(instruction_data, pre_states, post_states); + ProgramOutput::new( + self_program_id, + caller_program_id, + instruction_data, + pre_states, + post_states, + ) + .write(); } diff --git a/test_program_methods/guest/src/bin/nonce_changer.rs b/test_program_methods/guest/src/bin/nonce_changer.rs index 52d2e392..c6e851fe 100644 --- a/test_program_methods/guest/src/bin/nonce_changer.rs +++ b/test_program_methods/guest/src/bin/nonce_changer.rs @@ -1,9 +1,17 @@ -use nssa_core::program::{AccountPostState, ProgramInput, read_nssa_inputs, write_nssa_outputs}; +use nssa_core::program::{AccountPostState, ProgramInput, ProgramOutput, read_nssa_inputs}; type Instruction = (); fn main() { - let (ProgramInput { pre_states, .. 
}, instruction_words) = read_nssa_inputs::(); + let ( + ProgramInput { + self_program_id, + caller_program_id, + pre_states, + .. + }, + instruction_words, + ) = read_nssa_inputs::(); let Ok([pre]) = <[_; 1]>::try_from(pre_states) else { return; @@ -13,9 +21,12 @@ fn main() { let mut account_post = account_pre.clone(); account_post.nonce.public_account_nonce_increment(); - write_nssa_outputs( + ProgramOutput::new( + self_program_id, + caller_program_id, instruction_words, vec![pre], vec![AccountPostState::new(account_post)], - ); + ) + .write(); } diff --git a/test_program_methods/guest/src/bin/noop.rs b/test_program_methods/guest/src/bin/noop.rs index 79dd1dec..fc92aebe 100644 --- a/test_program_methods/guest/src/bin/noop.rs +++ b/test_program_methods/guest/src/bin/noop.rs @@ -1,13 +1,28 @@ -use nssa_core::program::{AccountPostState, ProgramInput, read_nssa_inputs, write_nssa_outputs}; +use nssa_core::program::{AccountPostState, ProgramInput, ProgramOutput, read_nssa_inputs}; type Instruction = (); fn main() { - let (ProgramInput { pre_states, .. }, instruction_words) = read_nssa_inputs::(); + let ( + ProgramInput { + self_program_id, + caller_program_id, + pre_states, + .. + }, + instruction_words, + ) = read_nssa_inputs::(); let post_states = pre_states .iter() .map(|account| AccountPostState::new(account.account.clone())) .collect(); - write_nssa_outputs(instruction_words, pre_states, post_states); + ProgramOutput::new( + self_program_id, + caller_program_id, + instruction_words, + pre_states, + post_states, + ) + .write(); } diff --git a/test_program_methods/guest/src/bin/pinata_cooldown.rs b/test_program_methods/guest/src/bin/pinata_cooldown.rs new file mode 100644 index 00000000..9e8bde3b --- /dev/null +++ b/test_program_methods/guest/src/bin/pinata_cooldown.rs @@ -0,0 +1,116 @@ +//! Cooldown-based pinata program. +//! +//! A Piñata program that uses the on-chain clock to prevent abuse. +//! 
After each prize claim the program records the current timestamp; the next claim is only +//! allowed once a configurable cooldown period has elapsed. +//! +//! Expected pre-states (in order): +//! 0 - pinata account (authorized, owned by this program) +//! 1 - winner account +//! 2 - clock account `CLOCK_01`. +//! +//! Pinata account data layout (32 bytes): +//! [prize: u128 LE | `cooldown_ms`: u64 LE | `last_claim_timestamp`: u64 LE]. + +use clock_core::{CLOCK_01_PROGRAM_ACCOUNT_ID, ClockAccountData}; +use nssa_core::program::{AccountPostState, Claim, ProgramInput, ProgramOutput, read_nssa_inputs}; + +type Instruction = (); + +struct PinataState { + prize: u128, + cooldown_ms: u64, + last_claim_timestamp: u64, +} + +impl PinataState { + fn from_bytes(bytes: &[u8]) -> Self { + assert!(bytes.len() >= 32, "Pinata account data too short"); + let prize = u128::from_le_bytes(bytes[..16].try_into().unwrap()); + let cooldown_ms = u64::from_le_bytes(bytes[16..24].try_into().unwrap()); + let last_claim_timestamp = u64::from_le_bytes(bytes[24..32].try_into().unwrap()); + Self { + prize, + cooldown_ms, + last_claim_timestamp, + } + } + + fn to_bytes(&self) -> Vec<u8> { + let mut buf = Vec::with_capacity(32); + buf.extend_from_slice(&self.prize.to_le_bytes()); + buf.extend_from_slice(&self.cooldown_ms.to_le_bytes()); + buf.extend_from_slice(&self.last_claim_timestamp.to_le_bytes()); + buf + } +} + +fn main() { + let ( + ProgramInput { + self_program_id, + caller_program_id, + pre_states, + instruction: (), + }, + instruction_words, + ) = read_nssa_inputs::<Instruction>(); + + let Ok([pinata, winner, clock_pre]) = <[_; 3]>::try_from(pre_states) else { + panic!("Expected exactly 3 input accounts: pinata, winner, clock"); + }; + + // Check the clock account is the system clock account + assert_eq!(clock_pre.account_id, CLOCK_01_PROGRAM_ACCOUNT_ID); + + let clock_data = ClockAccountData::from_bytes(&clock_pre.account.data.clone().into_inner()); + let current_timestamp = clock_data.timestamp; + 
let pinata_state = PinataState::from_bytes(&pinata.account.data.clone().into_inner()); + + // Enforce cooldown: the elapsed time since the last claim must exceed the cooldown period. + let elapsed = current_timestamp.saturating_sub(pinata_state.last_claim_timestamp); + assert!( + elapsed >= pinata_state.cooldown_ms, + "Cooldown not elapsed: {elapsed}ms since last claim, need {}ms", + pinata_state.cooldown_ms, + ); + + let mut pinata_post = pinata.account.clone(); + let mut winner_post = winner.account.clone(); + + pinata_post.balance = pinata_post + .balance + .checked_sub(pinata_state.prize) + .expect("Not enough balance in the pinata"); + winner_post.balance = winner_post + .balance + .checked_add(pinata_state.prize) + .expect("Overflow when adding prize to winner"); + + // Update the last claim timestamp. + let updated_state = PinataState { + last_claim_timestamp: current_timestamp, + ..pinata_state + }; + pinata_post.data = updated_state + .to_bytes() + .try_into() + .expect("Pinata state should fit in account data"); + + // Clock account is read-only. + let clock_post = clock_pre.account.clone(); + + ProgramOutput::new( + self_program_id, + caller_program_id, + instruction_words, + vec![pinata, winner, clock_pre], + vec![ + AccountPostState::new_claimed_if_default(pinata_post, Claim::Authorized), + AccountPostState::new(winner_post), + AccountPostState::new(clock_post), + ], + ) + .write(); +} diff --git a/test_program_methods/guest/src/bin/program_owner_changer.rs b/test_program_methods/guest/src/bin/program_owner_changer.rs index 4b7de0f7..0282b5cc 100644 --- a/test_program_methods/guest/src/bin/program_owner_changer.rs +++ b/test_program_methods/guest/src/bin/program_owner_changer.rs @@ -1,9 +1,17 @@ -use nssa_core::program::{AccountPostState, ProgramInput, read_nssa_inputs, write_nssa_outputs}; +use nssa_core::program::{AccountPostState, ProgramInput, ProgramOutput, read_nssa_inputs}; type Instruction = (); fn main() { - let (ProgramInput { pre_states, .. 
}, instruction_words) = read_nssa_inputs::(); + let ( + ProgramInput { + self_program_id, + caller_program_id, + pre_states, + .. + }, + instruction_words, + ) = read_nssa_inputs::(); let Ok([pre]) = <[_; 1]>::try_from(pre_states) else { return; @@ -13,9 +21,12 @@ fn main() { let mut account_post = account_pre.clone(); account_post.program_owner = [0, 1, 2, 3, 4, 5, 6, 7]; - write_nssa_outputs( + ProgramOutput::new( + self_program_id, + caller_program_id, instruction_words, vec![pre], vec![AccountPostState::new(account_post)], - ); + ) + .write(); } diff --git a/test_program_methods/guest/src/bin/simple_balance_transfer.rs b/test_program_methods/guest/src/bin/simple_balance_transfer.rs index 55bbfcef..f324b371 100644 --- a/test_program_methods/guest/src/bin/simple_balance_transfer.rs +++ b/test_program_methods/guest/src/bin/simple_balance_transfer.rs @@ -1,10 +1,12 @@ -use nssa_core::program::{AccountPostState, ProgramInput, read_nssa_inputs, write_nssa_outputs}; +use nssa_core::program::{AccountPostState, ProgramInput, ProgramOutput, read_nssa_inputs}; type Instruction = u128; fn main() { let ( ProgramInput { + self_program_id, + caller_program_id, pre_states, instruction: balance, }, @@ -26,12 +28,15 @@ fn main() { .checked_add(balance) .expect("Overflow when adding balance"); - write_nssa_outputs( + ProgramOutput::new( + self_program_id, + caller_program_id, instruction_words, vec![sender_pre, receiver_pre], vec![ AccountPostState::new(sender_post), AccountPostState::new(receiver_post), ], - ); + ) + .write(); } diff --git a/test_program_methods/guest/src/bin/time_locked_transfer.rs b/test_program_methods/guest/src/bin/time_locked_transfer.rs new file mode 100644 index 00000000..25595661 --- /dev/null +++ b/test_program_methods/guest/src/bin/time_locked_transfer.rs @@ -0,0 +1,72 @@ +//! Time-locked transfer program. +//! +//! Demonstrates how a program can include a clock account among its inputs and use the on-chain +//! timestamp in its logic. 
The transfer only executes when the clock timestamp is at or past a +//! caller-supplied deadline; otherwise the program panics. +//! +//! Expected pre-states (in order): +//! 0 - sender account (authorized) +//! 1 - receiver account +//! 2 - clock account (read-only, e.g. `CLOCK_01`). + +use clock_core::{CLOCK_01_PROGRAM_ACCOUNT_ID, ClockAccountData}; +use nssa_core::program::{AccountPostState, ProgramInput, ProgramOutput, read_nssa_inputs}; + +/// (`amount`, `deadline_timestamp`). +type Instruction = (u128, u64); + +fn main() { + let ( + ProgramInput { + self_program_id, + caller_program_id, + pre_states, + instruction: (amount, deadline), + }, + instruction_words, + ) = read_nssa_inputs::(); + + let Ok([sender_pre, receiver_pre, clock_pre]) = <[_; 3]>::try_from(pre_states) else { + panic!("Expected exactly 3 input accounts: sender, receiver, clock"); + }; + + // Check the clock account is the system clock account + assert_eq!(clock_pre.account_id, CLOCK_01_PROGRAM_ACCOUNT_ID); + + // Read the current timestamp from the clock account. + let clock_data = ClockAccountData::from_bytes(&clock_pre.account.data.clone().into_inner()); + + assert!( + clock_data.timestamp >= deadline, + "Transfer is time-locked until timestamp {deadline}, current is {}", + clock_data.timestamp, + ); + + let mut sender_post = sender_pre.account.clone(); + let mut receiver_post = receiver_pre.account.clone(); + + sender_post.balance = sender_post + .balance + .checked_sub(amount) + .expect("Insufficient balance"); + receiver_post.balance = receiver_post + .balance + .checked_add(amount) + .expect("Balance overflow"); + + // Clock account is read-only: post state equals pre state. 
+ let clock_post = clock_pre.account.clone(); + + ProgramOutput::new( + self_program_id, + caller_program_id, + instruction_words, + vec![sender_pre, receiver_pre, clock_pre], + vec![ + AccountPostState::new(sender_post), + AccountPostState::new(receiver_post), + AccountPostState::new(clock_post), + ], + ) + .write(); +} diff --git a/test_program_methods/guest/src/bin/validity_window.rs b/test_program_methods/guest/src/bin/validity_window.rs new file mode 100644 index 00000000..03100e8e --- /dev/null +++ b/test_program_methods/guest/src/bin/validity_window.rs @@ -0,0 +1,35 @@ +use nssa_core::program::{ + AccountPostState, BlockValidityWindow, ProgramInput, ProgramOutput, TimestampValidityWindow, + read_nssa_inputs, +}; + +type Instruction = (BlockValidityWindow, TimestampValidityWindow); + +fn main() { + let ( + ProgramInput { + self_program_id, + caller_program_id, + pre_states, + instruction: (block_validity_window, timestamp_validity_window), + }, + instruction_words, + ) = read_nssa_inputs::(); + + let Ok([pre]) = <[_; 1]>::try_from(pre_states) else { + return; + }; + + let post = pre.account.clone(); + + ProgramOutput::new( + self_program_id, + caller_program_id, + instruction_words, + vec![pre], + vec![AccountPostState::new(post)], + ) + .with_block_validity_window(block_validity_window) + .with_timestamp_validity_window(timestamp_validity_window) + .write(); +} diff --git a/test_program_methods/guest/src/bin/validity_window_chain_caller.rs b/test_program_methods/guest/src/bin/validity_window_chain_caller.rs new file mode 100644 index 00000000..212418a2 --- /dev/null +++ b/test_program_methods/guest/src/bin/validity_window_chain_caller.rs @@ -0,0 +1,52 @@ +use nssa_core::program::{ + AccountPostState, BlockValidityWindow, ChainedCall, ProgramId, ProgramInput, ProgramOutput, + TimestampValidityWindow, read_nssa_inputs, +}; +use risc0_zkvm::serde::to_vec; + +/// A program that sets a block validity window on its output and chains to another program with a +/// 
potentially different block validity window. +/// +/// Instruction: (`window`, `chained_program_id`, `chained_window`) +/// The initial output uses `window` and chains to `chained_program_id` with `chained_window`. +/// The chained program (`validity_window`) expects `(BlockValidityWindow, TimestampValidityWindow)` +/// so an unbounded timestamp window is appended automatically. +type Instruction = (BlockValidityWindow, ProgramId, BlockValidityWindow); + +fn main() { + let ( + ProgramInput { + self_program_id, + caller_program_id, + pre_states, + instruction: (block_validity_window, chained_program_id, chained_block_validity_window), + }, + instruction_words, + ) = read_nssa_inputs::(); + + let [pre] = <[_; 1]>::try_from(pre_states.clone()).expect("Expected exactly one pre state"); + let post = pre.account.clone(); + + let chained_instruction = to_vec(&( + chained_block_validity_window, + TimestampValidityWindow::new_unbounded(), + )) + .unwrap(); + let chained_call = ChainedCall { + program_id: chained_program_id, + instruction_data: chained_instruction, + pre_states, + pda_seeds: vec![], + }; + + ProgramOutput::new( + self_program_id, + caller_program_id, + instruction_words, + vec![pre], + vec![AccountPostState::new(post)], + ) + .with_block_validity_window(block_validity_window) + .with_chained_calls(vec![chained_call]) + .write(); +} diff --git a/testnet_initial_state/Cargo.toml b/testnet_initial_state/Cargo.toml new file mode 100644 index 00000000..2b73f479 --- /dev/null +++ b/testnet_initial_state/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "testnet_initial_state" +version = "0.1.0" +edition = "2024" +license.workspace = true + +[dependencies] +key_protocol.workspace = true +nssa.workspace = true +nssa_core.workspace = true +common.workspace = true + +serde.workspace = true + +[lints] +workspace = true diff --git a/testnet_initial_state/src/lib.rs b/testnet_initial_state/src/lib.rs new file mode 100644 index 00000000..1d75d6a1 --- /dev/null +++ 
b/testnet_initial_state/src/lib.rs @@ -0,0 +1,396 @@ +use common::PINATA_BASE58; +use key_protocol::key_management::{ + KeyChain, + secret_holders::{PrivateKeyHolder, SecretSpendingKey}, +}; +use nssa::{Account, AccountId, Data, PrivateKey, PublicKey, V03State}; +use nssa_core::{NullifierPublicKey, encryption::shared_key_derivation::Secp256k1Point}; +use serde::{Deserialize, Serialize}; + +const PRIVATE_KEY_PUB_ACC_A: [u8; 32] = [ + 16, 162, 106, 154, 236, 125, 52, 184, 35, 100, 238, 174, 69, 197, 41, 77, 187, 10, 118, 75, 0, + 11, 148, 238, 185, 181, 133, 17, 220, 72, 124, 77, +]; + +const PRIVATE_KEY_PUB_ACC_B: [u8; 32] = [ + 113, 121, 64, 177, 204, 85, 229, 214, 178, 6, 109, 191, 29, 154, 63, 38, 242, 18, 244, 219, 8, + 208, 35, 136, 23, 127, 207, 237, 216, 169, 190, 27, +]; + +const SSK_PRIV_ACC_A: [u8; 32] = [ + 93, 13, 190, 240, 250, 33, 108, 195, 176, 40, 144, 61, 4, 28, 58, 112, 53, 161, 42, 238, 155, + 27, 23, 176, 208, 121, 15, 229, 165, 180, 99, 143, +]; + +const SSK_PRIV_ACC_B: [u8; 32] = [ + 48, 175, 124, 10, 230, 240, 166, 14, 249, 254, 157, 226, 208, 124, 122, 177, 203, 139, 192, + 180, 43, 120, 55, 151, 50, 21, 113, 22, 254, 83, 148, 56, +]; + +const NSK_PRIV_ACC_A: [u8; 32] = [ + 25, 21, 186, 59, 180, 224, 101, 64, 163, 208, 228, 43, 13, 185, 100, 123, 156, 47, 80, 179, 72, + 51, 115, 11, 180, 99, 21, 201, 48, 194, 118, 144, +]; + +const NSK_PRIV_ACC_B: [u8; 32] = [ + 99, 82, 190, 140, 234, 10, 61, 163, 15, 211, 179, 54, 70, 166, 87, 5, 182, 68, 117, 244, 217, + 23, 99, 9, 4, 177, 230, 125, 109, 91, 160, 30, +]; + +const VSK_PRIV_ACC_A: [u8; 32] = [ + 5, 85, 114, 119, 141, 187, 202, 170, 122, 253, 198, 81, 150, 8, 155, 21, 192, 65, 24, 124, 116, + 98, 110, 106, 137, 90, 165, 239, 80, 13, 222, 30, +]; + +const VSK_PRIV_ACC_B: [u8; 32] = [ + 205, 32, 76, 251, 255, 236, 96, 119, 61, 111, 65, 100, 75, 218, 12, 22, 17, 170, 55, 226, 21, + 154, 161, 34, 208, 74, 27, 1, 119, 13, 88, 128, +]; + +const VPK_PRIV_ACC_A: [u8; 33] = [ + 2, 210, 206, 38, 213, 4, 
182, 198, 220, 47, 93, 148, 61, 84, 148, 250, 158, 45, 8, 81, 48, 80, + 46, 230, 87, 210, 47, 204, 76, 58, 214, 167, 81, +]; + +const VPK_PRIV_ACC_B: [u8; 33] = [ + 2, 79, 110, 46, 203, 29, 206, 205, 18, 86, 27, 189, 104, 103, 113, 181, 110, 53, 78, 172, 11, + 171, 190, 18, 126, 214, 81, 77, 192, 154, 58, 195, 238, +]; + +const NPK_PRIV_ACC_A: [u8; 32] = [ + 167, 108, 50, 153, 74, 47, 151, 188, 140, 79, 195, 31, 181, 9, 40, 167, 201, 32, 175, 129, 45, + 245, 223, 193, 210, 170, 247, 128, 167, 140, 155, 129, +]; + +const NPK_PRIV_ACC_B: [u8; 32] = [ + 32, 67, 72, 164, 106, 53, 66, 239, 141, 15, 52, 230, 136, 177, 2, 236, 207, 243, 134, 135, 210, + 143, 87, 232, 215, 128, 194, 120, 113, 224, 4, 165, +]; + +const DEFAULT_PROGRAM_OWNER: [u32; 8] = [0, 0, 0, 0, 0, 0, 0, 0]; + +const PUB_ACC_A_INITIAL_BALANCE: u128 = 10000; +const PUB_ACC_B_INITIAL_BALANCE: u128 = 20000; + +const PRIV_ACC_A_INITIAL_BALANCE: u128 = 10000; +const PRIV_ACC_B_INITIAL_BALANCE: u128 = 20000; + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct PublicAccountPublicInitialData { + pub account_id: AccountId, + pub balance: u128, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct PrivateAccountPublicInitialData { + pub npk: nssa_core::NullifierPublicKey, + pub account: nssa_core::account::Account, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct PublicAccountPrivateInitialData { + pub account_id: nssa::AccountId, + pub pub_sign_key: nssa::PrivateKey, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PrivateAccountPrivateInitialData { + pub account_id: nssa::AccountId, + pub account: nssa_core::account::Account, + pub key_chain: KeyChain, +} + +#[must_use] +pub fn initial_pub_accounts_private_keys() -> Vec { + let acc1_pub_sign_key = PrivateKey::try_new(PRIVATE_KEY_PUB_ACC_A).unwrap(); + + let acc2_pub_sign_key = PrivateKey::try_new(PRIVATE_KEY_PUB_ACC_B).unwrap(); + + vec![ + 
PublicAccountPrivateInitialData { + account_id: AccountId::from(&PublicKey::new_from_private_key(&acc1_pub_sign_key)), + pub_sign_key: acc1_pub_sign_key, + }, + PublicAccountPrivateInitialData { + account_id: AccountId::from(&PublicKey::new_from_private_key(&acc2_pub_sign_key)), + pub_sign_key: acc2_pub_sign_key, + }, + ] +} + +#[must_use] +pub fn initial_priv_accounts_private_keys() -> Vec { + let key_chain_1 = KeyChain { + secret_spending_key: SecretSpendingKey(SSK_PRIV_ACC_A), + private_key_holder: PrivateKeyHolder { + nullifier_secret_key: NSK_PRIV_ACC_A, + viewing_secret_key: VSK_PRIV_ACC_A, + }, + nullifier_public_key: NullifierPublicKey(NPK_PRIV_ACC_A), + viewing_public_key: Secp256k1Point(VPK_PRIV_ACC_A.to_vec()), + }; + + let key_chain_2 = KeyChain { + secret_spending_key: SecretSpendingKey(SSK_PRIV_ACC_B), + private_key_holder: PrivateKeyHolder { + nullifier_secret_key: NSK_PRIV_ACC_B, + viewing_secret_key: VSK_PRIV_ACC_B, + }, + nullifier_public_key: NullifierPublicKey(NPK_PRIV_ACC_B), + viewing_public_key: Secp256k1Point(VPK_PRIV_ACC_B.to_vec()), + }; + + vec![ + PrivateAccountPrivateInitialData { + account_id: AccountId::from(&key_chain_1.nullifier_public_key), + account: Account { + program_owner: DEFAULT_PROGRAM_OWNER, + balance: PRIV_ACC_A_INITIAL_BALANCE, + data: Data::default(), + nonce: 0.into(), + }, + key_chain: key_chain_1, + }, + PrivateAccountPrivateInitialData { + account_id: AccountId::from(&key_chain_2.nullifier_public_key), + account: Account { + program_owner: DEFAULT_PROGRAM_OWNER, + balance: PRIV_ACC_B_INITIAL_BALANCE, + data: Data::default(), + nonce: 0.into(), + }, + key_chain: key_chain_2, + }, + ] +} + +#[must_use] +pub fn initial_commitments() -> Vec { + initial_priv_accounts_private_keys() + .into_iter() + .map(|data| PrivateAccountPublicInitialData { + npk: data.key_chain.nullifier_public_key.clone(), + account: data.account, + }) + .collect() +} + +#[must_use] +pub fn initial_accounts() -> Vec { + let initial_account_ids = 
initial_pub_accounts_private_keys() + .into_iter() + .map(|data| data.account_id) + .collect::>(); + + vec![ + PublicAccountPublicInitialData { + account_id: initial_account_ids[0], + balance: PUB_ACC_A_INITIAL_BALANCE, + }, + PublicAccountPublicInitialData { + account_id: initial_account_ids[1], + balance: PUB_ACC_B_INITIAL_BALANCE, + }, + ] +} + +#[must_use] +pub fn initial_state() -> V03State { + let initial_commitments: Vec = initial_commitments() + .iter() + .map(|init_comm_data| { + let npk = &init_comm_data.npk; + + let mut acc = init_comm_data.account.clone(); + + acc.program_owner = nssa::program::Program::authenticated_transfer_program().id(); + + nssa_core::Commitment::new(npk, &acc) + }) + .collect(); + + let init_accs: Vec<(nssa::AccountId, u128)> = initial_accounts() + .iter() + .map(|acc_data| (acc_data.account_id, acc_data.balance)) + .collect(); + + nssa::V03State::new_with_genesis_accounts(&init_accs, &initial_commitments, 0) +} + +#[must_use] +pub fn initial_state_testnet() -> V03State { + let mut state = initial_state(); + + state.add_pinata_program(PINATA_BASE58.parse().unwrap()); + + state +} + +#[cfg(test)] +mod tests { + use std::str::FromStr as _; + + use super::*; + + const PUB_ACC_A_TEXT_ADDR: &str = "6iArKUXxhUJqS7kCaPNhwMWt3ro71PDyBj7jwAyE2VQV"; + const PUB_ACC_B_TEXT_ADDR: &str = "7wHg9sbJwc6h3NP1S9bekfAzB8CHifEcxKswCKUt3YQo"; + + const PRIV_ACC_A_TEXT_ADDR: &str = "5ya25h4Xc9GAmrGB2WrTEnEWtQKJwRwQx3Xfo2tucNcE"; + const PRIV_ACC_B_TEXT_ADDR: &str = "E8HwiTyQe4H9HK7icTvn95HQMnzx49mP9A2ddtMLpNaN"; + + #[test] + fn pub_state_consistency() { + let init_accs_private_data = initial_pub_accounts_private_keys(); + let init_accs_pub_data = initial_accounts(); + + assert_eq!( + init_accs_private_data[0].account_id, + init_accs_pub_data[0].account_id + ); + + assert_eq!( + init_accs_private_data[1].account_id, + init_accs_pub_data[1].account_id + ); + + assert_eq!( + init_accs_pub_data[0], + PublicAccountPublicInitialData { + account_id: 
AccountId::from_str(PUB_ACC_A_TEXT_ADDR).unwrap(), + balance: PUB_ACC_A_INITIAL_BALANCE, + } + ); + + assert_eq!( + init_accs_pub_data[1], + PublicAccountPublicInitialData { + account_id: AccountId::from_str(PUB_ACC_B_TEXT_ADDR).unwrap(), + balance: PUB_ACC_B_INITIAL_BALANCE, + } + ); + } + + #[test] + fn private_state_consistency() { + let init_private_accs_keys = initial_priv_accounts_private_keys(); + let init_comms = initial_commitments(); + + assert_eq!( + init_private_accs_keys[0] + .key_chain + .secret_spending_key + .produce_private_key_holder(None) + .nullifier_secret_key, + init_private_accs_keys[0] + .key_chain + .private_key_holder + .nullifier_secret_key + ); + assert_eq!( + init_private_accs_keys[0] + .key_chain + .secret_spending_key + .produce_private_key_holder(None) + .viewing_secret_key, + init_private_accs_keys[0] + .key_chain + .private_key_holder + .viewing_secret_key + ); + assert_eq!( + init_private_accs_keys[0] + .key_chain + .private_key_holder + .generate_nullifier_public_key(), + init_private_accs_keys[0].key_chain.nullifier_public_key + ); + assert_eq!( + init_private_accs_keys[0] + .key_chain + .private_key_holder + .generate_viewing_public_key(), + init_private_accs_keys[0].key_chain.viewing_public_key + ); + + assert_eq!( + init_private_accs_keys[1] + .key_chain + .secret_spending_key + .produce_private_key_holder(None) + .nullifier_secret_key, + init_private_accs_keys[1] + .key_chain + .private_key_holder + .nullifier_secret_key + ); + assert_eq!( + init_private_accs_keys[1] + .key_chain + .secret_spending_key + .produce_private_key_holder(None) + .viewing_secret_key, + init_private_accs_keys[1] + .key_chain + .private_key_holder + .viewing_secret_key + ); + assert_eq!( + init_private_accs_keys[1] + .key_chain + .private_key_holder + .generate_nullifier_public_key(), + init_private_accs_keys[1].key_chain.nullifier_public_key + ); + assert_eq!( + init_private_accs_keys[1] + .key_chain + .private_key_holder + 
.generate_viewing_public_key(), + init_private_accs_keys[1].key_chain.viewing_public_key + ); + + assert_eq!( + init_private_accs_keys[0].account_id.to_string(), + PRIV_ACC_A_TEXT_ADDR + ); + assert_eq!( + init_private_accs_keys[1].account_id.to_string(), + PRIV_ACC_B_TEXT_ADDR + ); + + assert_eq!( + init_private_accs_keys[0].key_chain.nullifier_public_key, + init_comms[0].npk + ); + assert_eq!( + init_private_accs_keys[1].key_chain.nullifier_public_key, + init_comms[1].npk + ); + + assert_eq!( + init_comms[0], + PrivateAccountPublicInitialData { + npk: NullifierPublicKey(NPK_PRIV_ACC_A), + account: Account { + program_owner: DEFAULT_PROGRAM_OWNER, + balance: PRIV_ACC_A_INITIAL_BALANCE, + data: Data::default(), + nonce: 0.into(), + }, + } + ); + + assert_eq!( + init_comms[1], + PrivateAccountPublicInitialData { + npk: NullifierPublicKey(NPK_PRIV_ACC_B), + account: Account { + program_owner: DEFAULT_PROGRAM_OWNER, + balance: PRIV_ACC_B_INITIAL_BALANCE, + data: Data::default(), + nonce: 0.into(), + }, + } + ); + } +} diff --git a/wallet-ffi/Cargo.toml b/wallet-ffi/Cargo.toml index 93096e12..0af20a54 100644 --- a/wallet-ffi/Cargo.toml +++ b/wallet-ffi/Cargo.toml @@ -13,8 +13,8 @@ crate-type = ["rlib", "cdylib", "staticlib"] [dependencies] wallet.workspace = true nssa.workspace = true -common.workspace = true nssa_core.workspace = true +sequencer_service_rpc = { workspace = true, features = ["client"] } tokio.workspace = true [build-dependencies] diff --git a/wallet-ffi/src/keys.rs b/wallet-ffi/src/keys.rs index 8030bf5a..4eeadd8f 100644 --- a/wallet-ffi/src/keys.rs +++ b/wallet-ffi/src/keys.rs @@ -123,7 +123,7 @@ pub unsafe extern "C" fn wallet_ffi_get_private_account_keys( }; // NPK is a 32-byte array - let npk_bytes = key_chain.nullifer_public_key.0; + let npk_bytes = key_chain.nullifier_public_key.0; // VPK is a compressed secp256k1 point (33 bytes) let vpk_bytes = key_chain.viewing_public_key.to_bytes(); diff --git a/wallet-ffi/src/lib.rs b/wallet-ffi/src/lib.rs 
index c36b05e0..d84bf5a3 100644 --- a/wallet-ffi/src/lib.rs +++ b/wallet-ffi/src/lib.rs @@ -28,7 +28,7 @@ use std::sync::OnceLock; -use common::error::ExecutionFailureKind; +use ::wallet::ExecutionFailureKind; // Re-export public types for cbindgen pub use error::WalletFfiError as FfiError; use tokio::runtime::Handle; diff --git a/wallet-ffi/src/pinata.rs b/wallet-ffi/src/pinata.rs index 7c8e21d0..5807db7b 100644 --- a/wallet-ffi/src/pinata.rs +++ b/wallet-ffi/src/pinata.rs @@ -75,11 +75,9 @@ pub unsafe extern "C" fn wallet_ffi_claim_pinata( let pinata = Pinata(&wallet); match block_on(pinata.claim(pinata_id, winner_id, solution)) { - Ok(response) => { - let tx_hash = CString::new(response.tx_hash.to_string()) - .map(std::ffi::CString::into_raw) - .unwrap_or(ptr::null_mut()); - + Ok(tx_hash) => { + let tx_hash = CString::new(tx_hash.to_string()) + .map_or(ptr::null_mut(), std::ffi::CString::into_raw); unsafe { (*out_result).tx_hash = tx_hash; (*out_result).success = true; @@ -182,10 +180,9 @@ pub unsafe extern "C" fn wallet_ffi_claim_pinata_private_owned_already_initializ pinata .claim_private_owned_account_already_initialized(pinata_id, winner_id, solution, proof), ) { - Ok((response, _shared_key)) => { - let tx_hash = CString::new(response.tx_hash.to_string()) - .map(std::ffi::CString::into_raw) - .unwrap_or(ptr::null_mut()); + Ok((tx_hash, _shared_key)) => { + let tx_hash = CString::new(tx_hash.to_string()) + .map_or(ptr::null_mut(), std::ffi::CString::into_raw); unsafe { (*out_result).tx_hash = tx_hash; @@ -268,10 +265,9 @@ pub unsafe extern "C" fn wallet_ffi_claim_pinata_private_owned_not_initialized( let pinata = Pinata(&wallet); match block_on(pinata.claim_private_owned_account(pinata_id, winner_id, solution)) { - Ok((response, _shared_key)) => { - let tx_hash = CString::new(response.tx_hash.to_string()) - .map(std::ffi::CString::into_raw) - .unwrap_or(ptr::null_mut()); + Ok((tx_hash, _shared_key)) => { + let tx_hash = CString::new(tx_hash.to_string()) + 
.map_or(ptr::null_mut(), std::ffi::CString::into_raw); unsafe { (*out_result).tx_hash = tx_hash; diff --git a/wallet-ffi/src/sync.rs b/wallet-ffi/src/sync.rs index c321feb0..41031d06 100644 --- a/wallet-ffi/src/sync.rs +++ b/wallet-ffi/src/sync.rs @@ -1,5 +1,7 @@ //! Block synchronization functions. +use sequencer_service_rpc::RpcClient as _; + use crate::{ block_on, error::{print_error, WalletFfiError}, @@ -134,10 +136,10 @@ pub unsafe extern "C" fn wallet_ffi_get_current_block_height( } }; - match block_on(wallet.sequencer_client.get_last_block()) { - Ok(response) => { + match block_on(wallet.sequencer_client.get_last_block_id()) { + Ok(last_block_id) => { unsafe { - *out_block_height = response.last_block; + *out_block_height = last_block_id; } WalletFfiError::Success } diff --git a/wallet-ffi/src/transfer.rs b/wallet-ffi/src/transfer.rs index da1892dd..739832ae 100644 --- a/wallet-ffi/src/transfer.rs +++ b/wallet-ffi/src/transfer.rs @@ -73,10 +73,9 @@ pub unsafe extern "C" fn wallet_ffi_transfer_public( let transfer = NativeTokenTransfer(&wallet); match block_on(transfer.send_public_transfer(from_id, to_id, amount)) { - Ok(response) => { - let tx_hash = CString::new(response.tx_hash.to_string()) - .map(std::ffi::CString::into_raw) - .unwrap_or(ptr::null_mut()); + Ok(tx_hash) => { + let tx_hash = CString::new(tx_hash.to_string()) + .map_or(ptr::null_mut(), std::ffi::CString::into_raw); unsafe { (*out_result).tx_hash = tx_hash; @@ -163,10 +162,9 @@ pub unsafe extern "C" fn wallet_ffi_transfer_shielded( match block_on( transfer.send_shielded_transfer_to_outer_account(from_id, to_npk, to_vpk, amount), ) { - Ok((response, _shared_key)) => { - let tx_hash = CString::new(response.tx_hash) - .map(std::ffi::CString::into_raw) - .unwrap_or(ptr::null_mut()); + Ok((tx_hash, _shared_key)) => { + let tx_hash = CString::new(tx_hash.to_string()) + .map_or(ptr::null_mut(), std::ffi::CString::into_raw); unsafe { (*out_result).tx_hash = tx_hash; @@ -244,10 +242,9 @@ pub unsafe 
extern "C" fn wallet_ffi_transfer_deshielded( let transfer = NativeTokenTransfer(&wallet); match block_on(transfer.send_deshielded_transfer(from_id, to_id, amount)) { - Ok((response, _shared_key)) => { - let tx_hash = CString::new(response.tx_hash) - .map(std::ffi::CString::into_raw) - .unwrap_or(ptr::null_mut()); + Ok((tx_hash, _shared_key)) => { + let tx_hash = CString::new(tx_hash.to_string()) + .map_or(ptr::null_mut(), std::ffi::CString::into_raw); unsafe { (*out_result).tx_hash = tx_hash; @@ -333,10 +330,9 @@ pub unsafe extern "C" fn wallet_ffi_transfer_private( match block_on(transfer.send_private_transfer_to_outer_account(from_id, to_npk, to_vpk, amount)) { - Ok((response, _shared_key)) => { - let tx_hash = CString::new(response.tx_hash) - .map(std::ffi::CString::into_raw) - .unwrap_or(ptr::null_mut()); + Ok((tx_hash, _shared_key)) => { + let tx_hash = CString::new(tx_hash.to_string()) + .map_or(ptr::null_mut(), std::ffi::CString::into_raw); unsafe { (*out_result).tx_hash = tx_hash; @@ -417,10 +413,9 @@ pub unsafe extern "C" fn wallet_ffi_transfer_shielded_owned( let transfer = NativeTokenTransfer(&wallet); match block_on(transfer.send_shielded_transfer(from_id, to_id, amount)) { - Ok((response, _shared_key)) => { - let tx_hash = CString::new(response.tx_hash) - .map(std::ffi::CString::into_raw) - .unwrap_or(ptr::null_mut()); + Ok((tx_hash, _shared_key)) => { + let tx_hash = CString::new(tx_hash.to_string()) + .map_or(ptr::null_mut(), std::ffi::CString::into_raw); unsafe { (*out_result).tx_hash = tx_hash; @@ -501,10 +496,9 @@ pub unsafe extern "C" fn wallet_ffi_transfer_private_owned( let transfer = NativeTokenTransfer(&wallet); match block_on(transfer.send_private_transfer_to_owned_account(from_id, to_id, amount)) { - Ok((response, _shared_keys)) => { - let tx_hash = CString::new(response.tx_hash) - .map(std::ffi::CString::into_raw) - .unwrap_or(ptr::null_mut()); + Ok((tx_hash, _shared_keys)) => { + let tx_hash = CString::new(tx_hash.to_string()) + 
.map_or(ptr::null_mut(), std::ffi::CString::into_raw); unsafe { (*out_result).tx_hash = tx_hash; @@ -573,10 +567,9 @@ pub unsafe extern "C" fn wallet_ffi_register_public_account( let transfer = NativeTokenTransfer(&wallet); match block_on(transfer.register_account(account_id)) { - Ok(response) => { - let tx_hash = CString::new(response.tx_hash.to_string()) - .map(std::ffi::CString::into_raw) - .unwrap_or(ptr::null_mut()); + Ok(tx_hash) => { + let tx_hash = CString::new(tx_hash.to_string()) + .map_or(ptr::null_mut(), std::ffi::CString::into_raw); unsafe { (*out_result).tx_hash = tx_hash; @@ -645,10 +638,9 @@ pub unsafe extern "C" fn wallet_ffi_register_private_account( let transfer = NativeTokenTransfer(&wallet); match block_on(transfer.register_account_private(account_id)) { - Ok((res, _secret)) => { - let tx_hash = CString::new(res.tx_hash) - .map(std::ffi::CString::into_raw) - .unwrap_or(ptr::null_mut()); + Ok((tx_hash, _secret)) => { + let tx_hash = CString::new(tx_hash.to_string()) + .map_or(ptr::null_mut(), std::ffi::CString::into_raw); unsafe { (*out_result).tx_hash = tx_hash; diff --git a/wallet-ffi/src/wallet.rs b/wallet-ffi/src/wallet.rs index 9117d0ee..93fc20aa 100644 --- a/wallet-ffi/src/wallet.rs +++ b/wallet-ffi/src/wallet.rs @@ -111,8 +111,8 @@ pub unsafe extern "C" fn wallet_ffi_create_new( return ptr::null_mut(); }; - match WalletCore::new_init_storage(config_path, storage_path, None, password) { - Ok(core) => { + match WalletCore::new_init_storage(config_path, storage_path, None, &password) { + Ok((core, _mnemonic)) => { let wrapper = Box::new(WalletWrapper { core: Mutex::new(core), }); diff --git a/wallet/Cargo.toml b/wallet/Cargo.toml index 63e14bb6..4e98b8ef 100644 --- a/wallet/Cargo.toml +++ b/wallet/Cargo.toml @@ -12,10 +12,15 @@ nssa_core.workspace = true nssa.workspace = true common.workspace = true key_protocol.workspace = true +sequencer_service_rpc = { workspace = true, features = ["client"] } token_core.workspace = true 
amm_core.workspace = true +testnet_initial_state.workspace = true +ata_core.workspace = true +bip39.workspace = true anyhow.workspace = true +thiserror.workspace = true serde_json.workspace = true env_logger.workspace = true log.workspace = true @@ -25,8 +30,6 @@ humantime.workspace = true tokio = { workspace = true, features = ["macros"] } clap.workspace = true base58.workspace = true -base64.workspace = true -borsh.workspace = true hex.workspace = true rand.workspace = true itertools.workspace = true diff --git a/wallet/configs/debug/wallet_config.json b/wallet/configs/debug/wallet_config.json index aae6293e..6604f65b 100644 --- a/wallet/configs/debug/wallet_config.json +++ b/wallet/configs/debug/wallet_config.json @@ -1,147 +1,411 @@ { - "override_rust_log": null, - "sequencer_addr": "http://127.0.0.1:3040", - "seq_poll_timeout": "30s", - "seq_tx_poll_max_blocks": 15, - "seq_poll_max_retries": 10, - "seq_block_poll_max_amount": 100, - "initial_accounts": [ - { - "Public": { - "account_id": "6iArKUXxhUJqS7kCaPNhwMWt3ro71PDyBj7jwAyE2VQV", - "pub_sign_key": [ - 16, - 162, - 106, - 154, - 236, - 125, - 52, - 184, - 35, - 100, - 238, - 174, - 69, - 197, - 41, - 77, - 187, - 10, - 118, - 75, - 0, - 11, - 148, - 238, - 185, - 181, - 133, - 17, - 220, - 72, - 124, - 77 - ] - } - }, - { - "Public": { - "account_id": "7wHg9sbJwc6h3NP1S9bekfAzB8CHifEcxKswCKUt3YQo", - "pub_sign_key": [ - 113, - 121, - 64, - 177, - 204, - 85, - 229, - 214, - 178, - 6, - 109, - 191, - 29, - 154, - 63, - 38, - 242, - 18, - 244, - 219, - 8, - 208, - 35, - 136, - 23, - 127, - 207, - 237, - 216, - 169, - 190, - 27 - ] - } - }, + "sequencer_addr": "http://127.0.0.1:3040", + "seq_poll_timeout": "30s", + "seq_tx_poll_max_blocks": 15, + "seq_poll_max_retries": 10, + "seq_block_poll_max_amount": 100, + "initial_accounts": [ { - "Private": { - "account_id": "2ECgkFTaXzwjJBXR7ZKmXYQtpHbvTTHK9Auma4NL9AUo", - "account": { - "program_owner": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "balance": 10000, 
- "data": [], - "nonce": 0 + "Public": { + "account_id": "CbgR6tj5kWx5oziiFptM7jMvrQeYY3Mzaao6ciuhSr2r", + "pub_sign_key": "7f273098f25b71e6c005a9519f2678da8d1c7f01f6a27778e2d9948abdf901fb" + } }, - "key_chain": { - "secret_spending_key": [112, 17, 152, 192, 217, 201, 142, 92, 111, 68, 85, 222, 107, 73, 78, 196, 118, 226, 37, 17, 185, 177, 149, 182, 9, 85, 187, 152, 163, 144, 68, 121], - "private_key_holder": { - "nullifier_secret_key": [52, 33, 235, 245, 42, 132, 163, 182, 114, 56, 144, 187, 147, 23, 184, 227, 128, 12, 180, 142, 217, 110, 188, 177, 155, 141, 23, 127, 216, 185, 33, 126], - "viewing_secret_key": [44, 81, 165, 166, 34, 188, 192, 240, 40, 9, 83, 189, 215, 184, 246, 154, 247, 227, 155, 16, 121, 238, 4, 245, 63, 135, 192, 213, 222, 247, 120, 86] - }, - "nullifer_public_key": [13, 25, 40, 5, 198, 248, 210, 248, 237, 121, 124, 145, 186, 142, 253, 216, 236, 69, 193, 32, 166, 167, 49, 133, 172, 111, 159, 46, 84, 17, 157, 23], - "viewing_public_key": [3, 43, 116, 165, 161, 27, 150, 158, 175, 198, 215, 27, 121, 126, 158, 224, 249, 92, 168, 163, 173, 115, 120, 122, 89, 173, 133, 94, 39, 238, 62, 52, 193] - } - } - }, - { - "Private": { - "account_id": "E8HwiTyQe4H9HK7icTvn95HQMnzx49mP9A2ddtMLpNaN", - "account": { - "program_owner": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "balance": 20000, - "data": [], - "nonce": 0 + { + "Public": { + "account_id": "2RHZhw9h534Zr3eq2RGhQete2Hh667foECzXPmSkGni2", + "pub_sign_key": "f434f8741720014586ae43356d2aec6257da086222f604ddb75d69733b86fc4c" + } }, - "key_chain": { - "secret_spending_key": [48, 175, 124, 10, 230, 240, 166, 14, 249, 254, 157, 226, 208, 124, 122, 177, 203, 139, 192, 180, 43, 120, 55, 151, 50, 21, 113, 22, 254, 83, 148, 56], - "private_key_holder": { - "nullifier_secret_key": [99, 82, 190, 140, 234, 10, 61, 163, 15, 211, 179, 54, 70, 166, 87, 5, 182, 68, 117, 244, 217, 23, 99, 9, 4, 177, 230, 125, 109, 91, 160, 30], - "viewing_secret_key": [205, 32, 76, 251, 255, 236, 96, 119, 61, 111, 65, 100, 75, 
218, 12, 22, 17, 170, 55, 226, 21, 154, 161, 34, 208, 74, 27, 1, 119, 13, 88, 128] - }, - "nullifer_public_key": [32, 67, 72, 164, 106, 53, 66, 239, 141, 15, 52, 230, 136, 177, 2, 236, 207, 243, 134, 135, 210, 143, 87, 232, 215, 128, 194, 120, 113, 224, 4, 165], - "viewing_public_key": [2, 79, 110, 46, 203, 29, 206, 205, 18, 86, 27, 189, 104, 103, 113, 181, 110, 53, 78, 172, 11, 171, 190, 18, 126, 214, 81, 77, 192, 154, 58, 195, 238] + { + "Private": { + "account_id": "9DGDXnrNo4QhUUb2F8WDuDrPESja3eYDkZG5HkzvAvMC", + "account": { + "program_owner": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "balance": 10000, + "data": [], + "nonce": 0 + }, + "key_chain": { + "secret_spending_key": [ + 75, + 231, + 144, + 165, + 5, + 36, + 183, + 237, + 190, + 227, + 238, + 13, + 132, + 39, + 114, + 228, + 172, + 82, + 119, + 164, + 233, + 132, + 130, + 224, + 201, + 90, + 200, + 156, + 108, + 199, + 56, + 22 + ], + "private_key_holder": { + "nullifier_secret_key": [ + 212, + 34, + 166, + 184, + 182, + 77, + 127, + 176, + 147, + 68, + 148, + 190, + 41, + 244, + 8, + 202, + 51, + 10, + 44, + 43, + 93, + 41, + 229, + 130, + 54, + 96, + 198, + 242, + 10, + 227, + 119, + 1 + ], + "viewing_secret_key": [ + 205, + 10, + 5, + 19, + 148, + 98, + 49, + 19, + 251, + 186, + 247, + 216, + 75, + 53, + 184, + 36, + 84, + 87, + 236, + 205, + 105, + 217, + 213, + 21, + 61, + 183, + 133, + 174, + 121, + 115, + 51, + 203 + ] + }, + "nullifier_public_key": [ + 122, + 213, + 113, + 8, + 118, + 179, + 235, + 94, + 5, + 219, + 131, + 106, + 246, + 253, + 14, + 204, + 65, + 93, + 0, + 198, + 100, + 108, + 57, + 48, + 6, + 65, + 183, + 31, + 136, + 86, + 82, + 165 + ], + "viewing_public_key": [ + 3, + 165, + 235, + 215, + 77, + 4, + 19, + 45, + 0, + 27, + 18, + 26, + 11, + 226, + 126, + 174, + 144, + 167, + 160, + 199, + 14, + 23, + 49, + 163, + 49, + 138, + 129, + 229, + 79, + 9, + 15, + 234, + 30 + ] + } + } + }, + { + "Private": { + "account_id": "A6AT9UvsgitUi8w4BH43n6DyX1bK37DtSCfjEWXQQUrQ", + 
"account": { + "program_owner": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "balance": 20000, + "data": [], + "nonce": 0 + }, + "key_chain": { + "secret_spending_key": [ + 107, + 49, + 136, + 174, + 162, + 107, + 250, + 105, + 252, + 146, + 166, + 197, + 163, + 132, + 153, + 222, + 68, + 17, + 87, + 101, + 22, + 113, + 88, + 97, + 180, + 203, + 139, + 18, + 28, + 62, + 51, + 149 + ], + "private_key_holder": { + "nullifier_secret_key": [ + 219, + 5, + 233, + 185, + 144, + 150, + 100, + 58, + 97, + 5, + 57, + 163, + 110, + 46, + 241, + 216, + 155, + 217, + 100, + 51, + 184, + 21, + 225, + 148, + 198, + 9, + 121, + 239, + 232, + 98, + 22, + 218 + ], + "viewing_secret_key": [ + 35, + 105, + 230, + 121, + 218, + 177, + 21, + 55, + 83, + 80, + 95, + 235, + 161, + 83, + 11, + 221, + 67, + 83, + 1, + 218, + 49, + 242, + 53, + 29, + 26, + 171, + 170, + 144, + 49, + 233, + 159, + 48 + ] + }, + "nullifier_public_key": [ + 33, + 68, + 229, + 154, + 12, + 235, + 210, + 229, + 236, + 144, + 126, + 122, + 58, + 107, + 36, + 58, + 243, + 128, + 174, + 197, + 141, + 137, + 162, + 190, + 155, + 234, + 94, + 156, + 218, + 34, + 13, + 221 + ], + "viewing_public_key": [ + 3, + 122, + 7, + 137, + 250, + 84, + 10, + 85, + 3, + 15, + 134, + 250, + 205, + 40, + 126, + 211, + 14, + 120, + 15, + 55, + 56, + 214, + 72, + 243, + 83, + 17, + 124, + 242, + 251, + 184, + 174, + 150, + 83 + ] + } + } } - } - } - ], - "basic_auth": null + ] } \ No newline at end of file diff --git a/wallet/src/chain_storage.rs b/wallet/src/chain_storage.rs index 90325666..3699609b 100644 --- a/wallet/src/chain_storage.rs +++ b/wallet/src/chain_storage.rs @@ -1,6 +1,7 @@ use std::collections::{BTreeMap, HashMap, btree_map::Entry}; use anyhow::Result; +use bip39::Mnemonic; use key_protocol::{ key_management::{ key_tree::{KeyTreePrivate, KeyTreePublic, chain_index::ChainIndex}, @@ -95,33 +96,69 @@ impl WalletChainStore { }) } - pub fn new_storage(config: WalletConfig, password: String) -> Result { + pub fn 
new_storage(config: WalletConfig, password: &str) -> Result<(Self, Mnemonic)> { let mut public_init_acc_map = BTreeMap::new(); let mut private_init_acc_map = BTreeMap::new(); - for init_acc_data in config.initial_accounts.clone() { + let initial_accounts = config + .initial_accounts + .clone() + .unwrap_or_else(InitialAccountData::create_initial_accounts_data); + + for init_acc_data in initial_accounts { match init_acc_data { InitialAccountData::Public(data) => { public_init_acc_map.insert(data.account_id, data.pub_sign_key); } InitialAccountData::Private(data) => { let mut account = data.account; - // TODO: Program owner is only known after code is compiled and can't be set in - // the config. Therefore we overwrite it here on startup. Fix this when program - // id can be fetched from the node and queried from the wallet. + // TODO: Program owner is only known after code is compiled and can't be set + // in the config. Therefore we overwrite it here on + // startup. Fix this when program id can be fetched + // from the node and queried from the wallet. account.program_owner = Program::authenticated_transfer_program().id(); private_init_acc_map.insert(data.account_id, (data.key_chain, account)); } } } - let public_tree = KeyTreePublic::new(&SeedHolder::new_mnemonic(password.clone())); - let private_tree = KeyTreePrivate::new(&SeedHolder::new_mnemonic(password)); + // TODO: Use password for storage encryption + let _ = password; + let (seed_holder, mnemonic) = SeedHolder::new_mnemonic(""); + let public_tree = KeyTreePublic::new(&seed_holder); + let private_tree = KeyTreePrivate::new(&seed_holder); + + Ok(( + Self { + user_data: NSSAUserData::new_with_accounts( + public_init_acc_map, + private_init_acc_map, + public_tree, + private_tree, + )?, + wallet_config: config, + labels: HashMap::new(), + }, + mnemonic, + )) + } + + /// Restore storage from an existing mnemonic phrase. 
+ pub fn restore_storage( + config: WalletConfig, + mnemonic: &Mnemonic, + password: &str, + ) -> Result { + // TODO: Use password for storage encryption + let _ = password; + let seed_holder = SeedHolder::from_mnemonic(mnemonic, ""); + let public_tree = KeyTreePublic::new(&seed_holder); + let private_tree = KeyTreePrivate::new(&seed_holder); Ok(Self { user_data: NSSAUserData::new_with_accounts( - public_init_acc_map, - private_init_acc_map, + BTreeMap::new(), + BTreeMap::new(), public_tree, private_tree, )?, @@ -166,112 +203,17 @@ mod tests { }; use super::*; - use crate::config::{ - InitialAccountData, PersistentAccountDataPrivate, PersistentAccountDataPublic, - }; - - fn create_initial_accounts() -> Vec { - let initial_acc1 = serde_json::from_str( - r#"{ - "Public": { - "account_id": "6iArKUXxhUJqS7kCaPNhwMWt3ro71PDyBj7jwAyE2VQV", - "pub_sign_key": [ - 16, - 162, - 106, - 154, - 236, - 125, - 52, - 184, - 35, - 100, - 238, - 174, - 69, - 197, - 41, - 77, - 187, - 10, - 118, - 75, - 0, - 11, - 148, - 238, - 185, - 181, - 133, - 17, - 220, - 72, - 124, - 77 - ] - } - }"#, - ) - .unwrap(); - - let initial_acc2 = serde_json::from_str( - r#"{ - "Public": { - "account_id": "7wHg9sbJwc6h3NP1S9bekfAzB8CHifEcxKswCKUt3YQo", - "pub_sign_key": [ - 113, - 121, - 64, - 177, - 204, - 85, - 229, - 214, - 178, - 6, - 109, - 191, - 29, - 154, - 63, - 38, - 242, - 18, - 244, - 219, - 8, - 208, - 35, - 136, - 23, - 127, - 207, - 237, - 216, - 169, - 190, - 27 - ] - } - }"#, - ) - .unwrap(); - - let initial_accounts = vec![initial_acc1, initial_acc2]; - - initial_accounts - } + use crate::config::{PersistentAccountDataPrivate, PersistentAccountDataPublic}; fn create_sample_wallet_config() -> WalletConfig { WalletConfig { - override_rust_log: None, sequencer_addr: "http://127.0.0.1".parse().unwrap(), seq_poll_timeout: std::time::Duration::from_secs(12), seq_tx_poll_max_blocks: 5, seq_poll_max_retries: 10, seq_block_poll_max_amount: 100, - initial_accounts: create_initial_accounts(), 
basic_auth: None, + initial_accounts: None, } } diff --git a/wallet/src/cli/account.rs b/wallet/src/cli/account.rs index 319fac0e..2a8ed2c7 100644 --- a/wallet/src/cli/account.rs +++ b/wallet/src/cli/account.rs @@ -3,6 +3,7 @@ use clap::Subcommand; use itertools::Itertools as _; use key_protocol::key_management::key_tree::chain_index::ChainIndex; use nssa::{Account, PublicKey, program::Program}; +use sequencer_service_rpc::RpcClient as _; use token_core::{TokenDefinition, TokenHolding}; use crate::{ @@ -145,7 +146,7 @@ impl WalletSubcommand for NewSubcommand { println!( "Generated new account with account_id Private/{account_id} at path {chain_index}", ); - println!("With npk {}", hex::encode(key.nullifer_public_key.0)); + println!("With npk {}", hex::encode(key.nullifier_public_key.0)); println!( "With vpk {}", hex::encode(key.viewing_public_key.to_bytes()) @@ -208,7 +209,7 @@ impl WalletSubcommand for AccountSubcommand { .get_private_account(account_id) .context("Private account not found in storage")?; - println!("npk {}", hex::encode(key.nullifer_public_key.0)); + println!("npk {}", hex::encode(key.nullifier_public_key.0)); println!("vpk {}", hex::encode(key.viewing_public_key.to_bytes())); } } @@ -244,11 +245,7 @@ impl WalletSubcommand for AccountSubcommand { } Self::New(new_subcommand) => new_subcommand.handle_subcommand(wallet_core).await, Self::SyncPrivate => { - let curr_last_block = wallet_core - .sequencer_client - .get_last_block() - .await? 
- .last_block; + let curr_last_block = wallet_core.sequencer_client.get_last_block_id().await?; if wallet_core .storage diff --git a/wallet/src/cli/chain.rs b/wallet/src/cli/chain.rs index 4beadbbc..dfb22eba 100644 --- a/wallet/src/cli/chain.rs +++ b/wallet/src/cli/chain.rs @@ -1,6 +1,7 @@ use anyhow::Result; use clap::Subcommand; use common::HashType; +use sequencer_service_rpc::RpcClient as _; use crate::{ WalletCore, @@ -32,22 +33,19 @@ impl WalletSubcommand for ChainSubcommand { ) -> Result { match self { Self::CurrentBlockId => { - let latest_block_res = wallet_core.sequencer_client.get_last_block().await?; + let latest_block_id = wallet_core.sequencer_client.get_last_block_id().await?; - println!("Last block id is {}", latest_block_res.last_block); + println!("Last block id is {latest_block_id}"); } Self::Block { id } => { - let block_res = wallet_core.sequencer_client.get_block(id).await?; + let block = wallet_core.sequencer_client.get_block(id).await?; - println!("Last block id is {:#?}", block_res.block); + println!("Last block id is {block:#?}"); } Self::Transaction { hash } => { - let tx_res = wallet_core - .sequencer_client - .get_transaction_by_hash(hash) - .await?; + let tx = wallet_core.sequencer_client.get_transaction(hash).await?; - println!("Last block id is {:#?}", tx_res.transaction); + println!("Transaction is {tx:#?}"); } } Ok(SubcommandReturnValue::Empty) diff --git a/wallet/src/cli/config.rs b/wallet/src/cli/config.rs index ac94a1b7..7f0ba952 100644 --- a/wallet/src/cli/config.rs +++ b/wallet/src/cli/config.rs @@ -4,6 +4,7 @@ use clap::Subcommand; use crate::{ WalletCore, cli::{SubcommandReturnValue, WalletSubcommand}, + config::InitialAccountData, }; /// Represents generic config CLI subcommand. 
@@ -37,15 +38,6 @@ impl WalletSubcommand for ConfigSubcommand { println!("{config_str}"); } else if let Some(key) = key { match key.as_str() { - "override_rust_log" => { - if let Some(value) = - &wallet_core.storage.wallet_config.override_rust_log - { - println!("{value}"); - } else { - println!("Not set"); - } - } "sequencer_addr" => { println!("{}", wallet_core.storage.wallet_config.sequencer_addr); } @@ -68,7 +60,17 @@ impl WalletSubcommand for ConfigSubcommand { ); } "initial_accounts" => { - println!("{:#?}", wallet_core.storage.wallet_config.initial_accounts); + println!( + "{:#?}", + wallet_core + .storage + .wallet_config + .initial_accounts + .clone() + .unwrap_or_else( + InitialAccountData::create_initial_accounts_data + ) + ); } "basic_auth" => { if let Some(basic_auth) = &wallet_core.storage.wallet_config.basic_auth @@ -88,9 +90,6 @@ impl WalletSubcommand for ConfigSubcommand { } Self::Set { key, value } => { match key.as_str() { - "override_rust_log" => { - wallet_core.storage.wallet_config.override_rust_log = Some(value); - } "sequencer_addr" => { wallet_core.storage.wallet_config.sequencer_addr = value.parse()?; } diff --git a/wallet/src/cli/mod.rs b/wallet/src/cli/mod.rs index 58d77d6a..1653e938 100644 --- a/wallet/src/cli/mod.rs +++ b/wallet/src/cli/mod.rs @@ -1,9 +1,12 @@ -use std::{io::Write as _, path::PathBuf, sync::Arc}; +use std::{io::Write as _, path::PathBuf, str::FromStr as _}; use anyhow::{Context as _, Result}; +use bip39::Mnemonic; use clap::{Parser, Subcommand}; -use common::HashType; +use common::{HashType, transaction::NSSATransaction}; +use futures::TryFutureExt as _; use nssa::{ProgramDeploymentTransaction, program::Program}; +use sequencer_service_rpc::RpcClient as _; use crate::{ WalletCore, @@ -12,8 +15,9 @@ use crate::{ chain::ChainSubcommand, config::ConfigSubcommand, programs::{ - amm::AmmProgramAgnosticSubcommand, native_token_transfer::AuthTransferSubcommand, - pinata::PinataProgramAgnosticSubcommand, 
token::TokenProgramAgnosticSubcommand, + amm::AmmProgramAgnosticSubcommand, ata::AtaSubcommand, + native_token_transfer::AuthTransferSubcommand, pinata::PinataProgramAgnosticSubcommand, + token::TokenProgramAgnosticSubcommand, }, }, }; @@ -50,6 +54,9 @@ pub enum Command { /// AMM program interaction subcommand. #[command(subcommand)] AMM(AmmProgramAgnosticSubcommand), + /// Associated Token Account program interaction subcommand. + #[command(subcommand)] + Ata(AtaSubcommand), /// Check the wallet can connect to the node and builtin local programs /// match the remote versions. CheckHealth, @@ -156,12 +163,14 @@ pub async fn execute_subcommand( } Command::Token(token_subcommand) => token_subcommand.handle_subcommand(wallet_core).await?, Command::AMM(amm_subcommand) => amm_subcommand.handle_subcommand(wallet_core).await?, + Command::Ata(ata_subcommand) => ata_subcommand.handle_subcommand(wallet_core).await?, Command::Config(config_subcommand) => { config_subcommand.handle_subcommand(wallet_core).await? } Command::RestoreKeys { depth } => { + let mnemonic = read_mnemonic_from_stdin()?; let password = read_password_from_stdin()?; - wallet_core.reset_storage(password)?; + wallet_core.restore_storage(&mnemonic, &password)?; execute_keys_restoration(wallet_core, depth).await?; SubcommandReturnValue::Empty @@ -175,7 +184,7 @@ pub async fn execute_subcommand( let transaction = ProgramDeploymentTransaction::new(message); let _response = wallet_core .sequencer_client - .send_tx_program(transaction) + .send_transaction(NSSATransaction::ProgramDeployment(transaction)) .await .context("Transaction submission error")?; @@ -188,11 +197,7 @@ pub async fn execute_subcommand( pub async fn execute_continuous_run(wallet_core: &mut WalletCore) -> Result<()> { loop { - let latest_block_num = wallet_core - .sequencer_client - .get_last_block() - .await? 
- .last_block; + let latest_block_num = wallet_core.sequencer_client.get_last_block_id().await?; wallet_core.sync_to_block(latest_block_num).await?; tokio::time::sleep(wallet_core.config().seq_poll_timeout).await; @@ -209,6 +214,16 @@ pub fn read_password_from_stdin() -> Result { Ok(password.trim().to_owned()) } +pub fn read_mnemonic_from_stdin() -> Result { + let mut phrase = String::new(); + + print!("Input recovery phrase: "); + std::io::stdout().flush()?; + std::io::stdin().read_line(&mut phrase)?; + + Mnemonic::from_str(phrase.trim()).context("Invalid mnemonic phrase") +} + pub async fn execute_keys_restoration(wallet_core: &mut WalletCore, depth: u32) -> Result<()> { wallet_core .storage @@ -230,16 +245,17 @@ pub async fn execute_keys_restoration(wallet_core: &mut WalletCore, depth: u32) .storage .user_data .public_key_tree - .cleanup_tree_remove_uninit_layered(depth, Arc::clone(&wallet_core.sequencer_client)) + .cleanup_tree_remove_uninit_layered(depth, |account_id| { + wallet_core + .sequencer_client + .get_account(account_id) + .map_err(Into::into) + }) .await?; println!("Public tree cleaned up"); - let last_block = wallet_core - .sequencer_client - .get_last_block() - .await? - .last_block; + let last_block = wallet_core.sequencer_client.get_last_block_id().await?; println!("Last block is {last_block}"); diff --git a/wallet/src/cli/programs/amm.rs b/wallet/src/cli/programs/amm.rs index 7307569d..0b721d15 100644 --- a/wallet/src/cli/programs/amm.rs +++ b/wallet/src/cli/programs/amm.rs @@ -32,12 +32,12 @@ pub enum AmmProgramAgnosticSubcommand { #[arg(long)] balance_b: u128, }, - /// Swap. + /// Swap specifying exact input amount. /// /// The account associated with swapping token must be owned. /// /// Only public execution allowed. - Swap { + SwapExactInput { /// `user_holding_a` - valid 32 byte base58 string with privacy prefix. 
#[arg(long)] user_holding_a: String, @@ -52,6 +52,26 @@ pub enum AmmProgramAgnosticSubcommand { #[arg(long)] token_definition: String, }, + /// Swap specifying exact output amount. + /// + /// The account associated with swapping token must be owned. + /// + /// Only public execution allowed. + SwapExactOutput { + /// `user_holding_a` - valid 32 byte base58 string with privacy prefix. + #[arg(long)] + user_holding_a: String, + /// `user_holding_b` - valid 32 byte base58 string with privacy prefix. + #[arg(long)] + user_holding_b: String, + #[arg(long)] + exact_amount_out: u128, + #[arg(long)] + max_amount_in: u128, + /// `token_definition` - valid 32 byte base58 string WITHOUT privacy prefix. + #[arg(long)] + token_definition: String, + }, /// Add liquidity. /// /// `user_holding_a` and `user_holding_b` must be owned. @@ -150,7 +170,7 @@ impl WalletSubcommand for AmmProgramAgnosticSubcommand { } } } - Self::Swap { + Self::SwapExactInput { user_holding_a, user_holding_b, amount_in, @@ -168,7 +188,7 @@ impl WalletSubcommand for AmmProgramAgnosticSubcommand { match (user_holding_a_privacy, user_holding_b_privacy) { (AccountPrivacyKind::Public, AccountPrivacyKind::Public) => { Amm(wallet_core) - .send_swap( + .send_swap_exact_input( user_holding_a, user_holding_b, amount_in, @@ -185,6 +205,41 @@ impl WalletSubcommand for AmmProgramAgnosticSubcommand { } } } + Self::SwapExactOutput { + user_holding_a, + user_holding_b, + exact_amount_out, + max_amount_in, + token_definition, + } => { + let (user_holding_a, user_holding_a_privacy) = + parse_addr_with_privacy_prefix(&user_holding_a)?; + let (user_holding_b, user_holding_b_privacy) = + parse_addr_with_privacy_prefix(&user_holding_b)?; + + let user_holding_a: AccountId = user_holding_a.parse()?; + let user_holding_b: AccountId = user_holding_b.parse()?; + + match (user_holding_a_privacy, user_holding_b_privacy) { + (AccountPrivacyKind::Public, AccountPrivacyKind::Public) => { + Amm(wallet_core) + .send_swap_exact_output( + 
user_holding_a, + user_holding_b, + exact_amount_out, + max_amount_in, + token_definition.parse()?, + ) + .await?; + + Ok(SubcommandReturnValue::Empty) + } + _ => { + // ToDo: Implement after private multi-chain calls is available + anyhow::bail!("Only public execution allowed for Amm calls"); + } + } + } Self::AddLiquidity { user_holding_a, user_holding_b, diff --git a/wallet/src/cli/programs/ata.rs b/wallet/src/cli/programs/ata.rs new file mode 100644 index 00000000..1a63fa67 --- /dev/null +++ b/wallet/src/cli/programs/ata.rs @@ -0,0 +1,240 @@ +use anyhow::Result; +use clap::Subcommand; +use common::transaction::NSSATransaction; +use nssa::{Account, AccountId, program::Program}; +use token_core::TokenHolding; + +use crate::{ + AccDecodeData::Decode, + WalletCore, + cli::{SubcommandReturnValue, WalletSubcommand}, + helperfunctions::{AccountPrivacyKind, parse_addr_with_privacy_prefix}, + program_facades::ata::Ata, +}; + +/// Represents generic CLI subcommand for a wallet working with the ATA program. +#[derive(Subcommand, Debug, Clone)] +pub enum AtaSubcommand { + /// Derive and print the Associated Token Account address (local only, no network). + Address { + /// Owner account - valid 32 byte base58 string (no privacy prefix). + #[arg(long)] + owner: String, + /// Token definition account - valid 32 byte base58 string (no privacy prefix). + #[arg(long)] + token_definition: String, + }, + /// Create (or idempotently no-op) the Associated Token Account. + Create { + /// Owner account - valid 32 byte base58 string with privacy prefix. + #[arg(long)] + owner: String, + /// Token definition account - valid 32 byte base58 string WITHOUT privacy prefix. + #[arg(long)] + token_definition: String, + }, + /// Send tokens from owner's ATA to a recipient token holding account. + Send { + /// Sender account - valid 32 byte base58 string with privacy prefix. + #[arg(long)] + from: String, + /// Token definition account - valid 32 byte base58 string WITHOUT privacy prefix. 
+ #[arg(long)] + token_definition: String, + /// Recipient account - valid 32 byte base58 string WITHOUT privacy prefix. + #[arg(long)] + to: String, + #[arg(long)] + amount: u128, + }, + /// Burn tokens from holder's ATA. + Burn { + /// Holder account - valid 32 byte base58 string with privacy prefix. + #[arg(long)] + holder: String, + /// Token definition account - valid 32 byte base58 string WITHOUT privacy prefix. + #[arg(long)] + token_definition: String, + #[arg(long)] + amount: u128, + }, + /// List all ATAs for a given owner across multiple token definitions. + List { + /// Owner account - valid 32 byte base58 string (no privacy prefix). + #[arg(long)] + owner: String, + /// Token definition accounts - valid 32 byte base58 strings (no privacy prefix). + #[arg(long, num_args = 1..)] + token_definition: Vec, + }, +} + +impl WalletSubcommand for AtaSubcommand { + async fn handle_subcommand( + self, + wallet_core: &mut WalletCore, + ) -> Result { + match self { + Self::Address { + owner, + token_definition, + } => { + let owner_id: AccountId = owner.parse()?; + let definition_id: AccountId = token_definition.parse()?; + let ata_program_id = Program::ata().id(); + let ata_id = ata_core::get_associated_token_account_id( + &ata_program_id, + &ata_core::compute_ata_seed(owner_id, definition_id), + ); + println!("{ata_id}"); + Ok(SubcommandReturnValue::Empty) + } + Self::Create { + owner, + token_definition, + } => { + let (owner_str, owner_privacy) = parse_addr_with_privacy_prefix(&owner)?; + let owner_id: AccountId = owner_str.parse()?; + let definition_id: AccountId = token_definition.parse()?; + + match owner_privacy { + AccountPrivacyKind::Public => { + Ata(wallet_core) + .send_create(owner_id, definition_id) + .await?; + Ok(SubcommandReturnValue::Empty) + } + AccountPrivacyKind::Private => { + let (tx_hash, secret) = Ata(wallet_core) + .send_create_private_owner(owner_id, definition_id) + .await?; + + println!("Transaction hash is {tx_hash}"); + + let tx = 
wallet_core.poll_native_token_transfer(tx_hash).await?; + if let NSSATransaction::PrivacyPreserving(tx) = tx { + wallet_core.decode_insert_privacy_preserving_transaction_results( + &tx, + &[Decode(secret, owner_id)], + )?; + } + + wallet_core.store_persistent_data().await?; + Ok(SubcommandReturnValue::Empty) + } + } + } + Self::Send { + from, + token_definition, + to, + amount, + } => { + let (from_str, from_privacy) = parse_addr_with_privacy_prefix(&from)?; + let from_id: AccountId = from_str.parse()?; + let definition_id: AccountId = token_definition.parse()?; + let to_id: AccountId = to.parse()?; + + match from_privacy { + AccountPrivacyKind::Public => { + Ata(wallet_core) + .send_transfer(from_id, definition_id, to_id, amount) + .await?; + Ok(SubcommandReturnValue::Empty) + } + AccountPrivacyKind::Private => { + let (tx_hash, secret) = Ata(wallet_core) + .send_transfer_private_owner(from_id, definition_id, to_id, amount) + .await?; + + println!("Transaction hash is {tx_hash}"); + + let tx = wallet_core.poll_native_token_transfer(tx_hash).await?; + if let NSSATransaction::PrivacyPreserving(tx) = tx { + wallet_core.decode_insert_privacy_preserving_transaction_results( + &tx, + &[Decode(secret, from_id)], + )?; + } + + wallet_core.store_persistent_data().await?; + Ok(SubcommandReturnValue::Empty) + } + } + } + Self::Burn { + holder, + token_definition, + amount, + } => { + let (holder_str, holder_privacy) = parse_addr_with_privacy_prefix(&holder)?; + let holder_id: AccountId = holder_str.parse()?; + let definition_id: AccountId = token_definition.parse()?; + + match holder_privacy { + AccountPrivacyKind::Public => { + Ata(wallet_core) + .send_burn(holder_id, definition_id, amount) + .await?; + Ok(SubcommandReturnValue::Empty) + } + AccountPrivacyKind::Private => { + let (tx_hash, secret) = Ata(wallet_core) + .send_burn_private_owner(holder_id, definition_id, amount) + .await?; + + println!("Transaction hash is {tx_hash}"); + + let tx = 
wallet_core.poll_native_token_transfer(tx_hash).await?; + if let NSSATransaction::PrivacyPreserving(tx) = tx { + wallet_core.decode_insert_privacy_preserving_transaction_results( + &tx, + &[Decode(secret, holder_id)], + )?; + } + + wallet_core.store_persistent_data().await?; + Ok(SubcommandReturnValue::Empty) + } + } + } + Self::List { + owner, + token_definition, + } => { + let owner_id: AccountId = owner.parse()?; + let ata_program_id = Program::ata().id(); + + for def in &token_definition { + let definition_id: AccountId = def.parse()?; + let ata_id = ata_core::get_associated_token_account_id( + &ata_program_id, + &ata_core::compute_ata_seed(owner_id, definition_id), + ); + let account = wallet_core.get_account_public(ata_id).await?; + + if account == Account::default() { + println!("No ATA for definition {definition_id}"); + } else { + let holding = TokenHolding::try_from(&account.data)?; + match holding { + TokenHolding::Fungible { balance, .. } => { + println!( + "ATA {ata_id} (definition {definition_id}): balance {balance}" + ); + } + TokenHolding::NftMaster { .. } + | TokenHolding::NftPrintedCopy { .. 
} => { + println!( + "ATA {ata_id} (definition {definition_id}): unsupported token type" + ); + } + } + } + } + + Ok(SubcommandReturnValue::Empty) + } + } + } +} diff --git a/wallet/src/cli/programs/mod.rs b/wallet/src/cli/programs/mod.rs index 96a4e766..f6e4b5dc 100644 --- a/wallet/src/cli/programs/mod.rs +++ b/wallet/src/cli/programs/mod.rs @@ -1,4 +1,5 @@ pub mod amm; +pub mod ata; pub mod native_token_transfer; pub mod pinata; pub mod token; diff --git a/wallet/src/cli/programs/native_token_transfer.rs b/wallet/src/cli/programs/native_token_transfer.rs index 314f78ba..b3f833ac 100644 --- a/wallet/src/cli/programs/native_token_transfer.rs +++ b/wallet/src/cli/programs/native_token_transfer.rs @@ -58,14 +58,13 @@ impl WalletSubcommand for AuthTransferSubcommand { AccountPrivacyKind::Public => { let account_id = account_id.parse()?; - let res = NativeTokenTransfer(wallet_core) + let tx_hash = NativeTokenTransfer(wallet_core) .register_account(account_id) .await?; - println!("Results of tx send are {res:#?}"); + println!("Transaction hash is {tx_hash}"); - let transfer_tx = - wallet_core.poll_native_token_transfer(res.tx_hash).await?; + let transfer_tx = wallet_core.poll_native_token_transfer(tx_hash).await?; println!("Transaction data is {transfer_tx:?}"); @@ -74,13 +73,12 @@ impl WalletSubcommand for AuthTransferSubcommand { AccountPrivacyKind::Private => { let account_id = account_id.parse()?; - let (res, secret) = NativeTokenTransfer(wallet_core) + let (tx_hash, secret) = NativeTokenTransfer(wallet_core) .register_account_private(account_id) .await?; - println!("Results of tx send are {res:#?}"); + println!("Transaction hash is {tx_hash}"); - let tx_hash = res.tx_hash; let transfer_tx = wallet_core.poll_native_token_transfer(tx_hash).await?; if let NSSATransaction::PrivacyPreserving(tx) = transfer_tx { @@ -311,13 +309,12 @@ impl WalletSubcommand for NativeTokenTransferProgramSubcommandPrivate { let from: AccountId = from.parse().unwrap(); let to: AccountId = 
to.parse().unwrap(); - let (res, [secret_from, secret_to]) = NativeTokenTransfer(wallet_core) + let (tx_hash, [secret_from, secret_to]) = NativeTokenTransfer(wallet_core) .send_private_transfer_to_owned_account(from, to, amount) .await?; - println!("Results of tx send are {res:#?}"); + println!("Transaction hash is {tx_hash}"); - let tx_hash = res.tx_hash; let transfer_tx = wallet_core.poll_native_token_transfer(tx_hash).await?; if let NSSATransaction::PrivacyPreserving(tx) = transfer_tx { @@ -351,13 +348,12 @@ impl WalletSubcommand for NativeTokenTransferProgramSubcommandPrivate { let to_vpk = nssa_core::encryption::shared_key_derivation::Secp256k1Point(to_vpk.to_vec()); - let (res, [secret_from, _]) = NativeTokenTransfer(wallet_core) + let (tx_hash, [secret_from, _]) = NativeTokenTransfer(wallet_core) .send_private_transfer_to_outer_account(from, to_npk, to_vpk, amount) .await?; - println!("Results of tx send are {res:#?}"); + println!("Transaction hash is {tx_hash}"); - let tx_hash = res.tx_hash; let transfer_tx = wallet_core.poll_native_token_transfer(tx_hash).await?; if let NSSATransaction::PrivacyPreserving(tx) = transfer_tx { @@ -387,13 +383,12 @@ impl WalletSubcommand for NativeTokenTransferProgramSubcommandShielded { let from: AccountId = from.parse().unwrap(); let to: AccountId = to.parse().unwrap(); - let (res, secret) = NativeTokenTransfer(wallet_core) + let (tx_hash, secret) = NativeTokenTransfer(wallet_core) .send_shielded_transfer(from, to, amount) .await?; - println!("Results of tx send are {res:#?}"); + println!("Transaction hash is {tx_hash}"); - let tx_hash = res.tx_hash; let transfer_tx = wallet_core.poll_native_token_transfer(tx_hash).await?; if let NSSATransaction::PrivacyPreserving(tx) = transfer_tx { @@ -428,13 +423,11 @@ impl WalletSubcommand for NativeTokenTransferProgramSubcommandShielded { let to_vpk = nssa_core::encryption::shared_key_derivation::Secp256k1Point(to_vpk.to_vec()); - let (res, _) = NativeTokenTransfer(wallet_core) + let 
(tx_hash, _) = NativeTokenTransfer(wallet_core) .send_shielded_transfer_to_outer_account(from, to_npk, to_vpk, amount) .await?; - println!("Results of tx send are {res:#?}"); - - let tx_hash = res.tx_hash; + println!("Transaction hash is {tx_hash}"); wallet_core.store_persistent_data().await?; @@ -460,13 +453,12 @@ impl WalletSubcommand for NativeTokenTransferProgramSubcommand { let from: AccountId = from.parse().unwrap(); let to: AccountId = to.parse().unwrap(); - let (res, secret) = NativeTokenTransfer(wallet_core) + let (tx_hash, secret) = NativeTokenTransfer(wallet_core) .send_deshielded_transfer(from, to, amount) .await?; - println!("Results of tx send are {res:#?}"); + println!("Transaction hash is {tx_hash}"); - let tx_hash = res.tx_hash; let transfer_tx = wallet_core.poll_native_token_transfer(tx_hash).await?; if let NSSATransaction::PrivacyPreserving(tx) = transfer_tx { @@ -486,13 +478,13 @@ impl WalletSubcommand for NativeTokenTransferProgramSubcommand { let from: AccountId = from.parse().unwrap(); let to: AccountId = to.parse().unwrap(); - let res = NativeTokenTransfer(wallet_core) + let tx_hash = NativeTokenTransfer(wallet_core) .send_public_transfer(from, to, amount) .await?; - println!("Results of tx send are {res:#?}"); + println!("Transaction hash is {tx_hash}"); - let transfer_tx = wallet_core.poll_native_token_transfer(res.tx_hash).await?; + let transfer_tx = wallet_core.poll_native_token_transfer(tx_hash).await?; println!("Transaction data is {transfer_tx:?}"); diff --git a/wallet/src/cli/programs/pinata.rs b/wallet/src/cli/programs/pinata.rs index 948da9c2..94cb0649 100644 --- a/wallet/src/cli/programs/pinata.rs +++ b/wallet/src/cli/programs/pinata.rs @@ -112,13 +112,12 @@ impl WalletSubcommand for PinataProgramSubcommandPublic { .await .context("failed to compute solution")?; - let res = Pinata(wallet_core) + let tx_hash = Pinata(wallet_core) .claim(pinata_account_id, winner_account_id, solution) .await?; - println!("Results of tx send are 
{res:#?}"); + println!("Transaction hash is {tx_hash}"); - let tx_hash = res.tx_hash; let transfer_tx = wallet_core.poll_native_token_transfer(tx_hash).await?; println!("Transaction data is {transfer_tx:?}"); @@ -148,13 +147,12 @@ impl WalletSubcommand for PinataProgramSubcommandPrivate { .await .context("failed to compute solution")?; - let (res, secret_winner) = Pinata(wallet_core) + let (tx_hash, secret_winner) = Pinata(wallet_core) .claim_private_owned_account(pinata_account_id, winner_account_id, solution) .await?; - println!("Results of tx send are {res:#?}"); + println!("Transaction hash is {tx_hash}"); - let tx_hash = res.tx_hash; let transfer_tx = wallet_core.poll_native_token_transfer(tx_hash).await?; println!("Transaction data is {transfer_tx:?}"); diff --git a/wallet/src/cli/programs/token.rs b/wallet/src/cli/programs/token.rs index 65a283dd..4274b1da 100644 --- a/wallet/src/cli/programs/token.rs +++ b/wallet/src/cli/programs/token.rs @@ -713,7 +713,7 @@ impl WalletSubcommand for TokenProgramSubcommandPrivate { let sender_account_id: AccountId = sender_account_id.parse().unwrap(); let recipient_account_id: AccountId = recipient_account_id.parse().unwrap(); - let (res, [secret_sender, secret_recipient]) = Token(wallet_core) + let (tx_hash, [secret_sender, secret_recipient]) = Token(wallet_core) .send_transfer_transaction_private_owned_account( sender_account_id, recipient_account_id, @@ -721,9 +721,8 @@ impl WalletSubcommand for TokenProgramSubcommandPrivate { ) .await?; - println!("Results of tx send are {res:#?}"); + println!("Transaction hash is {tx_hash}"); - let tx_hash = res.tx_hash; let transfer_tx = wallet_core.poll_native_token_transfer(tx_hash).await?; if let NSSATransaction::PrivacyPreserving(tx) = transfer_tx { @@ -761,7 +760,7 @@ impl WalletSubcommand for TokenProgramSubcommandPrivate { recipient_vpk.to_vec(), ); - let (res, [secret_sender, _]) = Token(wallet_core) + let (tx_hash, [secret_sender, _]) = Token(wallet_core) 
.send_transfer_transaction_private_foreign_account( sender_account_id, recipient_npk, @@ -770,9 +769,8 @@ impl WalletSubcommand for TokenProgramSubcommandPrivate { ) .await?; - println!("Results of tx send are {res:#?}"); + println!("Transaction hash is {tx_hash}"); - let tx_hash = res.tx_hash; let transfer_tx = wallet_core.poll_native_token_transfer(tx_hash).await?; if let NSSATransaction::PrivacyPreserving(tx) = transfer_tx { @@ -796,7 +794,7 @@ impl WalletSubcommand for TokenProgramSubcommandPrivate { let definition_account_id: AccountId = definition_account_id.parse().unwrap(); let holder_account_id: AccountId = holder_account_id.parse().unwrap(); - let (res, [secret_definition, secret_holder]) = Token(wallet_core) + let (tx_hash, [secret_definition, secret_holder]) = Token(wallet_core) .send_burn_transaction_private_owned_account( definition_account_id, holder_account_id, @@ -804,9 +802,8 @@ impl WalletSubcommand for TokenProgramSubcommandPrivate { ) .await?; - println!("Results of tx send are {res:#?}"); + println!("Transaction hash is {tx_hash}"); - let tx_hash = res.tx_hash; let transfer_tx = wallet_core.poll_native_token_transfer(tx_hash).await?; if let NSSATransaction::PrivacyPreserving(tx) = transfer_tx { @@ -833,7 +830,7 @@ impl WalletSubcommand for TokenProgramSubcommandPrivate { let definition_account_id: AccountId = definition_account_id.parse().unwrap(); let holder_account_id: AccountId = holder_account_id.parse().unwrap(); - let (res, [secret_definition, secret_holder]) = Token(wallet_core) + let (tx_hash, [secret_definition, secret_holder]) = Token(wallet_core) .send_mint_transaction_private_owned_account( definition_account_id, holder_account_id, @@ -841,9 +838,8 @@ impl WalletSubcommand for TokenProgramSubcommandPrivate { ) .await?; - println!("Results of tx send are {res:#?}"); + println!("Transaction hash is {tx_hash}"); - let tx_hash = res.tx_hash; let transfer_tx = wallet_core.poll_native_token_transfer(tx_hash).await?; if let 
NSSATransaction::PrivacyPreserving(tx) = transfer_tx { @@ -882,7 +878,7 @@ impl WalletSubcommand for TokenProgramSubcommandPrivate { holder_vpk.to_vec(), ); - let (res, [secret_definition, _]) = Token(wallet_core) + let (tx_hash, [secret_definition, _]) = Token(wallet_core) .send_mint_transaction_private_foreign_account( definition_account_id, holder_npk, @@ -891,9 +887,8 @@ impl WalletSubcommand for TokenProgramSubcommandPrivate { ) .await?; - println!("Results of tx send are {res:#?}"); + println!("Transaction hash is {tx_hash}"); - let tx_hash = res.tx_hash; let transfer_tx = wallet_core.poll_native_token_transfer(tx_hash).await?; if let NSSATransaction::PrivacyPreserving(tx) = transfer_tx { @@ -927,7 +922,7 @@ impl WalletSubcommand for TokenProgramSubcommandDeshielded { let sender_account_id: AccountId = sender_account_id.parse().unwrap(); let recipient_account_id: AccountId = recipient_account_id.parse().unwrap(); - let (res, secret_sender) = Token(wallet_core) + let (tx_hash, secret_sender) = Token(wallet_core) .send_transfer_transaction_deshielded( sender_account_id, recipient_account_id, @@ -935,9 +930,8 @@ impl WalletSubcommand for TokenProgramSubcommandDeshielded { ) .await?; - println!("Results of tx send are {res:#?}"); + println!("Transaction hash is {tx_hash}"); - let tx_hash = res.tx_hash; let transfer_tx = wallet_core.poll_native_token_transfer(tx_hash).await?; if let NSSATransaction::PrivacyPreserving(tx) = transfer_tx { @@ -961,7 +955,7 @@ impl WalletSubcommand for TokenProgramSubcommandDeshielded { let definition_account_id: AccountId = definition_account_id.parse().unwrap(); let holder_account_id: AccountId = holder_account_id.parse().unwrap(); - let (res, secret_definition) = Token(wallet_core) + let (tx_hash, secret_definition) = Token(wallet_core) .send_burn_transaction_deshielded_owned_account( definition_account_id, holder_account_id, @@ -969,9 +963,8 @@ impl WalletSubcommand for TokenProgramSubcommandDeshielded { ) .await?; - 
println!("Results of tx send are {res:#?}"); + println!("Transaction hash is {tx_hash}"); - let tx_hash = res.tx_hash; let transfer_tx = wallet_core.poll_native_token_transfer(tx_hash).await?; if let NSSATransaction::PrivacyPreserving(tx) = transfer_tx { @@ -995,7 +988,7 @@ impl WalletSubcommand for TokenProgramSubcommandDeshielded { let definition_account_id: AccountId = definition_account_id.parse().unwrap(); let holder_account_id: AccountId = holder_account_id.parse().unwrap(); - let (res, secret_definition) = Token(wallet_core) + let (tx_hash, secret_definition) = Token(wallet_core) .send_mint_transaction_deshielded( definition_account_id, holder_account_id, @@ -1003,9 +996,8 @@ impl WalletSubcommand for TokenProgramSubcommandDeshielded { ) .await?; - println!("Results of tx send are {res:#?}"); + println!("Transaction hash is {tx_hash}"); - let tx_hash = res.tx_hash; let transfer_tx = wallet_core.poll_native_token_transfer(tx_hash).await?; if let NSSATransaction::PrivacyPreserving(tx) = transfer_tx { @@ -1050,7 +1042,7 @@ impl WalletSubcommand for TokenProgramSubcommandShielded { recipient_vpk.to_vec(), ); - let (res, _) = Token(wallet_core) + let (tx_hash, _) = Token(wallet_core) .send_transfer_transaction_shielded_foreign_account( sender_account_id, recipient_npk, @@ -1059,9 +1051,8 @@ impl WalletSubcommand for TokenProgramSubcommandShielded { ) .await?; - println!("Results of tx send are {res:#?}"); + println!("Transaction hash is {tx_hash}"); - let tx_hash = res.tx_hash; let transfer_tx = wallet_core.poll_native_token_transfer(tx_hash).await?; if let NSSATransaction::PrivacyPreserving(tx) = transfer_tx { @@ -1080,7 +1071,7 @@ impl WalletSubcommand for TokenProgramSubcommandShielded { let sender_account_id: AccountId = sender_account_id.parse().unwrap(); let recipient_account_id: AccountId = recipient_account_id.parse().unwrap(); - let (res, secret_recipient) = Token(wallet_core) + let (tx_hash, secret_recipient) = Token(wallet_core) 
.send_transfer_transaction_shielded_owned_account( sender_account_id, recipient_account_id, @@ -1088,9 +1079,8 @@ impl WalletSubcommand for TokenProgramSubcommandShielded { ) .await?; - println!("Results of tx send are {res:#?}"); + println!("Transaction hash is {tx_hash}"); - let tx_hash = res.tx_hash; let transfer_tx = wallet_core.poll_native_token_transfer(tx_hash).await?; if let NSSATransaction::PrivacyPreserving(tx) = transfer_tx { @@ -1114,7 +1104,7 @@ impl WalletSubcommand for TokenProgramSubcommandShielded { let definition_account_id: AccountId = definition_account_id.parse().unwrap(); let holder_account_id: AccountId = holder_account_id.parse().unwrap(); - let (res, secret_holder) = Token(wallet_core) + let (tx_hash, secret_holder) = Token(wallet_core) .send_burn_transaction_shielded( definition_account_id, holder_account_id, @@ -1122,9 +1112,8 @@ impl WalletSubcommand for TokenProgramSubcommandShielded { ) .await?; - println!("Results of tx send are {res:#?}"); + println!("Transaction hash is {tx_hash}"); - let tx_hash = res.tx_hash; let transfer_tx = wallet_core.poll_native_token_transfer(tx_hash).await?; if let NSSATransaction::PrivacyPreserving(tx) = transfer_tx { @@ -1148,7 +1137,7 @@ impl WalletSubcommand for TokenProgramSubcommandShielded { let definition_account_id: AccountId = definition_account_id.parse().unwrap(); let holder_account_id: AccountId = holder_account_id.parse().unwrap(); - let (res, secret_holder) = Token(wallet_core) + let (tx_hash, secret_holder) = Token(wallet_core) .send_mint_transaction_shielded_owned_account( definition_account_id, holder_account_id, @@ -1156,9 +1145,8 @@ impl WalletSubcommand for TokenProgramSubcommandShielded { ) .await?; - println!("Results of tx send are {res:#?}"); + println!("Transaction hash is {tx_hash}"); - let tx_hash = res.tx_hash; let transfer_tx = wallet_core.poll_native_token_transfer(tx_hash).await?; if let NSSATransaction::PrivacyPreserving(tx) = transfer_tx { @@ -1194,7 +1182,7 @@ impl 
WalletSubcommand for TokenProgramSubcommandShielded { holder_vpk.to_vec(), ); - let (res, _) = Token(wallet_core) + let (tx_hash, _) = Token(wallet_core) .send_mint_transaction_shielded_foreign_account( definition_account_id, holder_npk, @@ -1203,9 +1191,8 @@ impl WalletSubcommand for TokenProgramSubcommandShielded { ) .await?; - println!("Results of tx send are {res:#?}"); + println!("Transaction hash is {tx_hash}"); - let tx_hash = res.tx_hash; let transfer_tx = wallet_core.poll_native_token_transfer(tx_hash).await?; if let NSSATransaction::PrivacyPreserving(tx) = transfer_tx { @@ -1235,7 +1222,7 @@ impl WalletSubcommand for CreateNewTokenProgramSubcommand { let definition_account_id: AccountId = definition_account_id.parse().unwrap(); let supply_account_id: AccountId = supply_account_id.parse().unwrap(); - let (res, [secret_definition, secret_supply]) = Token(wallet_core) + let (tx_hash, [secret_definition, secret_supply]) = Token(wallet_core) .send_new_definition_private_owned_definiton_and_supply( definition_account_id, supply_account_id, @@ -1244,9 +1231,8 @@ impl WalletSubcommand for CreateNewTokenProgramSubcommand { ) .await?; - println!("Results of tx send are {res:#?}"); + println!("Transaction hash is {tx_hash}"); - let tx_hash = res.tx_hash; let transfer_tx = wallet_core.poll_native_token_transfer(tx_hash).await?; if let NSSATransaction::PrivacyPreserving(tx) = transfer_tx { @@ -1274,7 +1260,7 @@ impl WalletSubcommand for CreateNewTokenProgramSubcommand { let definition_account_id: AccountId = definition_account_id.parse().unwrap(); let supply_account_id: AccountId = supply_account_id.parse().unwrap(); - let (res, secret_definition) = Token(wallet_core) + let (tx_hash, secret_definition) = Token(wallet_core) .send_new_definition_private_owned_definiton( definition_account_id, supply_account_id, @@ -1283,9 +1269,8 @@ impl WalletSubcommand for CreateNewTokenProgramSubcommand { ) .await?; - println!("Results of tx send are {res:#?}"); + 
println!("Transaction hash is {tx_hash}"); - let tx_hash = res.tx_hash; let transfer_tx = wallet_core.poll_native_token_transfer(tx_hash).await?; if let NSSATransaction::PrivacyPreserving(tx) = transfer_tx { @@ -1310,7 +1295,7 @@ impl WalletSubcommand for CreateNewTokenProgramSubcommand { let definition_account_id: AccountId = definition_account_id.parse().unwrap(); let supply_account_id: AccountId = supply_account_id.parse().unwrap(); - let (res, secret_supply) = Token(wallet_core) + let (tx_hash, secret_supply) = Token(wallet_core) .send_new_definition_private_owned_supply( definition_account_id, supply_account_id, @@ -1319,9 +1304,8 @@ impl WalletSubcommand for CreateNewTokenProgramSubcommand { ) .await?; - println!("Results of tx send are {res:#?}"); + println!("Transaction hash is {tx_hash}"); - let tx_hash = res.tx_hash; let transfer_tx = wallet_core.poll_native_token_transfer(tx_hash).await?; if let NSSATransaction::PrivacyPreserving(tx) = transfer_tx { diff --git a/wallet/src/config.rs b/wallet/src/config.rs index 7e4c4cec..33527009 100644 --- a/wallet/src/config.rs +++ b/wallet/src/config.rs @@ -8,22 +8,17 @@ use std::{ use anyhow::{Context as _, Result}; use common::config::BasicAuth; use humantime_serde; -use key_protocol::key_management::{ - KeyChain, - key_tree::{ - chain_index::ChainIndex, keys_private::ChildKeysPrivate, keys_public::ChildKeysPublic, - }, +use key_protocol::key_management::key_tree::{ + chain_index::ChainIndex, keys_private::ChildKeysPrivate, keys_public::ChildKeysPublic, }; use log::warn; use serde::{Deserialize, Serialize}; +use testnet_initial_state::{ + PrivateAccountPrivateInitialData, PublicAccountPrivateInitialData, + initial_priv_accounts_private_keys, initial_pub_accounts_private_keys, +}; use url::Url; -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct InitialAccountDataPublic { - pub account_id: nssa::AccountId, - pub pub_sign_key: nssa::PrivateKey, -} - #[derive(Debug, Clone, Serialize, Deserialize)] pub struct 
PersistentAccountDataPublic { pub account_id: nssa::AccountId, @@ -31,13 +26,6 @@ pub struct PersistentAccountDataPublic { pub data: ChildKeysPublic, } -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct InitialAccountDataPrivate { - pub account_id: nssa::AccountId, - pub account: nssa_core::account::Account, - pub key_chain: KeyChain, -} - #[derive(Debug, Clone, Serialize, Deserialize)] pub struct PersistentAccountDataPrivate { pub account_id: nssa::AccountId, @@ -50,8 +38,29 @@ pub struct PersistentAccountDataPrivate { // memory #[derive(Debug, Clone, Serialize, Deserialize)] pub enum InitialAccountData { - Public(InitialAccountDataPublic), - Private(Box), + Public(PublicAccountPrivateInitialData), + Private(Box), +} + +impl InitialAccountData { + #[must_use] + pub const fn account_id(&self) -> nssa::AccountId { + match &self { + Self::Public(acc) => acc.account_id, + Self::Private(acc) => acc.account_id, + } + } + + pub(crate) fn create_initial_accounts_data() -> Vec { + let pub_data = initial_pub_accounts_private_keys(); + let priv_data = initial_priv_accounts_private_keys(); + + pub_data + .into_iter() + .map(Into::into) + .chain(priv_data.into_iter().map(Into::into)) + .collect() + } } // Big difference in enum variants sizes @@ -114,16 +123,6 @@ impl PersistentStorage { } } -impl InitialAccountData { - #[must_use] - pub fn account_id(&self) -> nssa::AccountId { - match &self { - Self::Public(acc) => acc.account_id, - Self::Private(acc) => acc.account_id, - } - } -} - impl PersistentAccountData { #[must_use] pub fn account_id(&self) -> nssa::AccountId { @@ -135,14 +134,14 @@ impl PersistentAccountData { } } -impl From for InitialAccountData { - fn from(value: InitialAccountDataPublic) -> Self { +impl From for InitialAccountData { + fn from(value: PublicAccountPrivateInitialData) -> Self { Self::Public(value) } } -impl From for InitialAccountData { - fn from(value: InitialAccountDataPrivate) -> Self { +impl From for InitialAccountData { + fn 
from(value: PrivateAccountPrivateInitialData) -> Self { Self::Private(Box::new(value)) } } @@ -186,9 +185,6 @@ pub struct GasConfig { #[optfield::optfield(pub WalletConfigOverrides, rewrap, attrs = (derive(Debug, Default, Clone)))] #[derive(Debug, Clone, Serialize, Deserialize)] pub struct WalletConfig { - /// Override rust log (env var logging level). - #[serde(skip_serializing_if = "Option::is_none")] - pub override_rust_log: Option, /// Sequencer URL. pub sequencer_addr: Url, /// Sequencer polling duration for new blocks. @@ -200,308 +196,23 @@ pub struct WalletConfig { pub seq_poll_max_retries: u64, /// Max amount of blocks to poll in one request. pub seq_block_poll_max_amount: u64, - /// Initial accounts for wallet. - pub initial_accounts: Vec, - /// Basic authentication credentials. + /// Basic authentication credentials #[serde(skip_serializing_if = "Option::is_none")] pub basic_auth: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub initial_accounts: Option>, } impl Default for WalletConfig { fn default() -> Self { Self { - override_rust_log: None, sequencer_addr: "http://127.0.0.1:3040".parse().unwrap(), seq_poll_timeout: Duration::from_secs(12), seq_tx_poll_max_blocks: 5, seq_poll_max_retries: 5, seq_block_poll_max_amount: 100, basic_auth: None, - initial_accounts: { - let init_acc_json = r#" - [ - { - "Public": { - "account_id": "6iArKUXxhUJqS7kCaPNhwMWt3ro71PDyBj7jwAyE2VQV", - "pub_sign_key": [ - 16, - 162, - 106, - 154, - 236, - 125, - 52, - 184, - 35, - 100, - 238, - 174, - 69, - 197, - 41, - 77, - 187, - 10, - 118, - 75, - 0, - 11, - 148, - 238, - 185, - 181, - 133, - 17, - 220, - 72, - 124, - 77 - ] - } - }, - { - "Public": { - "account_id": "7wHg9sbJwc6h3NP1S9bekfAzB8CHifEcxKswCKUt3YQo", - "pub_sign_key": [ - 113, - 121, - 64, - 177, - 204, - 85, - 229, - 214, - 178, - 6, - 109, - 191, - 29, - 154, - 63, - 38, - 242, - 18, - 244, - 219, - 8, - 208, - 35, - 136, - 23, - 127, - 207, - 237, - 216, - 169, - 190, - 27 - ] - } - }, - { - 
"Private": { - "account_id": "FpdcxBrMkHWqXCBQ6FG98eYfWGY6jWZRsKNSi1FwDMxy", - "account": { - "program_owner": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "balance": 10000, - "data": [], - "nonce": 0 - }, - "key_chain": { - "secret_spending_key": [ - 239, - 27, - 159, - 83, - 199, - 194, - 132, - 33, - 20, - 28, - 217, - 103, - 101, - 57, - 27, - 125, - 84, - 57, - 19, - 86, - 98, - 135, - 161, - 221, - 108, - 125, - 152, - 174, - 161, - 64, - 16, - 200 - ], - "private_key_holder": { - "nullifier_secret_key": [ - 71, - 195, - 16, - 119, - 0, - 98, - 35, - 106, - 139, - 82, - 145, - 50, - 27, - 140, - 206, - 19, - 53, - 122, - 166, - 76, - 195, - 0, - 16, - 19, - 21, - 143, - 155, - 119, - 9, - 200, - 81, - 105 - ], - "viewing_secret_key": [ - 5, - 117, - 221, - 27, - 236, - 199, - 53, - 22, - 249, - 231, - 98, - 147, - 213, - 116, - 191, - 82, - 188, - 148, - 175, - 98, - 139, - 52, - 232, - 249, - 220, - 217, - 83, - 58, - 112, - 155, - 197, - 196 - ] - }, - "nullifer_public_key": [ - 177, - 64, - 1, - 11, - 87, - 38, - 254, - 159, - 231, - 165, - 1, - 94, - 64, - 137, - 243, - 76, - 249, - 101, - 251, - 129, - 33, - 101, - 189, - 30, - 42, - 11, - 191, - 34, - 103, - 186, - 227, - 230 - ], - "viewing_public_key": [ - 2, 69, 126, 43, 158, 209, 172, 144, 23, 185, 208, 25, 163, 166, 176, 200, 225, 251, 106, 211, 4, 199, 112, 243, 207, 144, 135, 56, 157, 167, 32, 219, 38] - } - } - }, - { - "Private": { - "account_id": "E8HwiTyQe4H9HK7icTvn95HQMnzx49mP9A2ddtMLpNaN", - "account": { - "program_owner": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ], - "balance": 20000, - "data": [], - "nonce": 0 - }, - "key_chain": { - "secret_spending_key": [ - 48, 175, 124, 10, 230, 240, 166, 14, 249, 254, 157, 226, 208, 124, 122, 177, 203, 139, 192, 180, 43, 120, 55, 151, 50, 21, 113, 22, 254, 83, 148, 56], - "private_key_holder": { - "nullifier_secret_key": [ - 99, 82, 190, 140, 234, 10, 61, 163, 15, 211, 179, 54, 70, 166, 87, 5, 182, 68, 117, 244, 217, 23, 99, 9, 4, 177, 230, 
125, 109, 91, 160, 30 - ], - "viewing_secret_key": [ - 205, 32, 76, 251, 255, 236, 96, 119, 61, 111, 65, 100, 75, 218, 12, 22, 17, 170, 55, 226, 21, 154, 161, 34, 208, 74, 27, 1, 119, 13, 88, 128 - ] - }, - "nullifer_public_key": [ - 32, 67, 72, 164, 106, 53, 66, 239, 141, 15, 52, 230, 136, 177, 2, 236, 207, 243, 134, 135, 210, 143, 87, 232, 215, 128, 194, 120, 113, 224, 4, 165 - ], - "viewing_public_key": [ - 2, 79, 110, 46, 203, 29, 206, 205, 18, 86, 27, 189, 104, 103, 113, 181, 110, 53, 78, 172, 11, 171, 190, 18, 126, 214, 81, 77, 192, 154, 58, 195, 238 - ] - } - } - } - ] - "#; - serde_json::from_str(init_acc_json).unwrap() - }, + initial_accounts: None, } } } @@ -546,31 +257,25 @@ impl WalletConfig { pub fn apply_overrides(&mut self, overrides: WalletConfigOverrides) { let Self { - override_rust_log, sequencer_addr, seq_poll_timeout, seq_tx_poll_max_blocks, seq_poll_max_retries, seq_block_poll_max_amount, - initial_accounts, basic_auth, + initial_accounts, } = self; let WalletConfigOverrides { - override_rust_log: o_override_rust_log, sequencer_addr: o_sequencer_addr, seq_poll_timeout: o_seq_poll_timeout, seq_tx_poll_max_blocks: o_seq_tx_poll_max_blocks, seq_poll_max_retries: o_seq_poll_max_retries, seq_block_poll_max_amount: o_seq_block_poll_max_amount, - initial_accounts: o_initial_accounts, basic_auth: o_basic_auth, + initial_accounts: o_initial_accounts, } = overrides; - if let Some(v) = o_override_rust_log { - warn!("Overriding wallet config 'override_rust_log' to {v:#?}"); - *override_rust_log = v; - } if let Some(v) = o_sequencer_addr { warn!("Overriding wallet config 'sequencer_addr' to {v}"); *sequencer_addr = v; @@ -591,13 +296,13 @@ impl WalletConfig { warn!("Overriding wallet config 'seq_block_poll_max_amount' to {v}"); *seq_block_poll_max_amount = v; } - if let Some(v) = o_initial_accounts { - warn!("Overriding wallet config 'initial_accounts' to {v:#?}"); - *initial_accounts = v; - } if let Some(v) = o_basic_auth { warn!("Overriding wallet config 
'basic_auth' to {v:#?}"); *basic_auth = v; } + if let Some(v) = o_initial_accounts { + warn!("Overriding wallet config 'initial_accounts' to {v:#?}"); + *initial_accounts = v; + } } } diff --git a/wallet/src/helperfunctions.rs b/wallet/src/helperfunctions.rs index 74f7bab3..d82dedaf 100644 --- a/wallet/src/helperfunctions.rs +++ b/wallet/src/helperfunctions.rs @@ -7,12 +7,13 @@ use nssa::Account; use nssa_core::account::Nonce; use rand::{RngCore as _, rngs::OsRng}; use serde::Serialize; +use testnet_initial_state::{PrivateAccountPrivateInitialData, PublicAccountPrivateInitialData}; use crate::{ HOME_DIR_ENV_VAR, config::{ - InitialAccountData, InitialAccountDataPrivate, InitialAccountDataPublic, Label, - PersistentAccountDataPrivate, PersistentAccountDataPublic, PersistentStorage, + InitialAccountData, Label, PersistentAccountDataPrivate, PersistentAccountDataPublic, + PersistentStorage, }, }; @@ -119,7 +120,7 @@ pub fn produce_data_for_storage( for (account_id, key) in &user_data.default_pub_account_signing_keys { vec_for_storage.push( - InitialAccountData::Public(InitialAccountDataPublic { + InitialAccountData::Public(PublicAccountPrivateInitialData { account_id: *account_id, pub_sign_key: key.clone(), }) @@ -129,7 +130,7 @@ pub fn produce_data_for_storage( for (account_id, (key_chain, account)) in &user_data.default_user_private_accounts { vec_for_storage.push( - InitialAccountData::Private(Box::new(InitialAccountDataPrivate { + InitialAccountData::Private(Box::new(PrivateAccountPrivateInitialData { account_id: *account_id, account: account.clone(), key_chain: key_chain.clone(), diff --git a/wallet/src/lib.rs b/wallet/src/lib.rs index c253797b..63ea8611 100644 --- a/wallet/src/lib.rs +++ b/wallet/src/lib.rs @@ -8,15 +8,12 @@ reason = "Most of the shadows come from args parsing which is ok" )] -use std::{path::PathBuf, sync::Arc}; +use std::path::PathBuf; use anyhow::{Context as _, Result}; -use base64::{Engine as _, engine::general_purpose::STANDARD as BASE64}; 
+use bip39::Mnemonic; use chain_storage::WalletChainStore; -use common::{ - HashType, error::ExecutionFailureKind, rpc_primitives::requests::SendTxResponse, - sequencer_client::SequencerClient, transaction::NSSATransaction, -}; +use common::{HashType, transaction::NSSATransaction}; use config::WalletConfig; use key_protocol::key_management::key_tree::{chain_index::ChainIndex, traits::KeyNode as _}; use log::info; @@ -26,8 +23,11 @@ use nssa::{ circuit::ProgramWithDependencies, message::EncryptedAccountData, }, }; -use nssa_core::{Commitment, MembershipProof, SharedSecretKey, program::InstructionData}; +use nssa_core::{ + Commitment, MembershipProof, SharedSecretKey, account::Nonce, program::InstructionData, +}; pub use privacy_preserving_tx::PrivacyPreservingAccount; +use sequencer_service_rpc::{RpcClient as _, SequencerClient, SequencerClientBuilder}; use tokio::io::AsyncWriteExt as _; use crate::{ @@ -51,6 +51,24 @@ pub enum AccDecodeData { Decode(nssa_core::SharedSecretKey, AccountId), } +#[derive(Debug, thiserror::Error)] +pub enum ExecutionFailureKind { + #[error("Failed to get data from sequencer")] + SequencerError(#[source] anyhow::Error), + #[error("Inputs amounts does not match outputs")] + AmountMismatchError, + #[error("Accounts key not found")] + KeyNotFoundError, + #[error("Sequencer client error")] + SequencerClientError(#[from] sequencer_service_rpc::ClientError), + #[error("Can not pay for operation")] + InsufficientFundsError, + #[error("Account {0} data is invalid")] + AccountDataError(AccountId), + #[error("Failed to build transaction: {0}")] + TransactionBuildError(#[from] nssa::error::NssaError), +} + #[expect(clippy::partial_pub_fields, reason = "TODO: make all fields private")] pub struct WalletCore { config_path: PathBuf, @@ -58,7 +76,7 @@ pub struct WalletCore { storage: WalletChainStore, storage_path: PathBuf, poller: TxPoller, - pub sequencer_client: Arc, + pub sequencer_client: SequencerClient, pub last_synced_block: u64, } @@ -100,15 
+118,24 @@ impl WalletCore { config_path: PathBuf, storage_path: PathBuf, config_overrides: Option, - password: String, - ) -> Result { - Self::new( + password: &str, + ) -> Result<(Self, Mnemonic)> { + let mut mnemonic_out = None; + let wallet = Self::new( config_path, storage_path, config_overrides, - |config| WalletChainStore::new_storage(config, password), + |config| { + let (storage, mnemonic) = WalletChainStore::new_storage(config, password)?; + mnemonic_out = Some(mnemonic); + Ok(storage) + }, 0, - ) + )?; + Ok(( + wallet, + mnemonic_out.expect("mnemonic should be set after new_storage"), + )) } fn new( @@ -129,11 +156,25 @@ impl WalletCore { config.apply_overrides(config_overrides); } - let sequencer_client = Arc::new(SequencerClient::new_with_auth( - config.sequencer_addr.clone(), - config.basic_auth.clone(), - )?); - let tx_poller = TxPoller::new(&config, Arc::clone(&sequencer_client)); + let sequencer_client = { + let mut builder = SequencerClientBuilder::default(); + if let Some(basic_auth) = &config.basic_auth { + builder = builder.set_headers( + std::iter::once(( + "Authorization".parse().expect("Header name is valid"), + format!("Basic {basic_auth}") + .parse() + .context("Invalid basic auth format")?, + )) + .collect(), + ); + } + builder + .build(config.sequencer_addr.clone()) + .context("Failed to create sequencer client")? + }; + + let tx_poller = TxPoller::new(&config, sequencer_client.clone()); let storage = storage_ctor(config)?; @@ -160,9 +201,13 @@ impl WalletCore { &self.storage } - /// Reset storage. - pub fn reset_storage(&mut self, password: String) -> Result<()> { - self.storage = WalletChainStore::new_storage(self.storage.wallet_config.clone(), password)?; + /// Restore storage from an existing mnemonic phrase. 
+ pub fn restore_storage(&mut self, mnemonic: &Mnemonic, password: &str) -> Result<()> { + self.storage = WalletChainStore::restore_storage( + self.storage.wallet_config.clone(), + mnemonic, + password, + )?; Ok(()) } @@ -222,26 +267,17 @@ impl WalletCore { /// Get account balance. pub async fn get_account_balance(&self, acc: AccountId) -> Result { - Ok(self - .sequencer_client - .get_account_balance(acc) - .await? - .balance) + Ok(self.sequencer_client.get_account_balance(acc).await?) } /// Get accounts nonces. - pub async fn get_accounts_nonces(&self, accs: Vec) -> Result> { - Ok(self - .sequencer_client - .get_accounts_nonces(accs) - .await? - .nonces) + pub async fn get_accounts_nonces(&self, accs: Vec) -> Result> { + Ok(self.sequencer_client.get_accounts_nonces(accs).await?) } /// Get account. pub async fn get_account_public(&self, account_id: AccountId) -> Result { - let response = self.sequencer_client.get_account(account_id).await?; - Ok(response.account) + Ok(self.sequencer_client.get_account(account_id).await?) } #[must_use] @@ -265,16 +301,12 @@ impl WalletCore { #[must_use] pub fn get_private_account_commitment(&self, account_id: AccountId) -> Option { let (keys, account) = self.storage.user_data.get_private_account(account_id)?; - Some(Commitment::new(&keys.nullifer_public_key, account)) + Some(Commitment::new(&keys.nullifier_public_key, account)) } /// Poll transactions. 
pub async fn poll_native_token_transfer(&self, hash: HashType) -> Result { - let transaction_encoded = self.poller.poll_tx(hash).await?; - let tx_base64_decode = BASE64.decode(transaction_encoded)?; - let pub_tx = borsh::from_slice::(&tx_base64_decode).unwrap(); - - Ok(pub_tx) + self.poller.poll_tx(hash).await } pub async fn check_private_account_initialized( @@ -285,7 +317,7 @@ impl WalletCore { self.sequencer_client .get_proof_for_commitment(acc_comm) .await - .map_err(anyhow::Error::from) + .map_err(Into::into) } else { Ok(None) } @@ -325,17 +357,12 @@ impl WalletCore { Ok(()) } - // TODO: handle large Err-variant properly - #[expect( - clippy::result_large_err, - reason = "ExecutionFailureKind is large, tracked by TODO" - )] pub async fn send_privacy_preserving_tx( &self, accounts: Vec, instruction_data: InstructionData, program: &ProgramWithDependencies, - ) -> Result<(SendTxResponse, Vec), ExecutionFailureKind> { + ) -> Result<(HashType, Vec), ExecutionFailureKind> { self.send_privacy_preserving_tx_with_pre_check(accounts, instruction_data, program, |_| { Ok(()) }) @@ -348,7 +375,7 @@ impl WalletCore { instruction_data: InstructionData, program: &ProgramWithDependencies, tx_pre_check: impl FnOnce(&[&Account]) -> Result<(), ExecutionFailureKind>, - ) -> Result<(SendTxResponse, Vec), ExecutionFailureKind> { + ) -> Result<(HashType, Vec), ExecutionFailureKind> { let acc_manager = privacy_preserving_tx::AccountManager::new(self, accounts).await?; let pre_states = acc_manager.pre_states(); @@ -400,7 +427,9 @@ impl WalletCore { .collect(); Ok(( - self.sequencer_client.send_tx_private(tx).await?, + self.sequencer_client + .send_transaction(NSSATransaction::PrivacyPreserving(tx)) + .await?, shared_secrets, )) } @@ -427,11 +456,11 @@ impl WalletCore { let bar = indicatif::ProgressBar::new(num_of_blocks); while let Some(block) = blocks.try_next().await? 
{ - for tx in block.transactions { + for tx in block.body.transactions { self.sync_private_accounts_with_tx(tx); } - self.last_synced_block = block.block_id; + self.last_synced_block = block.header.block_id; self.store_persistent_data().await?; bar.inc(1); } @@ -469,7 +498,7 @@ impl WalletCore { let affected_accounts = private_account_key_chains .flat_map(|(acc_account_id, key_chain, index)| { let view_tag = EncryptedAccountData::compute_view_tag( - &key_chain.nullifer_public_key, + &key_chain.nullifier_public_key, &key_chain.viewing_public_key, ); diff --git a/wallet/src/main.rs b/wallet/src/main.rs index 4704675b..cf8356db 100644 --- a/wallet/src/main.rs +++ b/wallet/src/main.rs @@ -15,7 +15,7 @@ use wallet::{ // TODO #169: We have sample configs for sequencer, but not for wallet // TODO #168: Why it requires config as a directory? Maybe better to deduce directory from config // file path? -// TODO #172: Why it requires config as env var while sequencer_runner accepts as +// TODO #172: Why it requires config as env var while sequencer_service accepts as // argument? 
#[tokio::main] async fn main() -> Result<()> { @@ -46,13 +46,21 @@ async fn main() -> Result<()> { println!("Persistent storage not found, need to execute setup"); let password = read_password_from_stdin()?; - let wallet = WalletCore::new_init_storage( + let (wallet, mnemonic) = WalletCore::new_init_storage( config_path, storage_path, Some(config_overrides), - password, + &password, )?; + println!(); + println!("IMPORTANT: Write down your recovery phrase and store it securely."); + println!("This is the only way to recover your wallet if you lose access."); + println!(); + println!("Recovery phrase:"); + println!(" {mnemonic}"); + println!(); + wallet.store_persistent_data().await?; wallet }; diff --git a/wallet/src/pinata_interactions.rs b/wallet/src/pinata_interactions.rs index abcfcf6a..b883e7e6 100644 --- a/wallet/src/pinata_interactions.rs +++ b/wallet/src/pinata_interactions.rs @@ -1,10 +1,12 @@ -use common::{error::ExecutionFailureKind, sequencer_client::json::SendTxResponse}; +use common::{HashType, transaction::NSSATransaction}; +use sequencer_service_rpc::RpcClient as _; use key_protocol::key_management::ephemeral_key_holder::EphemeralKeyHolder; use nssa::{AccountId, privacy_preserving_transaction::circuit}; use nssa_core::{MembershipProof, SharedSecretKey, account::AccountWithMetadata}; use crate::{ - WalletCore, helperfunctions::produce_random_nonces, transaction_utils::AccountPreparedData, + ExecutionFailureKind, WalletCore, helperfunctions::produce_random_nonces, + transaction_utils::AccountPreparedData, }; impl WalletCore { @@ -13,7 +15,7 @@ impl WalletCore { pinata_account_id: AccountId, winner_account_id: AccountId, solution: u128, - ) -> Result { + ) -> Result { let account_ids = vec![pinata_account_id, winner_account_id]; let program_id = nssa::program::Program::pinata().id(); let message = @@ -23,7 +25,7 @@ impl WalletCore { let witness_set = nssa::public_transaction::WitnessSet::for_message(&message, &[]); let tx = 
nssa::PublicTransaction::new(message, witness_set); - Ok(self.sequencer_client.send_tx_public(tx).await?) + Ok(self.sequencer_client.send_transaction(NSSATransaction::Public(tx).into()).await?) } pub async fn claim_pinata_private_owned_account_already_initialized( @@ -32,7 +34,7 @@ impl WalletCore { winner_account_id: AccountId, solution: u128, winner_proof: MembershipProof, - ) -> Result<(SendTxResponse, [SharedSecretKey; 1]), ExecutionFailureKind> { + ) -> Result<(HashType, [SharedSecretKey; 1]), ExecutionFailureKind> { let AccountPreparedData { nsk: winner_nsk, npk: winner_npk, @@ -89,7 +91,7 @@ impl WalletCore { ); Ok(( - self.sequencer_client.send_tx_private(tx).await?, + self.sequencer_client.send_transaction(NSSATransaction::PrivacyPreserving(tx).into()).await?, [shared_secret_winner], )) } @@ -99,7 +101,7 @@ impl WalletCore { pinata_account_id: AccountId, winner_account_id: AccountId, solution: u128, - ) -> Result<(SendTxResponse, [SharedSecretKey; 1]), ExecutionFailureKind> { + ) -> Result<(HashType, [SharedSecretKey; 1]), ExecutionFailureKind> { let AccountPreparedData { nsk: _, npk: winner_npk, @@ -156,7 +158,7 @@ impl WalletCore { ); Ok(( - self.sequencer_client.send_tx_private(tx).await?, + self.sequencer_client.send_transaction(NSSATransaction::PrivacyPreserving(tx).into()).await?, [shared_secret_winner], )) } diff --git a/wallet/src/poller.rs b/wallet/src/poller.rs index 113f42ee..7dbd59c1 100644 --- a/wallet/src/poller.rs +++ b/wallet/src/poller.rs @@ -1,8 +1,9 @@ -use std::{sync::Arc, time::Duration}; +use std::time::Duration; use anyhow::Result; -use common::{HashType, block::HashableBlockData, sequencer_client::SequencerClient}; +use common::{HashType, block::Block, transaction::NSSATransaction}; use log::{info, warn}; +use sequencer_service_rpc::{RpcClient as _, SequencerClient}; use crate::config::WalletConfig; @@ -13,12 +14,12 @@ pub struct TxPoller { polling_max_error_attempts: u64, polling_delay: Duration, block_poll_max_amount: u64, - 
client: Arc, + client: SequencerClient, } impl TxPoller { #[must_use] - pub const fn new(config: &WalletConfig, client: Arc) -> Self { + pub const fn new(config: &WalletConfig, client: SequencerClient) -> Self { Self { polling_delay: config.seq_poll_timeout, polling_max_blocks_to_query: config.seq_tx_poll_max_blocks, @@ -29,7 +30,7 @@ impl TxPoller { } // TODO: this polling is not based on blocks, but on timeouts, need to fix this. - pub async fn poll_tx(&self, tx_hash: HashType) -> Result { + pub async fn poll_tx(&self, tx_hash: HashType) -> Result { let max_blocks_to_query = self.polling_max_blocks_to_query; info!("Starting poll for transaction {tx_hash}"); @@ -38,29 +39,22 @@ impl TxPoller { let mut try_error_counter = 0_u64; - let tx_obj = loop { - let tx_obj = self - .client - .get_transaction_by_hash(tx_hash) - .await - .inspect_err(|err| { + loop { + match self.client.get_transaction(tx_hash).await { + Ok(Some(tx)) => return Ok(tx), + Ok(None) => {} + Err(err) => { warn!("Failed to get transaction by hash {tx_hash} with error: {err:#?}"); - }); - - if let Ok(tx_obj) = tx_obj { - break tx_obj; + } } + try_error_counter = try_error_counter .checked_add(1) .expect("We check error counter in this loop"); if try_error_counter > self.polling_max_error_attempts { - anyhow::bail!("Number of retries exceeded"); + break; } - }; - - if let Some(tx) = tx_obj.transaction { - return Ok(tx); } tokio::time::sleep(self.polling_delay).await; @@ -72,16 +66,15 @@ impl TxPoller { pub fn poll_block_range( &self, range: std::ops::RangeInclusive, - ) -> impl futures::Stream> { + ) -> impl futures::Stream> { async_stream::stream! 
{ let mut chunk_start = *range.start(); loop { let chunk_end = std::cmp::min(chunk_start.saturating_add(self.block_poll_max_amount).saturating_sub(1), *range.end()); - let blocks = self.client.get_block_range(chunk_start..=chunk_end).await?.blocks; + let blocks = self.client.get_block_range(chunk_start, chunk_end).await?; for block in blocks { - let block = borsh::from_slice::(&block)?; yield Ok(block); } diff --git a/wallet/src/privacy_preserving_tx.rs b/wallet/src/privacy_preserving_tx.rs index 5418c58d..04056111 100644 --- a/wallet/src/privacy_preserving_tx.rs +++ b/wallet/src/privacy_preserving_tx.rs @@ -1,5 +1,4 @@ use anyhow::Result; -use common::error::ExecutionFailureKind; use key_protocol::key_management::ephemeral_key_holder::EphemeralKeyHolder; use nssa::{AccountId, PrivateKey}; use nssa_core::{ @@ -8,7 +7,7 @@ use nssa_core::{ encryption::{EphemeralPublicKey, ViewingPublicKey}, }; -use crate::WalletCore; +use crate::{ExecutionFailureKind, WalletCore}; #[derive(Clone)] pub enum PrivacyPreservingAccount { @@ -214,7 +213,7 @@ async fn private_acc_preparation( let nsk = from_keys.private_key_holder.nullifier_secret_key; - let from_npk = from_keys.nullifer_public_key; + let from_npk = from_keys.nullifier_public_key; let from_vpk = from_keys.viewing_public_key; // TODO: Remove this unwrap, error types must be compatible diff --git a/wallet/src/program_facades/amm.rs b/wallet/src/program_facades/amm.rs index 19a51f29..b31d0658 100644 --- a/wallet/src/program_facades/amm.rs +++ b/wallet/src/program_facades/amm.rs @@ -1,9 +1,10 @@ use amm_core::{compute_liquidity_token_pda, compute_pool_pda, compute_vault_pda}; -use common::{error::ExecutionFailureKind, rpc_primitives::requests::SendTxResponse}; +use common::{HashType, transaction::NSSATransaction}; use nssa::{AccountId, program::Program}; +use sequencer_service_rpc::RpcClient as _; use token_core::TokenHolding; -use crate::WalletCore; +use crate::{ExecutionFailureKind, WalletCore}; pub struct Amm<'wallet>(pub 
&'wallet WalletCore); impl Amm<'_> { @@ -14,7 +15,7 @@ impl Amm<'_> { user_holding_lp: AccountId, balance_a: u128, balance_b: u128, - ) -> Result { + ) -> Result { let program = Program::amm(); let amm_program_id = Program::amm().id(); let instruction = amm_core::Instruction::NewDefinition { @@ -57,18 +58,21 @@ impl Amm<'_> { user_holding_lp, ]; - let nonces = self + let mut nonces = self .0 .get_accounts_nonces(vec![user_holding_a, user_holding_b]) .await .map_err(ExecutionFailureKind::SequencerError)?; + let mut private_keys = Vec::new(); + let signing_key_a = self .0 .storage .user_data .get_pub_account_signing_key(user_holding_a) .ok_or(ExecutionFailureKind::KeyNotFoundError)?; + private_keys.push(signing_key_a); let signing_key_b = self .0 @@ -76,37 +80,56 @@ impl Amm<'_> { .user_data .get_pub_account_signing_key(user_holding_b) .ok_or(ExecutionFailureKind::KeyNotFoundError)?; + private_keys.push(signing_key_b); + + if let Some(signing_key_lp) = self + .0 + .storage + .user_data + .get_pub_account_signing_key(user_holding_lp) + { + private_keys.push(signing_key_lp); + let lp_nonces = self + .0 + .get_accounts_nonces(vec![user_holding_lp]) + .await + .map_err(ExecutionFailureKind::SequencerError)?; + nonces.extend(lp_nonces); + } else { + println!( + "Liquidity pool tokens receiver's account ({user_holding_lp}) private key not found in wallet. Proceeding with only liquidity provider's keys." + ); + } let message = nssa::public_transaction::Message::try_new( program.id(), account_ids, - nonces - .iter() - .map(|x| nssa_core::account::Nonce(*x)) - .collect(), + nonces, instruction, ) .unwrap(); - let witness_set = nssa::public_transaction::WitnessSet::for_message( - &message, - &[signing_key_a, signing_key_b], - ); + let witness_set = + nssa::public_transaction::WitnessSet::for_message(&message, &private_keys); let tx = nssa::PublicTransaction::new(message, witness_set); - Ok(self.0.sequencer_client.send_tx_public(tx).await?) 
+ Ok(self + .0 + .sequencer_client + .send_transaction(NSSATransaction::Public(tx)) + .await?) } - pub async fn send_swap( + pub async fn send_swap_exact_input( &self, user_holding_a: AccountId, user_holding_b: AccountId, swap_amount_in: u128, min_amount_out: u128, token_definition_id_in: AccountId, - ) -> Result { - let instruction = amm_core::Instruction::Swap { + ) -> Result { + let instruction = amm_core::Instruction::SwapExactInput { swap_amount_in, min_amount_out, token_definition_id_in, @@ -145,34 +168,15 @@ impl Amm<'_> { user_holding_b, ]; - let account_id_auth; - - // Checking, which account are associated with TokenDefinition - let token_holder_acc_a = self - .0 - .get_account_public(user_holding_a) - .await - .map_err(ExecutionFailureKind::SequencerError)?; - let token_holder_acc_b = self - .0 - .get_account_public(user_holding_b) - .await - .map_err(ExecutionFailureKind::SequencerError)?; - - let token_holder_a = TokenHolding::try_from(&token_holder_acc_a.data) - .map_err(|_err| ExecutionFailureKind::AccountDataError(user_holding_a))?; - let token_holder_b = TokenHolding::try_from(&token_holder_acc_b.data) - .map_err(|_err| ExecutionFailureKind::AccountDataError(user_holding_b))?; - - if token_holder_a.definition_id() == token_definition_id_in { - account_id_auth = user_holding_a; - } else if token_holder_b.definition_id() == token_definition_id_in { - account_id_auth = user_holding_b; + let account_id_auth = if definition_token_a_id == token_definition_id_in { + user_holding_a + } else if definition_token_b_id == token_definition_id_in { + user_holding_b } else { return Err(ExecutionFailureKind::AccountDataError( token_definition_id_in, )); - } + }; let nonces = self .0 @@ -190,10 +194,7 @@ impl Amm<'_> { let message = nssa::public_transaction::Message::try_new( program.id(), account_ids, - nonces - .iter() - .map(|x| nssa_core::account::Nonce(*x)) - .collect(), + nonces, instruction, ) .unwrap(); @@ -203,7 +204,101 @@ impl Amm<'_> { let tx = 
nssa::PublicTransaction::new(message, witness_set); - Ok(self.0.sequencer_client.send_tx_public(tx).await?) + Ok(self + .0 + .sequencer_client + .send_transaction(NSSATransaction::Public(tx)) + .await?) + } + + pub async fn send_swap_exact_output( + &self, + user_holding_a: AccountId, + user_holding_b: AccountId, + exact_amount_out: u128, + max_amount_in: u128, + token_definition_id_in: AccountId, + ) -> Result { + let instruction = amm_core::Instruction::SwapExactOutput { + exact_amount_out, + max_amount_in, + token_definition_id_in, + }; + let program = Program::amm(); + let amm_program_id = Program::amm().id(); + + let user_a_acc = self + .0 + .get_account_public(user_holding_a) + .await + .map_err(ExecutionFailureKind::SequencerError)?; + let user_b_acc = self + .0 + .get_account_public(user_holding_b) + .await + .map_err(ExecutionFailureKind::SequencerError)?; + + let definition_token_a_id = TokenHolding::try_from(&user_a_acc.data) + .map_err(|_err| ExecutionFailureKind::AccountDataError(user_holding_a))? + .definition_id(); + let definition_token_b_id = TokenHolding::try_from(&user_b_acc.data) + .map_err(|_err| ExecutionFailureKind::AccountDataError(user_holding_b))? 
+ .definition_id(); + + let amm_pool = + compute_pool_pda(amm_program_id, definition_token_a_id, definition_token_b_id); + let vault_holding_a = compute_vault_pda(amm_program_id, amm_pool, definition_token_a_id); + let vault_holding_b = compute_vault_pda(amm_program_id, amm_pool, definition_token_b_id); + + let account_ids = vec![ + amm_pool, + vault_holding_a, + vault_holding_b, + user_holding_a, + user_holding_b, + ]; + + let account_id_auth = if definition_token_a_id == token_definition_id_in { + user_holding_a + } else if definition_token_b_id == token_definition_id_in { + user_holding_b + } else { + return Err(ExecutionFailureKind::AccountDataError( + token_definition_id_in, + )); + }; + + let nonces = self + .0 + .get_accounts_nonces(vec![account_id_auth]) + .await + .map_err(ExecutionFailureKind::SequencerError)?; + + let signing_key = self + .0 + .storage + .user_data + .get_pub_account_signing_key(account_id_auth) + .ok_or(ExecutionFailureKind::KeyNotFoundError)?; + + let message = nssa::public_transaction::Message::try_new( + program.id(), + account_ids, + nonces, + instruction, + ) + .unwrap(); + + let witness_set = + nssa::public_transaction::WitnessSet::for_message(&message, &[signing_key]); + + let tx = nssa::PublicTransaction::new(message, witness_set); + + Ok(self + .0 + .sequencer_client + .send_transaction(NSSATransaction::Public(tx)) + .await?) 
} pub async fn send_add_liquidity( @@ -214,7 +309,7 @@ impl Amm<'_> { min_amount_liquidity: u128, max_amount_to_add_token_a: u128, max_amount_to_add_token_b: u128, - ) -> Result { + ) -> Result { let instruction = amm_core::Instruction::AddLiquidity { min_amount_liquidity, max_amount_to_add_token_a, @@ -280,10 +375,7 @@ impl Amm<'_> { let message = nssa::public_transaction::Message::try_new( program.id(), account_ids, - nonces - .iter() - .map(|x| nssa_core::account::Nonce(*x)) - .collect(), + nonces, instruction, ) .unwrap(); @@ -295,7 +387,11 @@ impl Amm<'_> { let tx = nssa::PublicTransaction::new(message, witness_set); - Ok(self.0.sequencer_client.send_tx_public(tx).await?) + Ok(self + .0 + .sequencer_client + .send_transaction(NSSATransaction::Public(tx)) + .await?) } pub async fn send_remove_liquidity( @@ -306,7 +402,7 @@ impl Amm<'_> { remove_liquidity_amount: u128, min_amount_to_remove_token_a: u128, min_amount_to_remove_token_b: u128, - ) -> Result { + ) -> Result { let instruction = amm_core::Instruction::RemoveLiquidity { remove_liquidity_amount, min_amount_to_remove_token_a, @@ -365,10 +461,7 @@ impl Amm<'_> { let message = nssa::public_transaction::Message::try_new( program.id(), account_ids, - nonces - .iter() - .map(|x| nssa_core::account::Nonce(*x)) - .collect(), + nonces, instruction, ) .unwrap(); @@ -378,6 +471,10 @@ impl Amm<'_> { let tx = nssa::PublicTransaction::new(message, witness_set); - Ok(self.0.sequencer_client.send_tx_public(tx).await?) + Ok(self + .0 + .sequencer_client + .send_transaction(NSSATransaction::Public(tx)) + .await?) 
} } diff --git a/wallet/src/program_facades/ata.rs b/wallet/src/program_facades/ata.rs new file mode 100644 index 00000000..ac60fb63 --- /dev/null +++ b/wallet/src/program_facades/ata.rs @@ -0,0 +1,280 @@ +use std::collections::HashMap; + +use ata_core::{compute_ata_seed, get_associated_token_account_id}; +use common::{HashType, transaction::NSSATransaction}; +use nssa::{ + AccountId, privacy_preserving_transaction::circuit::ProgramWithDependencies, program::Program, +}; +use nssa_core::SharedSecretKey; +use sequencer_service_rpc::RpcClient as _; + +use crate::{ExecutionFailureKind, PrivacyPreservingAccount, WalletCore}; + +pub struct Ata<'wallet>(pub &'wallet WalletCore); + +impl Ata<'_> { + pub async fn send_create( + &self, + owner_id: AccountId, + definition_id: AccountId, + ) -> Result { + let program = Program::ata(); + let ata_program_id = program.id(); + let ata_id = get_associated_token_account_id( + &ata_program_id, + &compute_ata_seed(owner_id, definition_id), + ); + + let account_ids = vec![owner_id, definition_id, ata_id]; + + let nonces = self + .0 + .get_accounts_nonces(vec![owner_id]) + .await + .map_err(ExecutionFailureKind::SequencerError)?; + + let Some(signing_key) = self + .0 + .storage + .user_data + .get_pub_account_signing_key(owner_id) + else { + return Err(ExecutionFailureKind::KeyNotFoundError); + }; + + let instruction = ata_core::Instruction::Create { ata_program_id }; + + let message = nssa::public_transaction::Message::try_new( + program.id(), + account_ids, + nonces, + instruction, + )?; + + let witness_set = + nssa::public_transaction::WitnessSet::for_message(&message, &[signing_key]); + + let tx = nssa::PublicTransaction::new(message, witness_set); + + Ok(self + .0 + .sequencer_client + .send_transaction(NSSATransaction::Public(tx)) + .await?) 
+ } + + pub async fn send_transfer( + &self, + owner_id: AccountId, + definition_id: AccountId, + recipient_id: AccountId, + amount: u128, + ) -> Result { + let program = Program::ata(); + let ata_program_id = program.id(); + let sender_ata_id = get_associated_token_account_id( + &ata_program_id, + &compute_ata_seed(owner_id, definition_id), + ); + + let account_ids = vec![owner_id, sender_ata_id, recipient_id]; + + let nonces = self + .0 + .get_accounts_nonces(vec![owner_id]) + .await + .map_err(ExecutionFailureKind::SequencerError)?; + + let Some(signing_key) = self + .0 + .storage + .user_data + .get_pub_account_signing_key(owner_id) + else { + return Err(ExecutionFailureKind::KeyNotFoundError); + }; + + let instruction = ata_core::Instruction::Transfer { + ata_program_id, + amount, + }; + + let message = nssa::public_transaction::Message::try_new( + program.id(), + account_ids, + nonces, + instruction, + )?; + + let witness_set = + nssa::public_transaction::WitnessSet::for_message(&message, &[signing_key]); + + let tx = nssa::PublicTransaction::new(message, witness_set); + + Ok(self + .0 + .sequencer_client + .send_transaction(NSSATransaction::Public(tx)) + .await?) 
+ } + + pub async fn send_burn( + &self, + owner_id: AccountId, + definition_id: AccountId, + amount: u128, + ) -> Result { + let program = Program::ata(); + let ata_program_id = program.id(); + let holder_ata_id = get_associated_token_account_id( + &ata_program_id, + &compute_ata_seed(owner_id, definition_id), + ); + + let account_ids = vec![owner_id, holder_ata_id, definition_id]; + + let nonces = self + .0 + .get_accounts_nonces(vec![owner_id]) + .await + .map_err(ExecutionFailureKind::SequencerError)?; + + let Some(signing_key) = self + .0 + .storage + .user_data + .get_pub_account_signing_key(owner_id) + else { + return Err(ExecutionFailureKind::KeyNotFoundError); + }; + + let instruction = ata_core::Instruction::Burn { + ata_program_id, + amount, + }; + + let message = nssa::public_transaction::Message::try_new( + program.id(), + account_ids, + nonces, + instruction, + )?; + + let witness_set = + nssa::public_transaction::WitnessSet::for_message(&message, &[signing_key]); + + let tx = nssa::PublicTransaction::new(message, witness_set); + + Ok(self + .0 + .sequencer_client + .send_transaction(NSSATransaction::Public(tx)) + .await?) 
+ } + + pub async fn send_create_private_owner( + &self, + owner_id: AccountId, + definition_id: AccountId, + ) -> Result<(HashType, SharedSecretKey), ExecutionFailureKind> { + let ata_program_id = Program::ata().id(); + let ata_id = get_associated_token_account_id( + &ata_program_id, + &compute_ata_seed(owner_id, definition_id), + ); + + let instruction = ata_core::Instruction::Create { ata_program_id }; + let instruction_data = + Program::serialize_instruction(instruction).expect("Instruction should serialize"); + + let accounts = vec![ + PrivacyPreservingAccount::PrivateOwned(owner_id), + PrivacyPreservingAccount::Public(definition_id), + PrivacyPreservingAccount::Public(ata_id), + ]; + + self.0 + .send_privacy_preserving_tx(accounts, instruction_data, &ata_with_token_dependency()) + .await + .map(|(hash, mut secrets)| { + let secret = secrets.pop().expect("expected owner's secret"); + (hash, secret) + }) + } + + pub async fn send_transfer_private_owner( + &self, + owner_id: AccountId, + definition_id: AccountId, + recipient_id: AccountId, + amount: u128, + ) -> Result<(HashType, SharedSecretKey), ExecutionFailureKind> { + let ata_program_id = Program::ata().id(); + let sender_ata_id = get_associated_token_account_id( + &ata_program_id, + &compute_ata_seed(owner_id, definition_id), + ); + + let instruction = ata_core::Instruction::Transfer { + ata_program_id, + amount, + }; + let instruction_data = + Program::serialize_instruction(instruction).expect("Instruction should serialize"); + + let accounts = vec![ + PrivacyPreservingAccount::PrivateOwned(owner_id), + PrivacyPreservingAccount::Public(sender_ata_id), + PrivacyPreservingAccount::Public(recipient_id), + ]; + + self.0 + .send_privacy_preserving_tx(accounts, instruction_data, &ata_with_token_dependency()) + .await + .map(|(hash, mut secrets)| { + let secret = secrets.pop().expect("expected owner's secret"); + (hash, secret) + }) + } + + pub async fn send_burn_private_owner( + &self, + owner_id: AccountId, + 
definition_id: AccountId, + amount: u128, + ) -> Result<(HashType, SharedSecretKey), ExecutionFailureKind> { + let ata_program_id = Program::ata().id(); + let holder_ata_id = get_associated_token_account_id( + &ata_program_id, + &compute_ata_seed(owner_id, definition_id), + ); + + let instruction = ata_core::Instruction::Burn { + ata_program_id, + amount, + }; + let instruction_data = + Program::serialize_instruction(instruction).expect("Instruction should serialize"); + + let accounts = vec![ + PrivacyPreservingAccount::PrivateOwned(owner_id), + PrivacyPreservingAccount::Public(holder_ata_id), + PrivacyPreservingAccount::Public(definition_id), + ]; + + self.0 + .send_privacy_preserving_tx(accounts, instruction_data, &ata_with_token_dependency()) + .await + .map(|(hash, mut secrets)| { + let secret = secrets.pop().expect("expected owner's secret"); + (hash, secret) + }) + } +} + +fn ata_with_token_dependency() -> ProgramWithDependencies { + let token = Program::token(); + let mut deps = HashMap::new(); + deps.insert(token.id(), token); + ProgramWithDependencies::new(Program::ata(), deps) +} diff --git a/wallet/src/program_facades/mod.rs b/wallet/src/program_facades/mod.rs index 5fdcdb39..a0f8189c 100644 --- a/wallet/src/program_facades/mod.rs +++ b/wallet/src/program_facades/mod.rs @@ -2,6 +2,7 @@ //! on-chain programs. 
pub mod amm; +pub mod ata; pub mod native_token_transfer; pub mod pinata; pub mod token; diff --git a/wallet/src/program_facades/native_token_transfer/deshielded.rs b/wallet/src/program_facades/native_token_transfer/deshielded.rs index 7b774595..d51f15ce 100644 --- a/wallet/src/program_facades/native_token_transfer/deshielded.rs +++ b/wallet/src/program_facades/native_token_transfer/deshielded.rs @@ -1,8 +1,8 @@ -use common::{error::ExecutionFailureKind, rpc_primitives::requests::SendTxResponse}; +use common::HashType; use nssa::AccountId; use super::{NativeTokenTransfer, auth_transfer_preparation}; -use crate::PrivacyPreservingAccount; +use crate::{ExecutionFailureKind, PrivacyPreservingAccount}; impl NativeTokenTransfer<'_> { pub async fn send_deshielded_transfer( @@ -10,7 +10,7 @@ impl NativeTokenTransfer<'_> { from: AccountId, to: AccountId, balance_to_move: u128, - ) -> Result<(SendTxResponse, nssa_core::SharedSecretKey), ExecutionFailureKind> { + ) -> Result<(HashType, nssa_core::SharedSecretKey), ExecutionFailureKind> { let (instruction_data, program, tx_pre_check) = auth_transfer_preparation(balance_to_move); self.0 diff --git a/wallet/src/program_facades/native_token_transfer/mod.rs b/wallet/src/program_facades/native_token_transfer/mod.rs index 1db864f6..c771c735 100644 --- a/wallet/src/program_facades/native_token_transfer/mod.rs +++ b/wallet/src/program_facades/native_token_transfer/mod.rs @@ -1,8 +1,7 @@ -use common::error::ExecutionFailureKind; use nssa::{Account, program::Program}; use nssa_core::program::InstructionData; -use crate::WalletCore; +use crate::{ExecutionFailureKind, WalletCore}; pub mod deshielded; pub mod private; @@ -15,11 +14,6 @@ pub mod shielded; )] pub struct NativeTokenTransfer<'wallet>(pub &'wallet WalletCore); -// TODO: handle large Err-variant properly -#[expect( - clippy::result_large_err, - reason = "ExecutionFailureKind is large, tracked by TODO" -)] fn auth_transfer_preparation( balance_to_move: u128, ) -> ( diff --git 
a/wallet/src/program_facades/native_token_transfer/private.rs b/wallet/src/program_facades/native_token_transfer/private.rs index eb37ec94..c3a2125b 100644 --- a/wallet/src/program_facades/native_token_transfer/private.rs +++ b/wallet/src/program_facades/native_token_transfer/private.rs @@ -1,17 +1,17 @@ use std::vec; -use common::{error::ExecutionFailureKind, rpc_primitives::requests::SendTxResponse}; +use common::HashType; use nssa::{AccountId, program::Program}; use nssa_core::{NullifierPublicKey, SharedSecretKey, encryption::ViewingPublicKey}; use super::{NativeTokenTransfer, auth_transfer_preparation}; -use crate::PrivacyPreservingAccount; +use crate::{ExecutionFailureKind, PrivacyPreservingAccount}; impl NativeTokenTransfer<'_> { pub async fn register_account_private( &self, from: AccountId, - ) -> Result<(SendTxResponse, SharedSecretKey), ExecutionFailureKind> { + ) -> Result<(HashType, SharedSecretKey), ExecutionFailureKind> { let instruction: u128 = 0; self.0 @@ -34,7 +34,7 @@ impl NativeTokenTransfer<'_> { to_npk: NullifierPublicKey, to_vpk: ViewingPublicKey, balance_to_move: u128, - ) -> Result<(SendTxResponse, [SharedSecretKey; 2]), ExecutionFailureKind> { + ) -> Result<(HashType, [SharedSecretKey; 2]), ExecutionFailureKind> { let (instruction_data, program, tx_pre_check) = auth_transfer_preparation(balance_to_move); self.0 @@ -64,7 +64,7 @@ impl NativeTokenTransfer<'_> { from: AccountId, to: AccountId, balance_to_move: u128, - ) -> Result<(SendTxResponse, [SharedSecretKey; 2]), ExecutionFailureKind> { + ) -> Result<(HashType, [SharedSecretKey; 2]), ExecutionFailureKind> { let (instruction_data, program, tx_pre_check) = auth_transfer_preparation(balance_to_move); self.0 diff --git a/wallet/src/program_facades/native_token_transfer/public.rs b/wallet/src/program_facades/native_token_transfer/public.rs index eefaa1fe..2d936d3f 100644 --- a/wallet/src/program_facades/native_token_transfer/public.rs +++ 
b/wallet/src/program_facades/native_token_transfer/public.rs @@ -1,11 +1,13 @@ -use common::{error::ExecutionFailureKind, rpc_primitives::requests::SendTxResponse}; +use common::{HashType, transaction::NSSATransaction}; use nssa::{ AccountId, PublicTransaction, program::Program, public_transaction::{Message, WitnessSet}, }; +use sequencer_service_rpc::RpcClient as _; use super::NativeTokenTransfer; +use crate::ExecutionFailureKind; impl NativeTokenTransfer<'_> { pub async fn send_public_transfer( @@ -13,7 +15,7 @@ impl NativeTokenTransfer<'_> { from: AccountId, to: AccountId, balance_to_move: u128, - ) -> Result { + ) -> Result { let balance = self .0 .get_account_balance(from) @@ -21,36 +23,48 @@ impl NativeTokenTransfer<'_> { .map_err(ExecutionFailureKind::SequencerError)?; if balance >= balance_to_move { - let nonces = self + let account_ids = vec![from, to]; + let program_id = Program::authenticated_transfer_program().id(); + + let mut nonces = self .0 .get_accounts_nonces(vec![from]) .await .map_err(ExecutionFailureKind::SequencerError)?; - let account_ids = vec![from, to]; - let program_id = Program::authenticated_transfer_program().id(); - let message = Message::try_new( - program_id, - account_ids, - nonces - .iter() - .map(|x| nssa_core::account::Nonce(*x)) - .collect(), - balance_to_move, - ) - .unwrap(); - - let signing_key = self.0.storage.user_data.get_pub_account_signing_key(from); - - let Some(signing_key) = signing_key else { + let mut private_keys = Vec::new(); + let from_signing_key = self.0.storage.user_data.get_pub_account_signing_key(from); + let Some(from_signing_key) = from_signing_key else { return Err(ExecutionFailureKind::KeyNotFoundError); }; + private_keys.push(from_signing_key); - let witness_set = WitnessSet::for_message(&message, &[signing_key]); + let to_signing_key = self.0.storage.user_data.get_pub_account_signing_key(to); + if let Some(to_signing_key) = to_signing_key { + private_keys.push(to_signing_key); + let to_nonces = self + 
.0 + .get_accounts_nonces(vec![to]) + .await + .map_err(ExecutionFailureKind::SequencerError)?; + nonces.extend(to_nonces); + } else { + println!( + "Receiver's account ({to}) private key not found in wallet. Proceeding with only sender's key." + ); + } + + let message = + Message::try_new(program_id, account_ids, nonces, balance_to_move).unwrap(); + let witness_set = WitnessSet::for_message(&message, &private_keys); let tx = PublicTransaction::new(message, witness_set); - Ok(self.0.sequencer_client.send_tx_public(tx).await?) + Ok(self + .0 + .sequencer_client + .send_transaction(NSSATransaction::Public(tx)) + .await?) } else { Err(ExecutionFailureKind::InsufficientFundsError) } @@ -59,7 +73,7 @@ impl NativeTokenTransfer<'_> { pub async fn register_account( &self, from: AccountId, - ) -> Result { + ) -> Result { let nonces = self .0 .get_accounts_nonces(vec![from]) @@ -69,16 +83,7 @@ impl NativeTokenTransfer<'_> { let instruction: u128 = 0; let account_ids = vec![from]; let program_id = Program::authenticated_transfer_program().id(); - let message = Message::try_new( - program_id, - account_ids, - nonces - .iter() - .map(|x| nssa_core::account::Nonce(*x)) - .collect(), - instruction, - ) - .unwrap(); + let message = Message::try_new(program_id, account_ids, nonces, instruction).unwrap(); let signing_key = self.0.storage.user_data.get_pub_account_signing_key(from); @@ -90,6 +95,10 @@ impl NativeTokenTransfer<'_> { let tx = PublicTransaction::new(message, witness_set); - Ok(self.0.sequencer_client.send_tx_public(tx).await?) + Ok(self + .0 + .sequencer_client + .send_transaction(NSSATransaction::Public(tx)) + .await?) 
} } diff --git a/wallet/src/program_facades/native_token_transfer/shielded.rs b/wallet/src/program_facades/native_token_transfer/shielded.rs index 22897502..625e1a8b 100644 --- a/wallet/src/program_facades/native_token_transfer/shielded.rs +++ b/wallet/src/program_facades/native_token_transfer/shielded.rs @@ -1,9 +1,9 @@ -use common::{error::ExecutionFailureKind, rpc_primitives::requests::SendTxResponse}; +use common::HashType; use nssa::AccountId; use nssa_core::{NullifierPublicKey, SharedSecretKey, encryption::ViewingPublicKey}; use super::{NativeTokenTransfer, auth_transfer_preparation}; -use crate::PrivacyPreservingAccount; +use crate::{ExecutionFailureKind, PrivacyPreservingAccount}; impl NativeTokenTransfer<'_> { pub async fn send_shielded_transfer( @@ -11,7 +11,7 @@ impl NativeTokenTransfer<'_> { from: AccountId, to: AccountId, balance_to_move: u128, - ) -> Result<(SendTxResponse, SharedSecretKey), ExecutionFailureKind> { + ) -> Result<(HashType, SharedSecretKey), ExecutionFailureKind> { let (instruction_data, program, tx_pre_check) = auth_transfer_preparation(balance_to_move); self.0 @@ -40,7 +40,7 @@ impl NativeTokenTransfer<'_> { to_npk: NullifierPublicKey, to_vpk: ViewingPublicKey, balance_to_move: u128, - ) -> Result<(SendTxResponse, SharedSecretKey), ExecutionFailureKind> { + ) -> Result<(HashType, SharedSecretKey), ExecutionFailureKind> { let (instruction_data, program, tx_pre_check) = auth_transfer_preparation(balance_to_move); self.0 diff --git a/wallet/src/program_facades/pinata.rs b/wallet/src/program_facades/pinata.rs index c68fa658..97118ecd 100644 --- a/wallet/src/program_facades/pinata.rs +++ b/wallet/src/program_facades/pinata.rs @@ -1,8 +1,9 @@ -use common::{error::ExecutionFailureKind, rpc_primitives::requests::SendTxResponse}; +use common::{HashType, transaction::NSSATransaction}; use nssa::AccountId; use nssa_core::{MembershipProof, SharedSecretKey}; +use sequencer_service_rpc::RpcClient as _; -use crate::{PrivacyPreservingAccount, 
WalletCore}; +use crate::{ExecutionFailureKind, PrivacyPreservingAccount, WalletCore}; pub struct Pinata<'wallet>(pub &'wallet WalletCore); @@ -12,7 +13,7 @@ impl Pinata<'_> { pinata_account_id: AccountId, winner_account_id: AccountId, solution: u128, - ) -> Result { + ) -> Result { let account_ids = vec![pinata_account_id, winner_account_id]; let program_id = nssa::program::Program::pinata().id(); let message = @@ -22,7 +23,11 @@ impl Pinata<'_> { let witness_set = nssa::public_transaction::WitnessSet::for_message(&message, &[]); let tx = nssa::PublicTransaction::new(message, witness_set); - Ok(self.0.sequencer_client.send_tx_public(tx).await?) + Ok(self + .0 + .sequencer_client + .send_transaction(NSSATransaction::Public(tx)) + .await?) } /// Claim a pinata reward using a privacy-preserving transaction for an already-initialized @@ -36,7 +41,7 @@ impl Pinata<'_> { winner_account_id: AccountId, solution: u128, _winner_proof: MembershipProof, - ) -> Result<(SendTxResponse, SharedSecretKey), ExecutionFailureKind> { + ) -> Result<(HashType, SharedSecretKey), ExecutionFailureKind> { self.claim_private_owned_account(pinata_account_id, winner_account_id, solution) .await } @@ -46,7 +51,7 @@ impl Pinata<'_> { pinata_account_id: AccountId, winner_account_id: AccountId, solution: u128, - ) -> Result<(SendTxResponse, SharedSecretKey), ExecutionFailureKind> { + ) -> Result<(HashType, SharedSecretKey), ExecutionFailureKind> { self.0 .send_privacy_preserving_tx( vec![ diff --git a/wallet/src/program_facades/token.rs b/wallet/src/program_facades/token.rs index bdacae37..1f941c8c 100644 --- a/wallet/src/program_facades/token.rs +++ b/wallet/src/program_facades/token.rs @@ -1,9 +1,10 @@ -use common::{error::ExecutionFailureKind, rpc_primitives::requests::SendTxResponse}; +use common::{HashType, transaction::NSSATransaction}; use nssa::{AccountId, program::Program}; use nssa_core::{NullifierPublicKey, SharedSecretKey, encryption::ViewingPublicKey}; +use 
sequencer_service_rpc::RpcClient as _; use token_core::Instruction; -use crate::{PrivacyPreservingAccount, WalletCore}; +use crate::{ExecutionFailureKind, PrivacyPreservingAccount, WalletCore}; pub struct Token<'wallet>(pub &'wallet WalletCore); @@ -14,23 +15,48 @@ impl Token<'_> { supply_account_id: AccountId, name: String, total_supply: u128, - ) -> Result { + ) -> Result { let account_ids = vec![definition_account_id, supply_account_id]; let program_id = nssa::program::Program::token().id(); let instruction = Instruction::NewFungibleDefinition { name, total_supply }; + let nonces = self + .0 + .get_accounts_nonces(account_ids.clone()) + .await + .map_err(ExecutionFailureKind::SequencerError)?; let message = nssa::public_transaction::Message::try_new( program_id, account_ids, - vec![], + nonces, instruction, ) .unwrap(); - let witness_set = nssa::public_transaction::WitnessSet::for_message(&message, &[]); + let def_private_key = self + .0 + .storage + .user_data + .get_pub_account_signing_key(definition_account_id) + .ok_or(ExecutionFailureKind::KeyNotFoundError)?; + let supply_private_key = self + .0 + .storage + .user_data + .get_pub_account_signing_key(supply_account_id) + .ok_or(ExecutionFailureKind::KeyNotFoundError)?; + + let witness_set = nssa::public_transaction::WitnessSet::for_message( + &message, + &[def_private_key, supply_private_key], + ); let tx = nssa::PublicTransaction::new(message, witness_set); - Ok(self.0.sequencer_client.send_tx_public(tx).await?) + Ok(self + .0 + .sequencer_client + .send_transaction(NSSATransaction::Public(tx)) + .await?) 
} pub async fn send_new_definition_private_owned_supply( @@ -39,7 +65,7 @@ impl Token<'_> { supply_account_id: AccountId, name: String, total_supply: u128, - ) -> Result<(SendTxResponse, SharedSecretKey), ExecutionFailureKind> { + ) -> Result<(HashType, SharedSecretKey), ExecutionFailureKind> { let instruction = Instruction::NewFungibleDefinition { name, total_supply }; let instruction_data = Program::serialize_instruction(instruction).expect("Instruction should serialize"); @@ -69,7 +95,7 @@ impl Token<'_> { supply_account_id: AccountId, name: String, total_supply: u128, - ) -> Result<(SendTxResponse, SharedSecretKey), ExecutionFailureKind> { + ) -> Result<(HashType, SharedSecretKey), ExecutionFailureKind> { let instruction = Instruction::NewFungibleDefinition { name, total_supply }; let instruction_data = Program::serialize_instruction(instruction).expect("Instruction should serialize"); @@ -99,7 +125,7 @@ impl Token<'_> { supply_account_id: AccountId, name: String, total_supply: u128, - ) -> Result<(SendTxResponse, [SharedSecretKey; 2]), ExecutionFailureKind> { + ) -> Result<(HashType, [SharedSecretKey; 2]), ExecutionFailureKind> { let instruction = Instruction::NewFungibleDefinition { name, total_supply }; let instruction_data = Program::serialize_instruction(instruction).expect("Instruction should serialize"); @@ -127,42 +153,63 @@ impl Token<'_> { sender_account_id: AccountId, recipient_account_id: AccountId, amount: u128, - ) -> Result { + ) -> Result { let account_ids = vec![sender_account_id, recipient_account_id]; let program_id = nssa::program::Program::token().id(); let instruction = Instruction::Transfer { amount_to_transfer: amount, }; - let nonces = self + let mut nonces = self .0 .get_accounts_nonces(vec![sender_account_id]) .await .map_err(ExecutionFailureKind::SequencerError)?; - let message = nssa::public_transaction::Message::try_new( - program_id, - account_ids, - nonces - .iter() - .map(|x| nssa_core::account::Nonce(*x)) - .collect(), - 
instruction, - ) - .unwrap(); - let Some(signing_key) = self + let mut private_keys = Vec::new(); + let sender_sk = self .0 .storage .user_data .get_pub_account_signing_key(sender_account_id) - else { - return Err(ExecutionFailureKind::KeyNotFoundError); - }; + .ok_or(ExecutionFailureKind::KeyNotFoundError)?; + private_keys.push(sender_sk); + + if let Some(recipient_sk) = self + .0 + .storage + .user_data + .get_pub_account_signing_key(recipient_account_id) + { + private_keys.push(recipient_sk); + let recipient_nonces = self + .0 + .get_accounts_nonces(vec![recipient_account_id]) + .await + .map_err(ExecutionFailureKind::SequencerError)?; + nonces.extend(recipient_nonces); + } else { + println!( + "Receiver's account ({recipient_account_id}) private key not found in wallet. Proceeding with only sender's key." + ); + } + + let message = nssa::public_transaction::Message::try_new( + program_id, + account_ids, + nonces, + instruction, + ) + .unwrap(); let witness_set = - nssa::public_transaction::WitnessSet::for_message(&message, &[signing_key]); + nssa::public_transaction::WitnessSet::for_message(&message, &private_keys); let tx = nssa::PublicTransaction::new(message, witness_set); - Ok(self.0.sequencer_client.send_tx_public(tx).await?) + Ok(self + .0 + .sequencer_client + .send_transaction(NSSATransaction::Public(tx)) + .await?) 
} pub async fn send_transfer_transaction_private_owned_account( @@ -170,7 +217,7 @@ impl Token<'_> { sender_account_id: AccountId, recipient_account_id: AccountId, amount: u128, - ) -> Result<(SendTxResponse, [SharedSecretKey; 2]), ExecutionFailureKind> { + ) -> Result<(HashType, [SharedSecretKey; 2]), ExecutionFailureKind> { let instruction = Instruction::Transfer { amount_to_transfer: amount, }; @@ -201,7 +248,7 @@ impl Token<'_> { recipient_npk: NullifierPublicKey, recipient_vpk: ViewingPublicKey, amount: u128, - ) -> Result<(SendTxResponse, [SharedSecretKey; 2]), ExecutionFailureKind> { + ) -> Result<(HashType, [SharedSecretKey; 2]), ExecutionFailureKind> { let instruction = Instruction::Transfer { amount_to_transfer: amount, }; @@ -234,7 +281,7 @@ impl Token<'_> { sender_account_id: AccountId, recipient_account_id: AccountId, amount: u128, - ) -> Result<(SendTxResponse, SharedSecretKey), ExecutionFailureKind> { + ) -> Result<(HashType, SharedSecretKey), ExecutionFailureKind> { let instruction = Instruction::Transfer { amount_to_transfer: amount, }; @@ -265,7 +312,7 @@ impl Token<'_> { sender_account_id: AccountId, recipient_account_id: AccountId, amount: u128, - ) -> Result<(SendTxResponse, SharedSecretKey), ExecutionFailureKind> { + ) -> Result<(HashType, SharedSecretKey), ExecutionFailureKind> { let instruction = Instruction::Transfer { amount_to_transfer: amount, }; @@ -297,7 +344,7 @@ impl Token<'_> { recipient_npk: NullifierPublicKey, recipient_vpk: ViewingPublicKey, amount: u128, - ) -> Result<(SendTxResponse, SharedSecretKey), ExecutionFailureKind> { + ) -> Result<(HashType, SharedSecretKey), ExecutionFailureKind> { let instruction = Instruction::Transfer { amount_to_transfer: amount, }; @@ -331,7 +378,7 @@ impl Token<'_> { definition_account_id: AccountId, holder_account_id: AccountId, amount: u128, - ) -> Result { + ) -> Result { let account_ids = vec![definition_account_id, holder_account_id]; let instruction = Instruction::Burn { amount_to_burn: 
amount, @@ -345,10 +392,7 @@ impl Token<'_> { let message = nssa::public_transaction::Message::try_new( Program::token().id(), account_ids, - nonces - .iter() - .map(|x| nssa_core::account::Nonce(*x)) - .collect(), + nonces, instruction, ) .expect("Instruction should serialize"); @@ -364,7 +408,11 @@ impl Token<'_> { let tx = nssa::PublicTransaction::new(message, witness_set); - Ok(self.0.sequencer_client.send_tx_public(tx).await?) + Ok(self + .0 + .sequencer_client + .send_transaction(NSSATransaction::Public(tx)) + .await?) } pub async fn send_burn_transaction_private_owned_account( @@ -372,7 +420,7 @@ impl Token<'_> { definition_account_id: AccountId, holder_account_id: AccountId, amount: u128, - ) -> Result<(SendTxResponse, [SharedSecretKey; 2]), ExecutionFailureKind> { + ) -> Result<(HashType, [SharedSecretKey; 2]), ExecutionFailureKind> { let instruction = Instruction::Burn { amount_to_burn: amount, }; @@ -402,7 +450,7 @@ impl Token<'_> { definition_account_id: AccountId, holder_account_id: AccountId, amount: u128, - ) -> Result<(SendTxResponse, SharedSecretKey), ExecutionFailureKind> { + ) -> Result<(HashType, SharedSecretKey), ExecutionFailureKind> { let instruction = Instruction::Burn { amount_to_burn: amount, }; @@ -433,7 +481,7 @@ impl Token<'_> { definition_account_id: AccountId, holder_account_id: AccountId, amount: u128, - ) -> Result<(SendTxResponse, SharedSecretKey), ExecutionFailureKind> { + ) -> Result<(HashType, SharedSecretKey), ExecutionFailureKind> { let instruction = Instruction::Burn { amount_to_burn: amount, }; @@ -464,42 +512,63 @@ impl Token<'_> { definition_account_id: AccountId, holder_account_id: AccountId, amount: u128, - ) -> Result { + ) -> Result { let account_ids = vec![definition_account_id, holder_account_id]; let instruction = Instruction::Mint { amount_to_mint: amount, }; - let nonces = self + let mut nonces = self .0 .get_accounts_nonces(vec![definition_account_id]) .await .map_err(ExecutionFailureKind::SequencerError)?; - let 
message = nssa::public_transaction::Message::try_new( - Program::token().id(), - account_ids, - nonces - .iter() - .map(|x| nssa_core::account::Nonce(*x)) - .collect(), - instruction, - ) - .unwrap(); - let Some(signing_key) = self + let mut private_keys = Vec::new(); + let definition_sk = self .0 .storage .user_data .get_pub_account_signing_key(definition_account_id) - else { - return Err(ExecutionFailureKind::KeyNotFoundError); - }; + .ok_or(ExecutionFailureKind::KeyNotFoundError)?; + private_keys.push(definition_sk); + + if let Some(holder_sk) = self + .0 + .storage + .user_data + .get_pub_account_signing_key(holder_account_id) + { + private_keys.push(holder_sk); + let recipient_nonces = self + .0 + .get_accounts_nonces(vec![holder_account_id]) + .await + .map_err(ExecutionFailureKind::SequencerError)?; + nonces.extend(recipient_nonces); + } else { + println!( + "Holder's account ({holder_account_id}) private key not found in wallet. Proceeding with only definition's key." + ); + } + + let message = nssa::public_transaction::Message::try_new( + Program::token().id(), + account_ids, + nonces, + instruction, + ) + .unwrap(); let witness_set = - nssa::public_transaction::WitnessSet::for_message(&message, &[signing_key]); + nssa::public_transaction::WitnessSet::for_message(&message, &private_keys); let tx = nssa::PublicTransaction::new(message, witness_set); - Ok(self.0.sequencer_client.send_tx_public(tx).await?) + Ok(self + .0 + .sequencer_client + .send_transaction(NSSATransaction::Public(tx)) + .await?) 
} pub async fn send_mint_transaction_private_owned_account( @@ -507,7 +576,7 @@ impl Token<'_> { definition_account_id: AccountId, holder_account_id: AccountId, amount: u128, - ) -> Result<(SendTxResponse, [SharedSecretKey; 2]), ExecutionFailureKind> { + ) -> Result<(HashType, [SharedSecretKey; 2]), ExecutionFailureKind> { let instruction = Instruction::Mint { amount_to_mint: amount, }; @@ -538,7 +607,7 @@ impl Token<'_> { holder_npk: NullifierPublicKey, holder_vpk: ViewingPublicKey, amount: u128, - ) -> Result<(SendTxResponse, [SharedSecretKey; 2]), ExecutionFailureKind> { + ) -> Result<(HashType, [SharedSecretKey; 2]), ExecutionFailureKind> { let instruction = Instruction::Mint { amount_to_mint: amount, }; @@ -571,7 +640,7 @@ impl Token<'_> { definition_account_id: AccountId, holder_account_id: AccountId, amount: u128, - ) -> Result<(SendTxResponse, SharedSecretKey), ExecutionFailureKind> { + ) -> Result<(HashType, SharedSecretKey), ExecutionFailureKind> { let instruction = Instruction::Mint { amount_to_mint: amount, }; @@ -602,7 +671,7 @@ impl Token<'_> { definition_account_id: AccountId, holder_account_id: AccountId, amount: u128, - ) -> Result<(SendTxResponse, SharedSecretKey), ExecutionFailureKind> { + ) -> Result<(HashType, SharedSecretKey), ExecutionFailureKind> { let instruction = Instruction::Mint { amount_to_mint: amount, }; @@ -634,7 +703,7 @@ impl Token<'_> { holder_npk: NullifierPublicKey, holder_vpk: ViewingPublicKey, amount: u128, - ) -> Result<(SendTxResponse, SharedSecretKey), ExecutionFailureKind> { + ) -> Result<(HashType, SharedSecretKey), ExecutionFailureKind> { let instruction = Instruction::Mint { amount_to_mint: amount, }; diff --git a/wallet/src/transaction_utils.rs b/wallet/src/transaction_utils.rs index 2a48d3e6..1bcb971f 100644 --- a/wallet/src/transaction_utils.rs +++ b/wallet/src/transaction_utils.rs @@ -1,4 +1,5 @@ -use common::{error::ExecutionFailureKind, sequencer_client::json::SendTxResponse}; +use common::{HashType, 
transaction::NSSATransaction}; +use sequencer_service_rpc::RpcClient as _; use key_protocol::key_management::ephemeral_key_holder::EphemeralKeyHolder; use nssa::{ Account, AccountId, PrivacyPreservingTransaction, @@ -10,7 +11,7 @@ use nssa_core::{ account::AccountWithMetadata, encryption::ViewingPublicKey, program::InstructionData, }; -use crate::{WalletCore, helperfunctions::produce_random_nonces}; +use crate::{ExecutionFailureKind, WalletCore, helperfunctions::produce_random_nonces}; pub(crate) struct AccountPreparedData { pub nsk: Option, @@ -39,7 +40,7 @@ impl WalletCore { let mut nsk = None; let mut proof = None; - let from_npk = from_keys.nullifer_public_key; + let from_npk = from_keys.nullifier_public_key; let from_vpk = from_keys.viewing_public_key; let sender_commitment = Commitment::new(&from_npk, &from_acc); @@ -51,11 +52,12 @@ impl WalletCore { } if needs_proof { - proof = self - .sequencer_client - .get_proof_for_commitment(sender_commitment) - .await - .unwrap(); + proof = Some( + self.sequencer_client + .get_proof_for_commitment(sender_commitment) + .await + .unwrap(), + ); } Ok(AccountPreparedData { @@ -75,7 +77,7 @@ impl WalletCore { tx_pre_check: impl FnOnce(&Account, &Account) -> Result<(), ExecutionFailureKind>, program: Program, to_proof: MembershipProof, - ) -> Result<(SendTxResponse, [SharedSecretKey; 2]), ExecutionFailureKind> { + ) -> Result<(HashType, [SharedSecretKey; 2]), ExecutionFailureKind> { let AccountPreparedData { nsk: from_nsk, npk: from_npk, @@ -140,7 +142,7 @@ impl WalletCore { let tx = PrivacyPreservingTransaction::new(message, witness_set); Ok(( - self.sequencer_client.send_tx_private(tx).await?, + self.sequencer_client.send_transaction(NSSATransaction::PrivacyPreserving(tx).into()).await?, [shared_secret_from, shared_secret_to], )) } @@ -152,7 +154,7 @@ impl WalletCore { instruction_data: InstructionData, tx_pre_check: impl FnOnce(&Account, &Account) -> Result<(), ExecutionFailureKind>, program: Program, - ) -> 
Result<(SendTxResponse, [SharedSecretKey; 2]), ExecutionFailureKind> { + ) -> Result<(HashType, [SharedSecretKey; 2]), ExecutionFailureKind> { let AccountPreparedData { nsk: from_nsk, npk: from_npk, @@ -214,7 +216,7 @@ impl WalletCore { let tx = PrivacyPreservingTransaction::new(message, witness_set); Ok(( - self.sequencer_client.send_tx_private(tx).await?, + self.sequencer_client.send_transaction(NSSATransaction::PrivacyPreserving(tx).into()).await?, [shared_secret_from, shared_secret_to], )) } @@ -227,7 +229,7 @@ impl WalletCore { instruction_data: InstructionData, tx_pre_check: impl FnOnce(&Account, &Account) -> Result<(), ExecutionFailureKind>, program: Program, - ) -> Result<(SendTxResponse, [SharedSecretKey; 2]), ExecutionFailureKind> { + ) -> Result<(HashType, [SharedSecretKey; 2]), ExecutionFailureKind> { let AccountPreparedData { nsk: from_nsk, npk: from_npk, @@ -285,7 +287,7 @@ impl WalletCore { let tx = PrivacyPreservingTransaction::new(message, witness_set); Ok(( - self.sequencer_client.send_tx_private(tx).await?, + self.sequencer_client.send_transaction(NSSATransaction::PrivacyPreserving(tx).into()).await?, [shared_secret_from, shared_secret_to], )) } @@ -297,7 +299,7 @@ impl WalletCore { instruction_data: InstructionData, tx_pre_check: impl FnOnce(&Account, &Account) -> Result<(), ExecutionFailureKind>, program: Program, - ) -> Result<(SendTxResponse, [nssa_core::SharedSecretKey; 1]), ExecutionFailureKind> { + ) -> Result<(HashType, [nssa_core::SharedSecretKey; 1]), ExecutionFailureKind> { let AccountPreparedData { nsk: from_nsk, npk: from_npk, @@ -345,7 +347,7 @@ impl WalletCore { let tx = PrivacyPreservingTransaction::new(message, witness_set); Ok(( - self.sequencer_client.send_tx_private(tx).await?, + self.sequencer_client.send_transaction(NSSATransaction::PrivacyPreserving(tx).into()).await?, [shared_secret], )) } @@ -358,7 +360,7 @@ impl WalletCore { tx_pre_check: impl FnOnce(&Account, &Account) -> Result<(), ExecutionFailureKind>, program: 
Program, to_proof: MembershipProof, - ) -> Result<(SendTxResponse, [SharedSecretKey; 1]), ExecutionFailureKind> { + ) -> Result<(HashType, [SharedSecretKey; 1]), ExecutionFailureKind> { let Ok(from_acc) = self.get_account_public(from).await else { return Err(ExecutionFailureKind::KeyNotFoundError); }; @@ -412,7 +414,7 @@ impl WalletCore { let tx = PrivacyPreservingTransaction::new(message, witness_set); Ok(( - self.sequencer_client.send_tx_private(tx).await?, + self.sequencer_client.send_transaction(NSSATransaction::PrivacyPreserving(tx).into()).await?, [shared_secret], )) } @@ -424,7 +426,7 @@ impl WalletCore { instruction_data: InstructionData, tx_pre_check: impl FnOnce(&Account, &Account) -> Result<(), ExecutionFailureKind>, program: Program, - ) -> Result<(SendTxResponse, [SharedSecretKey; 1]), ExecutionFailureKind> { + ) -> Result<(HashType, [SharedSecretKey; 1]), ExecutionFailureKind> { let Ok(from_acc) = self.get_account_public(from).await else { return Err(ExecutionFailureKind::KeyNotFoundError); }; @@ -478,7 +480,7 @@ impl WalletCore { let tx = PrivacyPreservingTransaction::new(message, witness_set); Ok(( - self.sequencer_client.send_tx_private(tx).await?, + self.sequencer_client.send_transaction(NSSATransaction::PrivacyPreserving(tx).into()).await?, [shared_secret], )) } @@ -491,7 +493,7 @@ impl WalletCore { instruction_data: InstructionData, tx_pre_check: impl FnOnce(&Account, &Account) -> Result<(), ExecutionFailureKind>, program: Program, - ) -> Result { + ) -> Result { let Ok(from_acc) = self.get_account_public(from).await else { return Err(ExecutionFailureKind::KeyNotFoundError); }; @@ -538,13 +540,13 @@ impl WalletCore { let witness_set = WitnessSet::for_message(&message, proof, &[signing_key]); let tx = PrivacyPreservingTransaction::new(message, witness_set); - Ok(self.sequencer_client.send_tx_private(tx).await?) + Ok(self.sequencer_client.send_transaction(NSSATransaction::PrivacyPreserving(tx).into()).await?) 
} pub async fn register_account_under_authenticated_transfers_programs_private( &self, from: AccountId, - ) -> Result<(SendTxResponse, [SharedSecretKey; 1]), ExecutionFailureKind> { + ) -> Result<(HashType, [SharedSecretKey; 1]), ExecutionFailureKind> { let AccountPreparedData { nsk: _, npk: from_npk, @@ -585,7 +587,7 @@ impl WalletCore { let tx = PrivacyPreservingTransaction::new(message, witness_set); Ok(( - self.sequencer_client.send_tx_private(tx).await?, + self.sequencer_client.send_transaction(NSSATransaction::PrivacyPreserving(tx).into()).await?, [shared_secret_from], )) }