Merge branch 'main' into schouhy/add-block-context

This commit is contained in:
Sergio Chouhy 2026-03-20 09:49:50 -03:00
commit 607a34058d
126 changed files with 2226 additions and 3575 deletions

View File

@ -12,12 +12,12 @@ jobs:
strategy:
matrix:
include:
- name: sequencer_runner
dockerfile: ./sequencer_runner/Dockerfile
- name: sequencer_service
dockerfile: ./sequencer/service/Dockerfile
build_args: |
STANDALONE=false
- name: sequencer_runner-standalone
dockerfile: ./sequencer_runner/Dockerfile
- name: sequencer_service-standalone
dockerfile: ./sequencer/service/Dockerfile
build_args: |
STANDALONE=true
- name: indexer_service

2
.gitignore vendored
View File

@ -6,7 +6,7 @@ data/
.idea/
.vscode/
rocksdb
sequencer_runner/data/
sequencer/service/data/
storage.json
result
wallet-ffi/wallet_ffi.h

431
Cargo.lock generated
View File

@ -2,229 +2,6 @@
# It is not intended for manual editing.
version = 4
[[package]]
name = "actix"
version = "0.13.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "de7fa236829ba0841304542f7614c42b80fca007455315c45c785ccfa873a85b"
dependencies = [
"actix-macros",
"actix-rt",
"actix_derive",
"bitflags 2.11.0",
"bytes",
"crossbeam-channel",
"futures-core",
"futures-sink",
"futures-task",
"futures-util",
"log",
"once_cell",
"parking_lot",
"pin-project-lite",
"smallvec",
"tokio",
"tokio-util",
]
[[package]]
name = "actix-codec"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f7b0a21988c1bf877cf4759ef5ddaac04c1c9fe808c9142ecb78ba97d97a28a"
dependencies = [
"bitflags 2.11.0",
"bytes",
"futures-core",
"futures-sink",
"memchr",
"pin-project-lite",
"tokio",
"tokio-util",
"tracing",
]
[[package]]
name = "actix-cors"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "daa239b93927be1ff123eebada5a3ff23e89f0124ccb8609234e5103d5a5ae6d"
dependencies = [
"actix-utils",
"actix-web",
"derive_more",
"futures-util",
"log",
"once_cell",
"smallvec",
]
[[package]]
name = "actix-http"
version = "3.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f860ee6746d0c5b682147b2f7f8ef036d4f92fe518251a3a35ffa3650eafdf0e"
dependencies = [
"actix-codec",
"actix-rt",
"actix-service",
"actix-utils",
"bitflags 2.11.0",
"bytes",
"bytestring",
"derive_more",
"encoding_rs",
"foldhash",
"futures-core",
"http 0.2.12",
"httparse",
"httpdate",
"itoa",
"language-tags",
"mime",
"percent-encoding",
"pin-project-lite",
"smallvec",
"tokio",
"tokio-util",
"tracing",
]
[[package]]
name = "actix-macros"
version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e01ed3140b2f8d422c68afa1ed2e85d996ea619c988ac834d255db32138655cb"
dependencies = [
"quote",
"syn 2.0.117",
]
[[package]]
name = "actix-router"
version = "0.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "14f8c75c51892f18d9c46150c5ac7beb81c95f78c8b83a634d49f4ca32551fe7"
dependencies = [
"bytestring",
"cfg-if",
"http 0.2.12",
"regex-lite",
"serde",
"tracing",
]
[[package]]
name = "actix-rt"
version = "2.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "92589714878ca59a7626ea19734f0e07a6a875197eec751bb5d3f99e64998c63"
dependencies = [
"futures-core",
"tokio",
]
[[package]]
name = "actix-server"
version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a65064ea4a457eaf07f2fba30b4c695bf43b721790e9530d26cb6f9019ff7502"
dependencies = [
"actix-rt",
"actix-service",
"actix-utils",
"futures-core",
"futures-util",
"mio",
"socket2 0.5.10",
"tokio",
"tracing",
]
[[package]]
name = "actix-service"
version = "2.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e46f36bf0e5af44bdc4bdb36fbbd421aa98c79a9bce724e1edeb3894e10dc7f"
dependencies = [
"futures-core",
"pin-project-lite",
]
[[package]]
name = "actix-utils"
version = "3.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "88a1dcdff1466e3c2488e1cb5c36a71822750ad43839937f85d2f4d9f8b705d8"
dependencies = [
"local-waker",
"pin-project-lite",
]
[[package]]
name = "actix-web"
version = "4.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff87453bc3b56e9b2b23c1cc0b1be8797184accf51d2abe0f8a33ec275d316bf"
dependencies = [
"actix-codec",
"actix-http",
"actix-macros",
"actix-router",
"actix-rt",
"actix-server",
"actix-service",
"actix-utils",
"actix-web-codegen",
"bytes",
"bytestring",
"cfg-if",
"derive_more",
"encoding_rs",
"foldhash",
"futures-core",
"futures-util",
"impl-more",
"itoa",
"language-tags",
"log",
"mime",
"once_cell",
"pin-project-lite",
"regex-lite",
"serde",
"serde_json",
"serde_urlencoded",
"smallvec",
"socket2 0.6.3",
"time",
"tracing",
"url",
]
[[package]]
name = "actix-web-codegen"
version = "4.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f591380e2e68490b5dfaf1dd1aa0ebe78d84ba7067078512b4ea6e4492d622b8"
dependencies = [
"actix-router",
"proc-macro2",
"quote",
"syn 2.0.117",
]
[[package]]
name = "actix_derive"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6ac1e58cded18cb28ddc17143c4dea5345b3ad575e14f32f66e4054a56eb271"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.117",
]
[[package]]
name = "addchain"
version = "0.2.1"
@ -1011,7 +788,7 @@ dependencies = [
"axum-core 0.4.5",
"bytes",
"futures-util",
"http 1.4.0",
"http",
"http-body",
"http-body-util",
"hyper",
@ -1045,7 +822,7 @@ dependencies = [
"bytes",
"form_urlencoded",
"futures-util",
"http 1.4.0",
"http",
"http-body",
"http-body-util",
"hyper",
@ -1080,7 +857,7 @@ dependencies = [
"async-trait",
"bytes",
"futures-util",
"http 1.4.0",
"http",
"http-body",
"http-body-util",
"mime",
@ -1099,7 +876,7 @@ checksum = "08c78f31d7b1291f7ee735c1c6780ccde7785daae9a9206026862dab7d8792d1"
dependencies = [
"bytes",
"futures-core",
"http 1.4.0",
"http",
"http-body",
"http-body-util",
"mime",
@ -1313,7 +1090,7 @@ dependencies = [
"futures-util",
"hex",
"home",
"http 1.4.0",
"http",
"http-body-util",
"hyper",
"hyper-named-pipe",
@ -1466,15 +1243,6 @@ dependencies = [
"serde_core",
]
[[package]]
name = "bytestring"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "113b4343b5f6617e7ad401ced8de3cc8b012e73a594347c307b90db3e9271289"
dependencies = [
"bytes",
]
[[package]]
name = "bzip2-sys"
version = "0.1.13+1.0.8"
@ -1732,20 +1500,15 @@ dependencies = [
"anyhow",
"base64 0.22.1",
"borsh",
"bytesize",
"hex",
"log",
"logos-blockchain-common-http-client",
"nssa",
"nssa_core",
"reqwest",
"serde",
"serde_json",
"serde_with",
"sha2",
"thiserror 2.0.18",
"tokio-retry",
"url",
]
[[package]]
@ -1877,15 +1640,6 @@ dependencies = [
"unicode-segmentation",
]
[[package]]
name = "convert_case"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "633458d4ef8c78b72454de2d54fd6ab2e60f9e02be22f3c6104cdc8a4e0fceb9"
dependencies = [
"unicode-segmentation",
]
[[package]]
name = "convert_case"
version = "0.11.0"
@ -1992,15 +1746,6 @@ version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b"
[[package]]
name = "crossbeam-channel"
version = "0.5.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2"
dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-deque"
version = "0.8.6"
@ -2297,7 +2042,6 @@ version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "799a97264921d8623a957f6c3b9011f3b5492f557bbb7a5a19b7fa6d06ba8dcb"
dependencies = [
"convert_case 0.10.0",
"proc-macro2",
"quote",
"rustc_version",
@ -3099,7 +2843,7 @@ dependencies = [
"futures-core",
"futures-sink",
"gloo-utils",
"http 1.4.0",
"http",
"js-sys",
"pin-project",
"serde",
@ -3163,7 +2907,7 @@ dependencies = [
"fnv",
"futures-core",
"futures-sink",
"http 1.4.0",
"http",
"indexmap 2.13.0",
"slab",
"tokio",
@ -3318,17 +3062,6 @@ dependencies = [
"utf8-width",
]
[[package]]
name = "http"
version = "0.2.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1"
dependencies = [
"bytes",
"fnv",
"itoa",
]
[[package]]
name = "http"
version = "1.4.0"
@ -3346,7 +3079,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184"
dependencies = [
"bytes",
"http 1.4.0",
"http",
]
[[package]]
@ -3357,7 +3090,7 @@ checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a"
dependencies = [
"bytes",
"futures-core",
"http 1.4.0",
"http",
"http-body",
"pin-project-lite",
]
@ -3432,7 +3165,7 @@ dependencies = [
"futures-channel",
"futures-core",
"h2",
"http 1.4.0",
"http",
"http-body",
"httparse",
"httpdate",
@ -3465,7 +3198,7 @@ version = "0.27.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58"
dependencies = [
"http 1.4.0",
"http",
"hyper",
"hyper-util",
"log",
@ -3516,14 +3249,14 @@ dependencies = [
"bytes",
"futures-channel",
"futures-util",
"http 1.4.0",
"http",
"http-body",
"hyper",
"ipnet",
"libc",
"percent-encoding",
"pin-project-lite",
"socket2 0.6.3",
"socket2",
"system-configuration",
"tokio",
"tower-service",
@ -3684,12 +3417,6 @@ dependencies = [
"icu_properties",
]
[[package]]
name = "impl-more"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8a5a9a0ff0086c7a148acb942baaabeadf9504d10400b5a05645853729b9cd2"
[[package]]
name = "include_bytes_aligned"
version = "0.1.4"
@ -3725,7 +3452,6 @@ version = "0.1.0"
dependencies = [
"anyhow",
"arc-swap",
"async-trait",
"clap",
"env_logger",
"futures",
@ -3825,8 +3551,6 @@ name = "integration_tests"
version = "0.1.0"
dependencies = [
"anyhow",
"base64 0.22.1",
"borsh",
"bytesize",
"common",
"env_logger",
@ -3839,7 +3563,8 @@ dependencies = [
"nssa",
"nssa_core",
"sequencer_core",
"sequencer_runner",
"sequencer_service",
"sequencer_service_rpc",
"serde_json",
"tempfile",
"testcontainers",
@ -4048,7 +3773,7 @@ dependencies = [
"futures-channel",
"futures-util",
"gloo-net",
"http 1.4.0",
"http",
"jsonrpsee-core",
"pin-project",
"rustls",
@ -4073,7 +3798,7 @@ dependencies = [
"bytes",
"futures-timer",
"futures-util",
"http 1.4.0",
"http",
"http-body",
"http-body-util",
"jsonrpsee-types",
@ -4134,7 +3859,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c51b7c290bb68ce3af2d029648148403863b982f138484a73f02a9dd52dbd7f"
dependencies = [
"futures-util",
"http 1.4.0",
"http",
"http-body",
"http-body-util",
"hyper",
@ -4160,7 +3885,7 @@ version = "0.26.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc88ff4688e43cc3fa9883a8a95c6fa27aa2e76c96e610b737b6554d650d7fd5"
dependencies = [
"http 1.4.0",
"http",
"serde",
"serde_json",
"thiserror 2.0.18",
@ -4184,7 +3909,7 @@ version = "0.26.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b6fceceeb05301cc4c065ab3bd2fa990d41ff4eb44e4ca1b30fa99c057c3e79"
dependencies = [
"http 1.4.0",
"http",
"jsonrpsee-client-transport",
"jsonrpsee-core",
"jsonrpsee-types",
@ -4238,12 +3963,6 @@ dependencies = [
"thiserror 2.0.18",
]
[[package]]
name = "language-tags"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d4345964bb142484797b161f473a503a434de77149dd8c7427788c6e13379388"
[[package]]
name = "lazy-regex"
version = "3.6.0"
@ -4620,12 +4339,6 @@ version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77"
[[package]]
name = "local-waker"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4d873d7c67ce09b42110d801813efbc9364414e356be9935700d368351657487"
[[package]]
name = "lock_api"
version = "0.4.14"
@ -5384,7 +5097,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc"
dependencies = [
"libc",
"log",
"wasi",
"windows-sys 0.61.2",
]
@ -5398,7 +5110,7 @@ dependencies = [
"bytes",
"encoding_rs",
"futures-util",
"http 1.4.0",
"http",
"httparse",
"memchr",
"mime",
@ -5545,6 +5257,7 @@ dependencies = [
"risc0-zkvm",
"secp256k1",
"serde",
"serde_with",
"sha2",
"test-case",
"test_program_methods",
@ -6148,8 +5861,10 @@ name = "program_deployment"
version = "0.1.0"
dependencies = [
"clap",
"common",
"nssa",
"nssa_core",
"sequencer_service_rpc",
"tokio",
"wallet",
]
@ -6270,7 +5985,7 @@ dependencies = [
"quinn-udp",
"rustc-hash",
"rustls",
"socket2 0.6.3",
"socket2",
"thiserror 2.0.18",
"tokio",
"tracing",
@ -6307,7 +6022,7 @@ dependencies = [
"cfg_aliases",
"libc",
"once_cell",
"socket2 0.6.3",
"socket2",
"tracing",
"windows-sys 0.60.2",
]
@ -6581,12 +6296,6 @@ dependencies = [
"regex-syntax",
]
[[package]]
name = "regex-lite"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cab834c73d247e67f4fae452806d17d3c7501756d98c8808d7c9c7aa7d18f973"
[[package]]
name = "regex-syntax"
version = "0.8.10"
@ -6606,7 +6315,7 @@ dependencies = [
"futures-core",
"futures-util",
"h2",
"http 1.4.0",
"http",
"http-body",
"http-body-util",
"hyper",
@ -7453,47 +7162,43 @@ dependencies = [
]
[[package]]
name = "sequencer_rpc"
name = "sequencer_service"
version = "0.1.0"
dependencies = [
"actix-cors",
"actix-web",
"anyhow",
"base58",
"base64 0.22.1",
"bedrock_client",
"borsh",
"bytesize",
"common",
"futures",
"hex",
"itertools 0.14.0",
"log",
"mempool",
"nssa",
"sequencer_core",
"serde",
"serde_json",
"tempfile",
"tokio",
]
[[package]]
name = "sequencer_runner"
version = "0.1.0"
dependencies = [
"actix",
"actix-web",
"anyhow",
"clap",
"common",
"env_logger",
"futures",
"indexer_service_rpc",
"jsonrpsee",
"log",
"mempool",
"nssa",
"sequencer_core",
"sequencer_rpc",
"sequencer_service_protocol",
"sequencer_service_rpc",
"tokio",
"tokio-util",
]
[[package]]
name = "sequencer_service_protocol"
version = "0.1.0"
dependencies = [
"common",
"nssa",
"nssa_core",
]
[[package]]
name = "sequencer_service_rpc"
version = "0.1.0"
dependencies = [
"jsonrpsee",
"sequencer_service_protocol",
]
[[package]]
@ -7689,7 +7394,7 @@ dependencies = [
"const_format",
"futures",
"gloo-net",
"http 1.4.0",
"http",
"http-body-util",
"hyper",
"inventory",
@ -7826,16 +7531,6 @@ dependencies = [
"tokio",
]
[[package]]
name = "socket2"
version = "0.5.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678"
dependencies = [
"libc",
"windows-sys 0.52.0",
]
[[package]]
name = "socket2"
version = "0.6.3"
@ -7855,7 +7550,7 @@ dependencies = [
"base64 0.22.1",
"bytes",
"futures",
"http 1.4.0",
"http",
"httparse",
"log",
"rand 0.8.5",
@ -8161,7 +7856,7 @@ dependencies = [
"etcetera",
"ferroid",
"futures",
"http 1.4.0",
"http",
"itertools 0.14.0",
"log",
"memchr",
@ -8321,7 +8016,7 @@ dependencies = [
"parking_lot",
"pin-project-lite",
"signal-hook-registry",
"socket2 0.6.3",
"socket2",
"tokio-macros",
"windows-sys 0.61.2",
]
@ -8518,7 +8213,7 @@ dependencies = [
"base64 0.22.1",
"bytes",
"h2",
"http 1.4.0",
"http",
"http-body",
"http-body-util",
"hyper",
@ -8526,7 +8221,7 @@ dependencies = [
"hyper-util",
"percent-encoding",
"pin-project",
"socket2 0.6.3",
"socket2",
"sync_wrapper",
"tokio",
"tokio-stream",
@ -8576,7 +8271,7 @@ dependencies = [
"bytes",
"futures-core",
"futures-util",
"http 1.4.0",
"http",
"http-body",
"http-body-util",
"http-range-header",
@ -8678,7 +8373,7 @@ checksum = "8628dcc84e5a09eb3d8423d6cb682965dea9133204e8fb3efee74c2a0c259442"
dependencies = [
"bytes",
"data-encoding",
"http 1.4.0",
"http",
"httparse",
"log",
"rand 0.9.2",
@ -8860,7 +8555,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d81f9efa9df032be5934a46a068815a10a042b494b6a58cb0a1a97bb5467ed6f"
dependencies = [
"base64 0.22.1",
"http 1.4.0",
"http",
"httparse",
"log",
]
@ -8955,8 +8650,6 @@ dependencies = [
"anyhow",
"async-stream",
"base58",
"base64 0.22.1",
"borsh",
"clap",
"common",
"env_logger",
@ -8972,9 +8665,11 @@ dependencies = [
"nssa_core",
"optfield",
"rand 0.8.5",
"sequencer_service_rpc",
"serde",
"serde_json",
"sha2",
"thiserror 2.0.18",
"token_core",
"tokio",
"url",
@ -8985,9 +8680,9 @@ name = "wallet-ffi"
version = "0.1.0"
dependencies = [
"cbindgen",
"common",
"nssa",
"nssa_core",
"sequencer_service_rpc",
"tempfile",
"tokio",
"wallet",

View File

@ -17,9 +17,10 @@ members = [
"programs/amm",
"programs/token/core",
"programs/token",
"sequencer_core",
"sequencer_rpc",
"sequencer_runner",
"sequencer/core",
"sequencer/service",
"sequencer/service/protocol",
"sequencer/service/rpc",
"indexer/core",
"indexer/service",
"indexer/service/protocol",
@ -42,9 +43,10 @@ common = { path = "common" }
mempool = { path = "mempool" }
storage = { path = "storage" }
key_protocol = { path = "key_protocol" }
sequencer_core = { path = "sequencer_core" }
sequencer_rpc = { path = "sequencer_rpc" }
sequencer_runner = { path = "sequencer_runner" }
sequencer_core = { path = "sequencer/core" }
sequencer_service_protocol = { path = "sequencer/service/protocol" }
sequencer_service_rpc = { path = "sequencer/service/rpc" }
sequencer_service = { path = "sequencer/service" }
indexer_core = { path = "indexer/core" }
indexer_service = { path = "indexer/service" }
indexer_service_protocol = { path = "indexer/service/protocol" }

View File

@ -30,10 +30,10 @@ run-bedrock:
docker compose up
# Run Sequencer
[working-directory: 'sequencer_runner']
[working-directory: 'sequencer/service']
run-sequencer:
@echo "🧠 Running sequencer"
RUST_LOG=info RISC0_DEV_MODE=1 cargo run --release -p sequencer_runner configs/debug
RUST_LOG=info RISC0_DEV_MODE=1 cargo run --release -p sequencer_service configs/debug/sequencer_config.json
# Run Indexer
[working-directory: 'indexer/service']
@ -62,8 +62,8 @@ run-wallet +args:
# Clean runtime data
clean:
@echo "🧹 Cleaning run artifacts"
rm -rf sequencer_runner/bedrock_signing_key
rm -rf sequencer_runner/rocksdb
rm -rf sequencer/service/bedrock_signing_key
rm -rf sequencer/service/rocksdb
rm -rf indexer/service/rocksdb
rm -rf wallet/configs/debug/storage.json
rm -rf rocksdb

View File

@ -161,7 +161,7 @@ The sequencer and logos blockchain node can be run locally:
- `RUST_LOG=info cargo run -p indexer_service indexer/service/configs/indexer_config.json`
3. On another terminal go to the `logos-blockchain/lssa` repo and run the sequencer:
- `RUST_LOG=info cargo run -p sequencer_runner sequencer_runner/configs/debug`
- `RUST_LOG=info cargo run -p sequencer_service sequencer/service/configs/debug/sequencer_config.json`
4. (To run the explorer): on another terminal go to `logos-blockchain/lssa/explorer_service` and run the following:
- `cargo install cargo-leptos`
- `cargo leptos build --release`
@ -171,8 +171,8 @@ The sequencer and logos blockchain node can be run locally:
After stopping services above you need to remove 3 folders to start cleanly:
1. In the `logos-blockchain/logos-blockchain` folder `state` (not needed in case of docker setup)
2. In the `lssa` folder `sequencer_runner/rocksdb`
3. In the `lssa` file `sequencer_runner/bedrock_signing_key`
2. In the `lssa` folder `sequencer/service/rocksdb`
3. In the `lssa` file `sequencer/service/bedrock_signing_key`
4. In the `lssa` folder `indexer/service/rocksdb`
### Normal mode (`just` commands)
@ -220,7 +220,7 @@ This will use a wallet binary built from this repo and not the one installed in
### Standalone mode
The sequencer can be run in standalone mode with:
```bash
RUST_LOG=info cargo run --features standalone -p sequencer_runner sequencer_runner/configs/debug
RUST_LOG=info cargo run --features standalone -p sequencer_service sequencer/service/configs/debug
```
## Running with Docker

View File

@ -13,16 +13,11 @@ nssa_core.workspace = true
anyhow.workspace = true
thiserror.workspace = true
serde_json.workspace = true
serde.workspace = true
serde_with.workspace = true
reqwest.workspace = true
base64.workspace = true
sha2.workspace = true
log.workspace = true
hex.workspace = true
borsh.workspace = true
bytesize.workspace = true
base64.workspace = true
url.workspace = true
logos-blockchain-common-http-client.workspace = true
tokio-retry.workspace = true

View File

@ -60,6 +60,18 @@ pub struct Block {
pub bedrock_parent_id: MantleMsgId,
}
impl Serialize for Block {
fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
crate::borsh_base64::serialize(self, serializer)
}
}
impl<'de> Deserialize<'de> for Block {
fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
crate::borsh_base64::deserialize(deserializer)
}
}
#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
pub struct HashableBlockData {
pub block_id: BlockId,

View File

@ -0,0 +1,25 @@
//! This module provides utilities for serializing and deserializing data by combining Borsh and
//! Base64 encodings.
use base64::{Engine as _, engine::general_purpose::STANDARD};
use borsh::{BorshDeserialize, BorshSerialize};
use serde::{Deserialize, Serialize};
pub fn serialize<T: BorshSerialize, S: serde::Serializer>(
value: &T,
serializer: S,
) -> Result<S::Ok, S::Error> {
let borsh_encoded = borsh::to_vec(value).map_err(serde::ser::Error::custom)?;
let base64_encoded = STANDARD.encode(&borsh_encoded);
Serialize::serialize(&base64_encoded, serializer)
}
pub fn deserialize<'de, T: BorshDeserialize, D: serde::Deserializer<'de>>(
deserializer: D,
) -> Result<T, D::Error> {
let base64_encoded = <String as Deserialize>::deserialize(deserializer)?;
let borsh_encoded = STANDARD
.decode(base64_encoded.as_bytes())
.map_err(serde::de::Error::custom)?;
borsh::from_slice(&borsh_encoded).map_err(serde::de::Error::custom)
}

View File

@ -1,43 +0,0 @@
use nssa::AccountId;
use serde::Deserialize;
use crate::rpc_primitives::errors::RpcError;
#[derive(Debug, Clone, Deserialize)]
pub struct SequencerRpcError {
pub jsonrpc: String,
pub error: RpcError,
pub id: u64,
}
#[derive(thiserror::Error, Debug)]
pub enum SequencerClientError {
#[error("HTTP error")]
HTTPError(#[from] reqwest::Error),
#[error("Serde error")]
SerdeError(#[from] serde_json::Error),
#[error("Internal error: {0:?}")]
InternalError(SequencerRpcError),
}
impl From<SequencerRpcError> for SequencerClientError {
fn from(value: SequencerRpcError) -> Self {
Self::InternalError(value)
}
}
#[derive(Debug, thiserror::Error)]
pub enum ExecutionFailureKind {
#[error("Failed to get data from sequencer")]
SequencerError(#[source] anyhow::Error),
#[error("Inputs amounts does not match outputs")]
AmountMismatchError,
#[error("Accounts key not found")]
KeyNotFoundError,
#[error("Sequencer client error: {0:?}")]
SequencerClientError(#[from] SequencerClientError),
#[error("Can not pay for operation")]
InsufficientFundsError,
#[error("Account {0} data is invalid")]
AccountDataError(AccountId),
}

View File

@ -4,10 +4,8 @@ use borsh::{BorshDeserialize, BorshSerialize};
use serde_with::{DeserializeFromStr, SerializeDisplay};
pub mod block;
mod borsh_base64;
pub mod config;
pub mod error;
pub mod rpc_primitives;
pub mod sequencer_client;
pub mod transaction;
// Module for tests utility functions

View File

@ -1,194 +0,0 @@
use std::fmt;
use serde_json::{Value, to_value};
#[derive(serde::Serialize)]
pub struct RpcParseError(pub String);
/// This struct may be returned from JSON RPC server in case of error.
///
/// It is expected that that this struct has impls From<_> all other RPC errors
/// like [`RpcBlockError`](crate::types::blocks::RpcBlockError).
#[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Eq)]
#[serde(deny_unknown_fields)]
pub struct RpcError {
#[serde(flatten)]
pub error_struct: Option<RpcErrorKind>,
/// Deprecated please use the `error_struct` instead.
pub code: i64,
/// Deprecated please use the `error_struct` instead.
pub message: String,
/// Deprecated please use the `error_struct` instead.
#[serde(skip_serializing_if = "Option::is_none")]
pub data: Option<Value>,
}
#[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Eq)]
#[serde(tag = "name", content = "cause", rename_all = "SCREAMING_SNAKE_CASE")]
pub enum RpcErrorKind {
RequestValidationError(RpcRequestValidationErrorKind),
HandlerError(Value),
InternalError(Value),
}
#[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Eq)]
#[serde(tag = "name", content = "info", rename_all = "SCREAMING_SNAKE_CASE")]
pub enum RpcRequestValidationErrorKind {
MethodNotFound { method_name: String },
ParseError { error_message: String },
}
/// A general Server Error.
#[derive(serde::Serialize, serde::Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum ServerError {
Timeout,
Closed,
}
impl RpcError {
/// A generic constructor.
///
/// Mostly for completeness, doesn't do anything but filling in the corresponding fields.
#[must_use]
pub const fn new(code: i64, message: String, data: Option<Value>) -> Self {
Self {
code,
message,
data,
error_struct: None,
}
}
/// Create an Invalid Param error.
pub fn invalid_params(data: impl serde::Serialize) -> Self {
let value = match to_value(data) {
Ok(value) => value,
Err(err) => {
return Self::server_error(Some(format!(
"Failed to serialize invalid parameters error: {:?}",
err.to_string()
)));
}
};
Self::new(-32_602, "Invalid params".to_owned(), Some(value))
}
/// Create a server error.
pub fn server_error<E: serde::Serialize>(e: Option<E>) -> Self {
Self::new(
-32_000,
"Server error".to_owned(),
e.map(|v| to_value(v).expect("Must be representable in JSON")),
)
}
/// Create a parse error.
#[must_use]
pub fn parse_error(e: String) -> Self {
Self {
code: -32_700,
message: "Parse error".to_owned(),
data: Some(Value::String(e.clone())),
error_struct: Some(RpcErrorKind::RequestValidationError(
RpcRequestValidationErrorKind::ParseError { error_message: e },
)),
}
}
#[must_use]
pub fn serialization_error(e: &str) -> Self {
Self::new_internal_error(Some(Value::String(e.to_owned())), e)
}
/// Helper method to define extract `INTERNAL_ERROR` in separate `RpcErrorKind`
/// Returns `HANDLER_ERROR` if the error is not internal one.
#[must_use]
pub fn new_internal_or_handler_error(error_data: Option<Value>, error_struct: Value) -> Self {
if error_struct["name"] == "INTERNAL_ERROR" {
let error_message = match error_struct["info"].get("error_message") {
Some(Value::String(error_message)) => error_message.as_str(),
_ => "InternalError happened during serializing InternalError",
};
Self::new_internal_error(error_data, error_message)
} else {
Self::new_handler_error(error_data, error_struct)
}
}
#[must_use]
pub fn new_internal_error(error_data: Option<Value>, info: &str) -> Self {
Self {
code: -32_000,
message: "Server error".to_owned(),
data: error_data,
error_struct: Some(RpcErrorKind::InternalError(serde_json::json!({
"name": "INTERNAL_ERROR",
"info": serde_json::json!({"error_message": info})
}))),
}
}
fn new_handler_error(error_data: Option<Value>, error_struct: Value) -> Self {
Self {
code: -32_000,
message: "Server error".to_owned(),
data: error_data,
error_struct: Some(RpcErrorKind::HandlerError(error_struct)),
}
}
/// Create a method not found error.
#[must_use]
pub fn method_not_found(method: String) -> Self {
Self {
code: -32_601,
message: "Method not found".to_owned(),
data: Some(Value::String(method.clone())),
error_struct: Some(RpcErrorKind::RequestValidationError(
RpcRequestValidationErrorKind::MethodNotFound {
method_name: method,
},
)),
}
}
}
impl fmt::Display for RpcError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{self:?}")
}
}
impl From<RpcParseError> for RpcError {
fn from(parse_error: RpcParseError) -> Self {
Self::parse_error(parse_error.0)
}
}
impl From<std::convert::Infallible> for RpcError {
fn from(_: std::convert::Infallible) -> Self {
// SAFETY: Infallible error can never be constructed, so this code can never be reached.
unsafe { core::hint::unreachable_unchecked() }
}
}
impl fmt::Display for ServerError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Timeout => write!(f, "ServerError: Timeout"),
Self::Closed => write!(f, "ServerError: Closed"),
}
}
}
impl From<ServerError> for RpcError {
fn from(e: ServerError) -> Self {
let error_data = match to_value(&e) {
Ok(value) => value,
Err(_err) => {
return Self::new_internal_error(None, "Failed to serialize ServerError");
}
};
Self::new_internal_error(Some(error_data), e.to_string().as_str())
}
}

View File

@ -1,588 +0,0 @@
// Copyright 2017 tokio-jsonrpc Developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! JSON-RPC 2.0 messages.
//!
//! The main entrypoint here is the [Message](enum.Message.html). The others are just building
//! blocks and you should generally work with `Message` instead.
use std::fmt::{Formatter, Result as FmtResult};
use serde::{
de::{Deserializer, Error, Unexpected, Visitor},
ser::{SerializeStruct as _, Serializer},
};
use serde_json::{Result as JsonResult, Value};
use super::errors::RpcError;
pub type Parsed = Result<Message, Broken>;
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
struct Version;
impl serde::Serialize for Version {
fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
serializer.serialize_str("2.0")
}
}
impl<'de> serde::Deserialize<'de> for Version {
#[expect(
clippy::renamed_function_params,
reason = "More readable than original serde parameter names"
)]
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
struct VersionVisitor;
impl Visitor<'_> for VersionVisitor {
type Value = Version;
fn expecting(&self, formatter: &mut Formatter<'_>) -> FmtResult {
formatter.write_str("a version string")
}
fn visit_str<E: Error>(self, value: &str) -> Result<Version, E> {
match value {
"2.0" => Ok(Version),
_ => Err(E::invalid_value(Unexpected::Str(value), &"value 2.0")),
}
}
}
deserializer.deserialize_str(VersionVisitor)
}
}
/// An RPC request.
#[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Eq)]
#[serde(deny_unknown_fields)]
#[expect(
clippy::partial_pub_fields,
reason = "We don't want to allow access to the version, but the others are public for ease of use"
)]
pub struct Request {
jsonrpc: Version,
pub method: String,
#[serde(default, skip_serializing_if = "Value::is_null")]
pub params: Value,
pub id: Value,
}
impl Request {
#[must_use]
pub fn from_payload_version_2_0(method: String, payload: serde_json::Value) -> Self {
Self {
jsonrpc: Version,
method,
params: payload,
// ToDo: Correct checking of id
id: 1.into(),
}
}
/// Answer the request with a (positive) reply.
///
/// The ID is taken from the request.
#[must_use]
pub fn reply(&self, reply: Value) -> Message {
Message::Response(Response {
jsonrpc: Version,
result: Ok(reply),
id: self.id.clone(),
})
}
/// Answer the request with an error.
#[must_use]
pub fn error(&self, error: RpcError) -> Message {
Message::Response(Response {
jsonrpc: Version,
result: Err(error),
id: self.id.clone(),
})
}
}
/// A response to an RPC.
///
/// It is created by the methods on [Request](struct.Request.html).
#[expect(
clippy::partial_pub_fields,
reason = "We don't want to allow access to the version, but the others are public for ease of use"
)]
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Response {
jsonrpc: Version,
pub result: Result<Value, RpcError>,
pub id: Value,
}
impl serde::Serialize for Response {
    /// Hand-rolled serialization: a successful result is emitted under the
    /// `"result"` key, a failure under `"error"` — an either-or layout a
    /// plain derive cannot express.
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        let mut state = serializer.serialize_struct("Response", 3)?;
        state.serialize_field("jsonrpc", &self.jsonrpc)?;
        match &self.result {
            Ok(value) => state.serialize_field("result", value)?,
            Err(err) => state.serialize_field("error", err)?,
        }
        state.serialize_field("id", &self.id)?;
        state.end()
    }
}
/// A helper trick for deserialization.
///
/// Mirrors the exact wire layout of a response so that `result`/`error` can be
/// validated before being folded into `Response::result`.
#[derive(serde::Deserialize)]
#[serde(deny_unknown_fields)]
struct WireResponse {
    // It is actually used to eat and sanity check the deserialized text
    #[serde(rename = "jsonrpc")]
    _jsonrpc: Version,
    // Make sure we accept null as Some(Value::Null), instead of going to None
    #[serde(default, deserialize_with = "some_value")]
    result: Option<Value>,
    // The error object, present when the call failed.
    error: Option<RpcError>,
    // ID echoed from the request (may be null).
    id: Value,
}
// Implementing deserialize is hard. We sidestep the difficulty by deserializing a similar
// structure that directly corresponds to whatever is on the wire and then convert it to our more
// convenient representation.
impl<'de> serde::Deserialize<'de> for Response {
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        let wire: WireResponse = serde::Deserialize::deserialize(deserializer)?;
        // Exactly one of `result`/`error` must be present on the wire.
        let result = match (wire.result, wire.error) {
            (Some(value), None) => Ok(value),
            (None, Some(rpc_error)) => Err(rpc_error),
            _ => {
                return Err(D::Error::custom(
                    "Either 'error' or 'result' is expected, but not both",
                ));
            }
        };
        Ok(Self {
            jsonrpc: Version,
            result,
            id: wire.id,
        })
    }
}
/// A notification (doesn't expect an answer).
#[expect(
    clippy::partial_pub_fields,
    reason = "We don't want to allow access to the version, but the others are public for ease of use"
)]
#[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Eq)]
#[serde(deny_unknown_fields)]
pub struct Notification {
    // Kept private so the protocol version cannot be tampered with from outside.
    jsonrpc: Version,
    /// Name of the notified method.
    pub method: String,
    /// Parameters of the notification; `Value::Null` when absent on the wire.
    #[serde(default, skip_serializing_if = "Value::is_null")]
    pub params: Value,
}
/// One message of the JSON RPC protocol.
///
/// One message, directly mapped from the structures of the protocol. See the
/// [specification](http://www.jsonrpc.org/specification) for more details.
///
/// Since the protocol allows one endpoint to be both client and server at the same time, the
/// message can decode and encode both directions of the protocol.
///
/// The `Batch` variant is supposed to be created directly, without a constructor.
///
/// The `UnmatchedSub` variant is used when a request is an array and some of the subrequests
/// aren't recognized as valid json rpc 2.0 messages. This is never returned as a top-level
/// element, it is returned as `Err(Broken::Unmatched)`.
#[derive(Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize)]
#[serde(untagged)]
pub enum Message {
    /// An RPC request.
    Request(Request),
    /// A response to a Request.
    Response(Response),
    /// A notification.
    Notification(Notification),
    /// A batch of more requests or responses.
    ///
    /// The protocol allows bundling multiple requests, notifications or responses to a single
    /// message.
    ///
    /// This variant has no direct constructor and is expected to be constructed manually.
    Batch(Vec<Self>),
    /// An unmatched sub entry in a `Batch`.
    ///
    /// When there's a `Batch` and an element doesn't conform to the JSONRPC 2.0 format, that one
    /// is represented by this. This is never produced as a top-level value when parsing, the
    /// `Err(Broken::Unmatched)` is used instead. It is not possible to serialize.
    #[serde(skip_serializing)]
    UnmatchedSub(Value),
}
impl Message {
    /// A constructor for a request.
    ///
    /// The ID is auto-set to dontcare.
    #[must_use]
    pub fn request(method: String, params: Value) -> Self {
        Self::Request(Request {
            jsonrpc: Version,
            method,
            params,
            id: Value::from("dontcare"),
        })
    }

    /// Create a top-level error (without an ID).
    #[must_use]
    pub const fn error(error: RpcError) -> Self {
        Self::Response(Response {
            jsonrpc: Version,
            result: Err(error),
            id: Value::Null,
        })
    }

    /// A constructor for a notification.
    #[must_use]
    pub const fn notification(method: String, params: Value) -> Self {
        Self::Notification(Notification {
            jsonrpc: Version,
            method,
            params,
        })
    }

    /// A constructor for a response.
    #[must_use]
    pub const fn response(id: Value, result: Result<Value, RpcError>) -> Self {
        Self::Response(Response {
            jsonrpc: Version,
            result,
            id,
        })
    }

    /// Returns id or Null if there is no id.
    #[must_use]
    pub fn id(&self) -> Value {
        match self {
            Self::Request(Request { id, .. }) | Self::Response(Response { id, .. }) => id.clone(),
            Self::Notification(_) | Self::Batch(_) | Self::UnmatchedSub(_) => Value::Null,
        }
    }
}
impl From<Message> for String {
fn from(val: Message) -> Self {
::serde_json::ser::to_string(&val).expect("message serialization to json should not fail")
}
}
impl From<Message> for Vec<u8> {
fn from(val: Message) -> Self {
::serde_json::ser::to_vec(&val)
.expect("message serialization to json bytes should not fail")
}
}
/// A broken message.
///
/// Protocol-level errors.
#[derive(Debug, Clone, PartialEq, Eq, serde::Deserialize)]
#[serde(untagged)]
pub enum Broken {
    /// It was valid JSON, but doesn't match the form of a JSONRPC 2.0 message.
    Unmatched(Value),
    /// Invalid JSON.
    // Never produced by deserialization itself; constructed manually from the
    // serde_json error in `decoded_to_parsed`.
    #[serde(skip_deserializing)]
    SyntaxError(String),
}
impl Broken {
/// Generate an appropriate error message.
///
/// The error message for these things are specified in the RFC, so this just creates an error
/// with the right values.
#[must_use]
pub fn reply(&self) -> Message {
match self {
Self::Unmatched(_) => Message::error(RpcError::parse_error(
"JSON RPC Request format was expected".to_owned(),
)),
Self::SyntaxError(e) => Message::error(RpcError::parse_error(e.clone())),
}
}
}
/// A trick to easily deserialize and detect valid JSON, but invalid Message.
#[derive(serde::Deserialize)]
#[serde(untagged)]
pub enum WireMessage {
    /// A well-formed JSONRPC 2.0 message.
    Message(Message),
    /// Valid JSON that is not a JSONRPC 2.0 message.
    Broken(Broken),
}
/// Fold a raw serde_json decode result into the crate's `Parsed` representation.
pub fn decoded_to_parsed(res: JsonResult<WireMessage>) -> Parsed {
    match res {
        // Invalid JSON altogether.
        Err(e) => Err(Broken::SyntaxError(e.to_string())),
        // Valid JSON that is not a JSONRPC message.
        Ok(WireMessage::Broken(broken)) => Err(broken),
        // `UnmatchedSub` is never surfaced as a top-level message; keep this arm
        // before the general `Message` arm so it takes precedence.
        Ok(WireMessage::Message(Message::UnmatchedSub(value))) => Err(Broken::Unmatched(value)),
        Ok(WireMessage::Message(message)) => Ok(message),
    }
}
/// Read a [Message](enum.Message.html) from a slice.
///
/// Invalid JSON or JSONRPC messages are reported as [Broken](enum.Broken.html).
pub fn from_slice(s: &[u8]) -> Parsed {
    let decoded = ::serde_json::de::from_slice(s);
    decoded_to_parsed(decoded)
}
/// Read a [Message](enum.Message.html) from a string.
///
/// Invalid JSON or JSONRPC messages are reported as [Broken](enum.Broken.html).
pub fn from_str(s: &str) -> Parsed {
from_slice(s.as_bytes())
}
/// Deserializer for `Option<Value>` that produces `Some(Value::Null)`.
///
/// The usual one produces None in that case. But we need to know the difference between
/// `{x: null}` and `{}`.
fn some_value<'de, D: Deserializer<'de>>(deserializer: D) -> Result<Option<Value>, D::Error> {
    let value: Value = serde::Deserialize::deserialize(deserializer)?;
    Ok(Some(value))
}
#[cfg(test)]
mod tests {
    use serde_json::{Value, de::from_slice, json, ser::to_vec};
    use super::*;

    /// Test serialization and deserialization of the Message.
    ///
    /// We first deserialize it from a string. That way we check deserialization works.
    /// But since serialization doesn't have to produce the exact same result (order, spaces, …),
    /// we then serialize and deserialize the thing again and check it matches.
    #[test]
    fn message_serde() {
        // A helper for running one message test: parse `input`, compare against
        // `expected`, then round-trip through serialization and back.
        fn one(input: &str, expected: &Message) {
            let parsed: Message = from_str(input).unwrap();
            assert_eq!(*expected, parsed);
            let serialized = to_vec(&parsed).unwrap();
            let deserialized: Message = from_slice(&serialized).unwrap();
            assert_eq!(parsed, deserialized);
        }
        // A request without parameters
        one(
            r#"{"jsonrpc": "2.0", "method": "call", "id": 1}"#,
            &Message::Request(Request {
                jsonrpc: Version,
                method: "call".to_owned(),
                params: Value::Null,
                id: json!(1),
            }),
        );
        // A request with parameters
        one(
            r#"{"jsonrpc": "2.0", "method": "call", "params": [1, 2, 3], "id": 2}"#,
            &Message::Request(Request {
                jsonrpc: Version,
                method: "call".to_owned(),
                params: json!([1, 2, 3]),
                id: json!(2),
            }),
        );
        // A notification (with parameters)
        one(
            r#"{"jsonrpc": "2.0", "method": "notif", "params": {"x": "y"}}"#,
            &Message::Notification(Notification {
                jsonrpc: Version,
                method: "notif".to_owned(),
                params: json!({"x": "y"}),
            }),
        );
        // A successful response
        one(
            r#"{"jsonrpc": "2.0", "result": 42, "id": 3}"#,
            &Message::Response(Response {
                jsonrpc: Version,
                result: Ok(json!(42)),
                id: json!(3),
            }),
        );
        // A successful response carrying an explicit null result
        one(
            r#"{"jsonrpc": "2.0", "result": null, "id": 3}"#,
            &Message::Response(Response {
                jsonrpc: Version,
                result: Ok(Value::Null),
                id: json!(3),
            }),
        );
        // An error
        one(
            r#"{"jsonrpc": "2.0", "error": {"code": 42, "message": "Wrong!"}, "id": null}"#,
            &Message::Response(Response {
                jsonrpc: Version,
                result: Err(RpcError::new(42, "Wrong!".to_owned(), None)),
                id: Value::Null,
            }),
        );
        // A batch
        one(
            r#"[
                {"jsonrpc": "2.0", "method": "notif"},
                {"jsonrpc": "2.0", "method": "call", "id": 42}
            ]"#,
            &Message::Batch(vec![
                Message::Notification(Notification {
                    jsonrpc: Version,
                    method: "notif".to_owned(),
                    params: Value::Null,
                }),
                Message::Request(Request {
                    jsonrpc: Version,
                    method: "call".to_owned(),
                    params: Value::Null,
                    id: json!(42),
                }),
            ]),
        );
        // Some handling of broken messages inside a batch
        let parsed = from_str(
            r#"[
                {"jsonrpc": "2.0", "method": "notif"},
                {"jsonrpc": "2.0", "method": "call", "id": 42},
                true
            ]"#,
        )
        .unwrap();
        assert_eq!(
            Message::Batch(vec![
                Message::Notification(Notification {
                    jsonrpc: Version,
                    method: "notif".to_owned(),
                    params: Value::Null,
                }),
                Message::Request(Request {
                    jsonrpc: Version,
                    method: "call".to_owned(),
                    params: Value::Null,
                    id: json!(42),
                }),
                Message::UnmatchedSub(Value::Bool(true)),
            ]),
            parsed
        );
        // `UnmatchedSub` is marked skip_serializing, so serializing it must fail.
        to_vec(&Message::UnmatchedSub(Value::Null)).unwrap_err();
    }

    /// Test things that are almost but not entirely JSONRPC are rejected.
    ///
    /// Each input parses as JSON, but must not be recognized as a valid RPC
    /// message; the reject is done by returning it as Unmatched.
    #[test]
    fn broken() {
        // A helper with one test: the input must come back as `Broken::Unmatched`.
        fn one(input: &str) {
            let msg = from_str(input);
            match msg {
                Err(Broken::Unmatched(_)) => (),
                _ => panic!("{input} recognized as an RPC message: {msg:?}!"),
            }
        }
        // Missing the version
        one(r#"{"method": "notif"}"#);
        // Wrong version
        one(r#"{"jsonrpc": 2.0, "method": "notif"}"#);
        // A response with both result and error
        one(r#"{"jsonrpc": "2.0", "result": 42, "error": {"code": 42, "message": "!"}, "id": 1}"#);
        // A response without an id
        one(r#"{"jsonrpc": "2.0", "result": 42}"#);
        // An extra field
        one(r#"{"jsonrpc": "2.0", "method": "weird", "params": 42, "others": 43, "id": 2}"#);
        // Something completely different
        one(r#"{"x": [1, 2, 3]}"#);
        // Not JSON at all must surface as a syntax error, not as Unmatched.
        match from_str("{]") {
            Err(Broken::SyntaxError(_)) => (),
            other => panic!("Something unexpected: {other:?}"),
        }
    }

    /// Test some non-trivial aspects of the constructors.
    ///
    /// This doesn't have a full coverage, because there's not much to actually test there.
    /// Most of it is related to the ids.
    #[test]
    #[ignore = "Not a full coverage test"]
    fn constructors() {
        let msg1 = Message::request("call".to_owned(), json!([1, 2, 3]));
        let msg2 = Message::request("call".to_owned(), json!([1, 2, 3]));
        // They differ, even when created with the same parameters
        assert_ne!(msg1, msg2);
        // And, specifically, they differ in the ID's
        let (req1, req2) = if let (Message::Request(req1), Message::Request(req2)) = (msg1, msg2) {
            assert_ne!(req1.id, req2.id);
            assert!(req1.id.is_string());
            assert!(req2.id.is_string());
            (req1, req2)
        } else {
            panic!("Non-request received");
        };
        let id1 = req1.id.clone();
        // When we answer a message, we get the same ID
        if let Message::Response(resp) = req1.reply(json!([1, 2, 3])) {
            assert_eq!(
                resp,
                Response {
                    jsonrpc: Version,
                    result: Ok(json!([1, 2, 3])),
                    id: id1
                }
            );
        } else {
            panic!("Not a response");
        }
        let id2 = req2.id.clone();
        // The same with an error
        if let Message::Response(resp) = req2.error(RpcError::new(42, "Wrong!".to_owned(), None)) {
            assert_eq!(
                resp,
                Response {
                    jsonrpc: Version,
                    result: Err(RpcError::new(42, "Wrong!".to_owned(), None)),
                    id: id2,
                }
            );
        } else {
            panic!("Not a response");
        }
        // When we have unmatched, we generate a top-level error with Null id.
        if let Message::Response(resp) =
            Message::error(RpcError::new(43, "Also wrong!".to_owned(), None))
        {
            assert_eq!(
                resp,
                Response {
                    jsonrpc: Version,
                    result: Err(RpcError::new(43, "Also wrong!".to_owned(), None)),
                    id: Value::Null,
                }
            );
        } else {
            panic!("Not a response");
        }
    }
}

View File

@ -1,57 +0,0 @@
use bytesize::ByteSize;
use serde::{Deserialize, Serialize};
pub mod errors;
pub mod message;
pub mod parser;
pub mod requests;
/// Resource limits applied to incoming RPC requests.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct RpcLimitsConfig {
    /// Maximum byte size of the json payload.
    pub json_payload_max_size: ByteSize,
}
impl Default for RpcLimitsConfig {
    /// Default limit: a 10 MiB maximum JSON payload.
    fn default() -> Self {
        Self {
            json_payload_max_size: ByteSize::mib(10),
        }
    }
}
/// Configuration of the RPC server endpoint.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct RpcConfig {
    /// Socket address the server binds to, e.g. `"0.0.0.0:3040"`.
    pub addr: String,
    /// Origins accepted for CORS.
    pub cors_allowed_origins: Vec<String>,
    /// Request limits; falls back to `RpcLimitsConfig::default()` when omitted.
    #[serde(default)]
    pub limits_config: RpcLimitsConfig,
}
impl Default for RpcConfig {
    /// Defaults: bind `0.0.0.0:3040`, CORS origins `["*"]`, default limits.
    fn default() -> Self {
        Self {
            addr: "0.0.0.0:3040".to_owned(),
            cors_allowed_origins: vec!["*".to_owned()],
            limits_config: RpcLimitsConfig::default(),
        }
    }
}
impl RpcConfig {
    /// Build a config bound to `addr`, keeping every other field at its default.
    #[must_use]
    pub fn new(addr: &str) -> Self {
        let mut config = Self::default();
        config.addr = addr.to_owned();
        config
    }

    /// Build a config bound to all interfaces on `port`, keeping every other
    /// field at its default.
    #[must_use]
    pub fn with_port(port: u16) -> Self {
        Self::new(&format!("0.0.0.0:{port}"))
    }
}

View File

@ -1,29 +0,0 @@
use serde::de::DeserializeOwned;
use serde_json::Value;
use super::errors::RpcParseError;
/// Implement `RpcRequest` for a type by delegating to `parse_params`.
#[macro_export]
macro_rules! parse_request {
    ($request_name:ty) => {
        impl RpcRequest for $request_name {
            fn parse(value: Option<Value>) -> Result<Self, RpcParseError> {
                parse_params::<Self>(value)
            }
        }
    };
}
/// A request type that can be built from a raw JSON-RPC `params` value.
pub trait RpcRequest: Sized {
    /// Parse the optional `params` value into `Self`.
    fn parse(value: Option<Value>) -> Result<Self, RpcParseError>;
}
/// Deserialize a JSON-RPC `params` value into a concrete request type.
///
/// # Errors
///
/// Returns `RpcParseError` when `value` is absent or does not match `T`'s shape.
pub fn parse_params<T: DeserializeOwned>(value: Option<Value>) -> Result<T, RpcParseError> {
    match value {
        None => Err(RpcParseError("Require at least one parameter".to_owned())),
        Some(params) => serde_json::from_value(params)
            .map_err(|err| RpcParseError(format!("Failed parsing args: {err}"))),
    }
}

View File

@ -1,219 +0,0 @@
use std::collections::HashMap;
use nssa::AccountId;
use nssa_core::program::ProgramId;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use super::{
errors::RpcParseError,
parser::{RpcRequest, parse_params},
};
use crate::{HashType, parse_request};
/// Serde helpers for (de)serializing byte buffers as base64 strings.
mod base64_deser {
    use base64::{Engine as _, engine::general_purpose};
    use serde::{self, Deserialize, Deserializer, Serializer, ser::SerializeSeq as _};

    /// Helpers for a list of byte buffers (`Vec<Vec<u8>>` as a JSON array of base64 strings).
    pub mod vec {
        use super::*;

        /// Serialize each buffer as a standard-alphabet base64 string.
        pub fn serialize<S>(bytes_vec: &[Vec<u8>], serializer: S) -> Result<S::Ok, S::Error>
        where
            S: Serializer,
        {
            let mut seq = serializer.serialize_seq(Some(bytes_vec.len()))?;
            for bytes in bytes_vec {
                let s = general_purpose::STANDARD.encode(bytes);
                seq.serialize_element(&s)?;
            }
            seq.end()
        }

        /// Decode a JSON array of base64 strings back into byte buffers.
        pub fn deserialize<'de, D>(deserializer: D) -> Result<Vec<Vec<u8>>, D::Error>
        where
            D: Deserializer<'de>,
        {
            let base64_strings: Vec<String> = Deserialize::deserialize(deserializer)?;
            base64_strings
                .into_iter()
                .map(|s| {
                    general_purpose::STANDARD
                        .decode(&s)
                        .map_err(serde::de::Error::custom)
                })
                .collect()
        }
    }

    /// Serialize a single buffer as a standard-alphabet base64 string.
    pub fn serialize<S>(bytes: &[u8], serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let base64_string = general_purpose::STANDARD.encode(bytes);
        serializer.serialize_str(&base64_string)
    }

    /// Decode a base64 string back into a byte buffer.
    pub fn deserialize<'de, D>(deserializer: D) -> Result<Vec<u8>, D::Error>
    where
        D: Deserializer<'de>,
    {
        let base64_string: String = Deserialize::deserialize(deserializer)?;
        general_purpose::STANDARD
            .decode(&base64_string)
            .map_err(serde::de::Error::custom)
    }
}
/// Params for the hello/ping RPC call (no payload).
#[derive(Serialize, Deserialize, Debug)]
pub struct HelloRequest;

/// Params for the register-account RPC call.
#[derive(Serialize, Deserialize, Debug)]
pub struct RegisterAccountRequest {
    /// Raw 32-byte account identifier.
    pub account_id: [u8; 32],
}

/// Params for the send-transaction RPC call.
#[derive(Serialize, Deserialize, Debug)]
pub struct SendTxRequest {
    /// Serialized transaction bytes, base64-encoded on the wire.
    #[serde(with = "base64_deser")]
    pub transaction: Vec<u8>,
}

/// Params for the get-block-data RPC call.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetBlockDataRequest {
    pub block_id: u64,
}

/// Get a range of blocks from `start_block_id` to `end_block_id` (inclusive).
#[derive(Serialize, Deserialize, Debug)]
pub struct GetBlockRangeDataRequest {
    pub start_block_id: u64,
    pub end_block_id: u64,
}

/// Params for the get-genesis-id RPC call (no payload).
#[derive(Serialize, Deserialize, Debug)]
pub struct GetGenesisIdRequest;

/// Params for the get-last-block RPC call (no payload).
#[derive(Serialize, Deserialize, Debug)]
pub struct GetLastBlockRequest;

/// Params for the get-initial-testnet-accounts RPC call (no payload).
#[derive(Serialize, Deserialize, Debug)]
pub struct GetInitialTestnetAccountsRequest;

/// Params for the get-account-balance RPC call.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetAccountBalanceRequest {
    pub account_id: AccountId,
}

/// Params for the get-transaction-by-hash RPC call.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetTransactionByHashRequest {
    pub hash: HashType,
}

/// Params for the get-accounts-nonces RPC call.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetAccountsNoncesRequest {
    pub account_ids: Vec<AccountId>,
}

/// Params for the get-account RPC call.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetAccountRequest {
    pub account_id: AccountId,
}

/// Params for the get-proof-for-commitment RPC call.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetProofForCommitmentRequest {
    pub commitment: nssa_core::Commitment,
}

/// Params for the get-program-ids RPC call (no payload).
#[derive(Serialize, Deserialize, Debug)]
pub struct GetProgramIdsRequest;
// Wire up `RpcRequest::parse` for every request type above.
parse_request!(HelloRequest);
parse_request!(RegisterAccountRequest);
parse_request!(SendTxRequest);
parse_request!(GetBlockDataRequest);
parse_request!(GetBlockRangeDataRequest);
parse_request!(GetGenesisIdRequest);
parse_request!(GetLastBlockRequest);
parse_request!(GetInitialTestnetAccountsRequest);
parse_request!(GetAccountBalanceRequest);
parse_request!(GetTransactionByHashRequest);
parse_request!(GetAccountsNoncesRequest);
parse_request!(GetProofForCommitmentRequest);
parse_request!(GetAccountRequest);
parse_request!(GetProgramIdsRequest);
/// Response to [`HelloRequest`].
#[derive(Serialize, Deserialize, Debug)]
pub struct HelloResponse {
    pub greeting: String,
}

/// Response to [`RegisterAccountRequest`].
#[derive(Serialize, Deserialize, Debug)]
pub struct RegisterAccountResponse {
    pub status: String,
}

/// Response to [`SendTxRequest`].
#[derive(Serialize, Deserialize, Debug)]
pub struct SendTxResponse {
    pub status: String,
    pub tx_hash: HashType,
}

/// Response to [`GetBlockDataRequest`].
#[derive(Serialize, Deserialize, Debug)]
pub struct GetBlockDataResponse {
    /// Serialized block bytes, base64-encoded on the wire.
    #[serde(with = "base64_deser")]
    pub block: Vec<u8>,
}

/// Response to [`GetBlockRangeDataRequest`].
#[derive(Serialize, Deserialize, Debug)]
pub struct GetBlockRangeDataResponse {
    /// Serialized blocks, each base64-encoded on the wire.
    #[serde(with = "base64_deser::vec")]
    pub blocks: Vec<Vec<u8>>,
}

/// Response to [`GetGenesisIdRequest`].
#[derive(Serialize, Deserialize, Debug)]
pub struct GetGenesisIdResponse {
    pub genesis_id: u64,
}

/// Response to [`GetLastBlockRequest`].
#[derive(Serialize, Deserialize, Debug)]
pub struct GetLastBlockResponse {
    pub last_block: u64,
}

/// Response to [`GetAccountBalanceRequest`].
#[derive(Serialize, Deserialize, Debug)]
pub struct GetAccountBalanceResponse {
    pub balance: u128,
}

/// Response to [`GetAccountsNoncesRequest`].
#[derive(Serialize, Deserialize, Debug)]
pub struct GetAccountsNoncesResponse {
    pub nonces: Vec<u128>,
}

/// Response to [`GetTransactionByHashRequest`]; `None` when not found.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetTransactionByHashResponse {
    pub transaction: Option<String>,
}

/// Response to [`GetAccountRequest`].
#[derive(Serialize, Deserialize, Debug)]
pub struct GetAccountResponse {
    pub account: nssa::Account,
}

/// Response to [`GetProofForCommitmentRequest`]; `None` when no proof exists.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetProofForCommitmentResponse {
    pub membership_proof: Option<nssa_core::MembershipProof>,
}

/// Response to [`GetProgramIdsRequest`].
#[derive(Serialize, Deserialize, Debug)]
pub struct GetProgramIdsResponse {
    pub program_ids: HashMap<String, ProgramId>,
}

/// One entry of the initial testnet accounts listing.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct GetInitialTestnetAccountsResponse {
    /// Hex encoded account id.
    pub account_id: String,
    pub balance: u64,
}

View File

@ -12,6 +12,18 @@ pub enum NSSATransaction {
ProgramDeployment(nssa::ProgramDeploymentTransaction),
}
impl Serialize for NSSATransaction {
fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
crate::borsh_base64::serialize(self, serializer)
}
}
impl<'de> Deserialize<'de> for NSSATransaction {
fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
crate::borsh_base64::deserialize(deserializer)
}
}
impl NSSATransaction {
#[must_use]
pub fn hash(&self) -> HashType {
@ -90,7 +102,7 @@ impl From<nssa::ProgramDeploymentTransaction> for NSSATransaction {
}
#[derive(
Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, BorshSerialize, BorshDeserialize,
Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, BorshSerialize, BorshDeserialize,
)]
pub enum TxKind {
Public,

View File

@ -1,6 +1,5 @@
{
"home": "/var/lib/sequencer_runner",
"override_rust_log": null,
"home": "/var/lib/sequencer_service",
"genesis_id": 1,
"is_genesis_random": true,
"max_num_tx_in_block": 20,
@ -8,7 +7,6 @@
"mempool_max_size": 10000,
"block_create_timeout": "10s",
"retry_pending_blocks_timeout": "7s",
"port": 3040,
"bedrock_config": {
"backoff": {
"start_delay": "100ms",

View File

@ -7,12 +7,12 @@ services:
environment:
- RUST_LOG=error
sequencer_runner:
sequencer_service:
depends_on:
- logos-blockchain-node-0
- indexer_service
volumes: !override
- ./configs/docker-all-in-one/sequencer:/etc/sequencer_runner
- ./configs/docker-all-in-one/sequencer:/etc/sequencer_service
indexer_service:
depends_on:

View File

@ -6,7 +6,7 @@ include:
- path:
bedrock/docker-compose.yml
- path:
sequencer_runner/docker-compose.yml
sequencer/service/docker-compose.yml
- path:
indexer/service/docker-compose.yml
- path:

View File

@ -8,8 +8,10 @@ license = { workspace = true }
workspace = true
[dependencies]
common.workspace = true
nssa.workspace = true
nssa_core.workspace = true
sequencer_service_rpc = { workspace = true, features = ["client"] }
wallet.workspace = true
tokio = { workspace = true, features = ["macros"] }

View File

@ -1,8 +1,10 @@
use common::transaction::NSSATransaction;
use nssa::{
AccountId, PublicTransaction,
program::Program,
public_transaction::{Message, WitnessSet},
};
use sequencer_service_rpc::RpcClient as _;
use wallet::WalletCore;
// Before running this example, compile the `hello_world.rs` guest program with:
@ -58,7 +60,7 @@ async fn main() {
// Submit the transaction
let _response = wallet_core
.sequencer_client
.send_tx_public(tx)
.send_transaction(NSSATransaction::Public(tx))
.await
.unwrap();
}

View File

@ -1,8 +1,10 @@
use common::transaction::NSSATransaction;
use nssa::{
AccountId, PublicTransaction,
program::Program,
public_transaction::{Message, WitnessSet},
};
use sequencer_service_rpc::RpcClient as _;
use wallet::WalletCore;
// Before running this example, compile the `simple_tail_call.rs` guest program with:
@ -54,7 +56,7 @@ async fn main() {
// Submit the transaction
let _response = wallet_core
.sequencer_client
.send_tx_public(tx)
.send_transaction(NSSATransaction::Public(tx))
.await
.unwrap();
}

View File

@ -1,9 +1,10 @@
use common::transaction::NSSATransaction;
use nssa::{
AccountId, PublicTransaction,
program::Program,
public_transaction::{Message, WitnessSet},
};
use nssa_core::account::Nonce;
use sequencer_service_rpc::RpcClient as _;
use wallet::WalletCore;
// Before running this example, compile the `hello_world_with_authorization.rs` guest program with:
@ -63,13 +64,7 @@ async fn main() {
.await
.expect("Node should be reachable to query account data");
let signing_keys = [signing_key];
let message = Message::try_new(
program.id(),
vec![account_id],
nonces.iter().map(|x| Nonce(*x)).collect(),
greeting,
)
.unwrap();
let message = Message::try_new(program.id(), vec![account_id], nonces, greeting).unwrap();
// Pass the signing key to sign the message. This will be used by the node
// to flag the pre_state as `is_authorized` when executing the program
let witness_set = WitnessSet::for_message(&message, &signing_keys);
@ -78,7 +73,7 @@ async fn main() {
// Submit the transaction
let _response = wallet_core
.sequencer_client
.send_tx_public(tx)
.send_transaction(NSSATransaction::Public(tx))
.await
.unwrap();
}

View File

@ -3,12 +3,14 @@
reason = "This is an example program, it's fine to print to stdout"
)]
use common::transaction::NSSATransaction;
use nssa::{
AccountId, PublicTransaction,
program::Program,
public_transaction::{Message, WitnessSet},
};
use nssa_core::program::PdaSeed;
use sequencer_service_rpc::RpcClient as _;
use wallet::WalletCore;
// Before running this example, compile the `simple_tail_call.rs` guest program with:
@ -56,7 +58,7 @@ async fn main() {
// Submit the transaction
let _response = wallet_core
.sequencer_client
.send_tx_public(tx)
.send_transaction(NSSATransaction::Public(tx))
.await
.unwrap();

View File

@ -1,5 +1,7 @@
use clap::{Parser, Subcommand};
use common::transaction::NSSATransaction;
use nssa::{PublicTransaction, program::Program, public_transaction};
use sequencer_service_rpc::RpcClient as _;
use wallet::{PrivacyPreservingAccount, WalletCore};
// Before running this example, compile the `hello_world_with_move_function.rs` guest program with:
@ -87,7 +89,7 @@ async fn main() {
// Submit the transaction
let _response = wallet_core
.sequencer_client
.send_tx_public(tx)
.send_transaction(NSSATransaction::Public(tx))
.await
.unwrap();
}
@ -126,7 +128,7 @@ async fn main() {
// Submit the transaction
let _response = wallet_core
.sequencer_client
.send_tx_public(tx)
.send_transaction(NSSATransaction::Public(tx))
.await
.unwrap();
}

View File

@ -41,12 +41,12 @@ pub async fn search(query: String) -> Result<SearchResults, ServerFnError> {
// Try as hash
if let Ok(hash) = HashType::from_str(&query) {
// Try as block hash
if let Ok(block) = client.get_block_by_hash(hash).await {
if let Ok(Some(block)) = client.get_block_by_hash(hash).await {
blocks.push(block);
}
// Try as transaction hash
if let Ok(tx) = client.get_transaction(hash).await {
if let Ok(Some(tx)) = client.get_transaction(hash).await {
transactions.push(tx);
}
}
@ -60,7 +60,7 @@ pub async fn search(query: String) -> Result<SearchResults, ServerFnError> {
// Try as block ID
if let Ok(block_id) = query.parse::<u64>()
&& let Ok(block) = client.get_block_by_id(block_id).await
&& let Ok(Some(block)) = client.get_block_by_id(block_id).await
{
blocks.push(block);
}
@ -81,6 +81,7 @@ pub async fn get_block_by_id(block_id: BlockId) -> Result<Block, ServerFnError>
.get_block_by_id(block_id)
.await
.map_err(|e| ServerFnError::ServerError(format!("RPC error: {e}")))
.and_then(|opt| opt.ok_or_else(|| ServerFnError::ServerError("Block not found".to_owned())))
}
/// Get latest block ID
@ -103,6 +104,7 @@ pub async fn get_block_by_hash(block_hash: HashType) -> Result<Block, ServerFnEr
.get_block_by_hash(block_hash)
.await
.map_err(|e| ServerFnError::ServerError(format!("RPC error: {e}")))
.and_then(|opt| opt.ok_or_else(|| ServerFnError::ServerError("Block not found".to_owned())))
}
/// Get transaction by hash
@ -114,6 +116,9 @@ pub async fn get_transaction(tx_hash: HashType) -> Result<Transaction, ServerFnE
.get_transaction(tx_hash)
.await
.map_err(|e| ServerFnError::ServerError(format!("RPC error: {e}")))
.and_then(|opt| {
opt.ok_or_else(|| ServerFnError::ServerError("Transaction not found".to_owned()))
})
}
/// Get blocks with pagination

View File

@ -84,7 +84,7 @@ pub fn TransactionPage() -> impl IntoView {
} = witness_set;
let program_id_str = program_id.to_string();
let proof_len = proof.0.len();
let proof_len = proof.map_or(0, |p| p.0.len());
let signatures_count = signatures_and_public_keys.len();
view! {
@ -190,7 +190,7 @@ pub fn TransactionPage() -> impl IntoView {
(None, None) => "unbounded".to_owned(),
};
let proof_len = proof.0.len();
let proof_len = proof.map_or(0, |p| p.0.len());
view! {
<div class="transaction-details">
<h2>"Privacy-Preserving Transaction Details"</h2>

View File

@ -28,4 +28,3 @@ async-stream.workspace = true
[dev-dependencies]
tempfile.workspace = true

View File

@ -46,7 +46,7 @@ impl IndexerStore {
Ok(self.dbio.get_meta_last_block_in_db()?)
}
pub fn get_block_at_id(&self, id: u64) -> Result<Block> {
pub fn get_block_at_id(&self, id: u64) -> Result<Option<Block>> {
Ok(self.dbio.get_block(id)?)
}
@ -54,20 +54,25 @@ impl IndexerStore {
Ok(self.dbio.get_block_batch(before, limit)?)
}
pub fn get_transaction_by_hash(&self, tx_hash: [u8; 32]) -> Result<NSSATransaction> {
let block = self.get_block_at_id(self.dbio.get_block_id_by_tx_hash(tx_hash)?)?;
let transaction = block
pub fn get_transaction_by_hash(&self, tx_hash: [u8; 32]) -> Result<Option<NSSATransaction>> {
let Some(block_id) = self.dbio.get_block_id_by_tx_hash(tx_hash)? else {
return Ok(None);
};
let Some(block) = self.get_block_at_id(block_id)? else {
return Ok(None);
};
Ok(block
.body
.transactions
.iter()
.find(|enc_tx| enc_tx.hash().0 == tx_hash)
.ok_or_else(|| anyhow::anyhow!("Transaction not found in DB"))?;
Ok(transaction.clone())
.into_iter()
.find(|enc_tx| enc_tx.hash().0 == tx_hash))
}
pub fn get_block_by_hash(&self, hash: [u8; 32]) -> Result<Block> {
self.get_block_at_id(self.dbio.get_block_id_by_hash(hash)?)
pub fn get_block_by_hash(&self, hash: [u8; 32]) -> Result<Option<Block>> {
let Some(id) = self.dbio.get_block_id_by_hash(hash)? else {
return Ok(None);
};
self.get_block_at_id(id)
}
pub fn get_transactions_by_account(
@ -171,7 +176,7 @@ mod tests {
)
.unwrap();
let block = storage.get_block_at_id(1).unwrap();
let block = storage.get_block_at_id(1).unwrap().unwrap();
let final_id = storage.get_last_block_id().unwrap();
assert_eq!(block.header.hash, genesis_block().header.hash);

View File

@ -21,7 +21,6 @@ log.workspace = true
jsonrpsee.workspace = true
serde_json.workspace = true
futures.workspace = true
async-trait = "0.1.89"
arc-swap = "1.8.1"
[features]

View File

@ -11,50 +11,50 @@
"channel_id": "0101010101010101010101010101010101010101010101010101010101010101",
"initial_accounts": [
{
"account_id": "6iArKUXxhUJqS7kCaPNhwMWt3ro71PDyBj7jwAyE2VQV",
"account_id": "CbgR6tj5kWx5oziiFptM7jMvrQeYY3Mzaao6ciuhSr2r",
"balance": 10000
},
{
"account_id": "7wHg9sbJwc6h3NP1S9bekfAzB8CHifEcxKswCKUt3YQo",
"account_id": "2RHZhw9h534Zr3eq2RGhQete2Hh667foECzXPmSkGni2",
"balance": 20000
}
],
"initial_commitments": [
{
"npk":[
177,
64,
1,
"npk": [
139,
19,
158,
11,
87,
38,
254,
159,
155,
231,
165,
1,
94,
64,
137,
243,
76,
249,
101,
251,
129,
33,
101,
189,
30,
42,
11,
191,
34,
103,
186,
227,
230
] ,
85,
206,
132,
228,
220,
114,
145,
89,
113,
156,
238,
142,
242,
74,
182,
91,
43,
100,
6,
190,
31,
15,
31,
88,
96,
204
],
"account": {
"program_owner": [
0,
@ -73,38 +73,38 @@
},
{
"npk": [
32,
67,
72,
164,
106,
53,
66,
239,
141,
15,
52,
230,
136,
177,
2,
236,
207,
243,
173,
134,
135,
210,
143,
87,
232,
33,
223,
54,
226,
10,
71,
215,
128,
194,
120,
113,
224,
4,
165
254,
143,
172,
24,
244,
243,
208,
65,
112,
118,
70,
217,
240,
69,
100,
129,
3,
121,
25,
213,
132,
42,
45
],
"account": {
"program_owner": [
@ -157,4 +157,4 @@
37,
37
]
}
}

View File

@ -363,12 +363,16 @@ impl From<ProgramDeploymentMessage> for nssa::program_deployment_transaction::Me
// WitnessSet conversions
// ============================================================================
impl TryFrom<nssa::public_transaction::WitnessSet> for WitnessSet {
type Error = ();
fn try_from(_value: nssa::public_transaction::WitnessSet) -> Result<Self, Self::Error> {
// Public transaction witness sets don't have proofs, so we can't convert them directly
Err(())
impl From<nssa::public_transaction::WitnessSet> for WitnessSet {
fn from(value: nssa::public_transaction::WitnessSet) -> Self {
Self {
signatures_and_public_keys: value
.signatures_and_public_keys()
.iter()
.map(|(sig, pk)| (sig.clone().into(), pk.clone().into()))
.collect(),
proof: None,
}
}
}
@ -380,7 +384,7 @@ impl From<nssa::privacy_preserving_transaction::witness_set::WitnessSet> for Wit
.into_iter()
.map(|(sig, pk)| (sig.into(), pk.into()))
.collect(),
proof: proof.into(),
proof: Some(proof.into()),
}
}
}
@ -400,7 +404,9 @@ impl TryFrom<WitnessSet> for nssa::privacy_preserving_transaction::witness_set::
Ok(Self::from_raw_parts(
signatures_and_public_keys,
proof.into(),
proof
.map(Into::into)
.ok_or_else(|| nssa::error::NssaError::InvalidInput("Missing proof".to_owned()))?,
))
}
}
@ -420,14 +426,7 @@ impl From<nssa::PublicTransaction> for PublicTransaction {
Self {
hash,
message: message.into(),
witness_set: WitnessSet {
signatures_and_public_keys: witness_set
.signatures_and_public_keys()
.iter()
.map(|(sig, pk)| (sig.clone().into(), pk.clone().into()))
.collect(),
proof: Proof(vec![]), // Public transactions don't have proofs
},
witness_set: witness_set.into(),
}
}
}

View File

@ -241,7 +241,7 @@ pub struct PrivacyPreservingMessage {
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub struct WitnessSet {
pub signatures_and_public_keys: Vec<(Signature, PublicKey)>,
pub proof: Proof,
pub proof: Option<Proof>,
}
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]

View File

@ -30,16 +30,22 @@ pub trait Rpc {
async fn get_last_finalized_block_id(&self) -> Result<BlockId, ErrorObjectOwned>;
#[method(name = "getBlockById")]
async fn get_block_by_id(&self, block_id: BlockId) -> Result<Block, ErrorObjectOwned>;
async fn get_block_by_id(&self, block_id: BlockId) -> Result<Option<Block>, ErrorObjectOwned>;
#[method(name = "getBlockByHash")]
async fn get_block_by_hash(&self, block_hash: HashType) -> Result<Block, ErrorObjectOwned>;
async fn get_block_by_hash(
&self,
block_hash: HashType,
) -> Result<Option<Block>, ErrorObjectOwned>;
#[method(name = "getAccount")]
async fn get_account(&self, account_id: AccountId) -> Result<Account, ErrorObjectOwned>;
#[method(name = "getTransaction")]
async fn get_transaction(&self, tx_hash: HashType) -> Result<Transaction, ErrorObjectOwned>;
async fn get_transaction(
&self,
tx_hash: HashType,
) -> Result<Option<Transaction>, ErrorObjectOwned>;
#[method(name = "getBlocks")]
async fn get_blocks(

View File

@ -3,7 +3,7 @@ use std::net::SocketAddr;
use anyhow::{Context as _, Result};
pub use indexer_core::config::*;
use indexer_service_rpc::RpcServer as _;
use jsonrpsee::server::Server;
use jsonrpsee::server::{Server, ServerHandle};
use log::{error, info};
pub mod service;
@ -13,10 +13,11 @@ pub mod mock_service;
pub struct IndexerHandle {
addr: SocketAddr,
server_handle: Option<jsonrpsee::server::ServerHandle>,
/// Option because of `Drop` which forbids to simply move out of `self` in `stopped()`.
server_handle: Option<ServerHandle>,
}
impl IndexerHandle {
const fn new(addr: SocketAddr, server_handle: jsonrpsee::server::ServerHandle) -> Self {
const fn new(addr: SocketAddr, server_handle: ServerHandle) -> Self {
Self {
addr,
server_handle: Some(server_handle),
@ -28,6 +29,7 @@ impl IndexerHandle {
self.addr
}
/// Wait for all Indexer tasks to stop.
pub async fn stopped(mut self) {
let handle = self
.server_handle
@ -37,15 +39,11 @@ impl IndexerHandle {
handle.stopped().await;
}
#[expect(
clippy::redundant_closure_for_method_calls,
reason = "Clippy suggested path jsonrpsee::jsonrpsee_server::ServerHandle is not accessible"
)]
#[must_use]
pub fn is_stopped(&self) -> bool {
pub fn is_healthy(&self) -> bool {
self.server_handle
.as_ref()
.is_none_or(|handle| handle.is_stopped())
.is_some_and(|handle| !handle.is_stopped())
}
}

View File

@ -15,7 +15,10 @@ use indexer_service_protocol::{
ProgramDeploymentTransaction, ProgramId, PublicMessage, PublicTransaction, Signature,
Transaction, ValidityWindow, WitnessSet,
};
use jsonrpsee::{core::SubscriptionResult, types::ErrorObjectOwned};
use jsonrpsee::{
core::{SubscriptionResult, async_trait},
types::ErrorObjectOwned,
};
/// A mock implementation of the `IndexerService` RPC for testing purposes.
pub struct MockIndexerService {
@ -92,7 +95,7 @@ impl MockIndexerService {
},
witness_set: WitnessSet {
signatures_and_public_keys: vec![],
proof: indexer_service_protocol::Proof(vec![0; 32]),
proof: None,
},
}),
// PrivacyPreserving transactions
@ -125,7 +128,7 @@ impl MockIndexerService {
},
witness_set: WitnessSet {
signatures_and_public_keys: vec![],
proof: indexer_service_protocol::Proof(vec![0; 32]),
proof: Some(indexer_service_protocol::Proof(vec![0; 32])),
},
}),
// ProgramDeployment transactions (rare)
@ -172,7 +175,7 @@ impl MockIndexerService {
}
}
#[async_trait::async_trait]
#[async_trait]
impl indexer_service_rpc::RpcServer for MockIndexerService {
async fn subscribe_to_finalized_blocks(
&self,
@ -199,26 +202,23 @@ impl indexer_service_rpc::RpcServer for MockIndexerService {
})
}
async fn get_block_by_id(&self, block_id: BlockId) -> Result<Block, ErrorObjectOwned> {
self.blocks
async fn get_block_by_id(&self, block_id: BlockId) -> Result<Option<Block>, ErrorObjectOwned> {
Ok(self
.blocks
.iter()
.find(|b| b.header.block_id == block_id)
.cloned()
.ok_or_else(|| {
ErrorObjectOwned::owned(
-32001,
format!("Block with ID {block_id} not found"),
None::<()>,
)
})
.cloned())
}
async fn get_block_by_hash(&self, block_hash: HashType) -> Result<Block, ErrorObjectOwned> {
self.blocks
async fn get_block_by_hash(
&self,
block_hash: HashType,
) -> Result<Option<Block>, ErrorObjectOwned> {
Ok(self
.blocks
.iter()
.find(|b| b.header.hash == block_hash)
.cloned()
.ok_or_else(|| ErrorObjectOwned::owned(-32001, "Block with hash not found", None::<()>))
.cloned())
}
async fn get_account(&self, account_id: AccountId) -> Result<Account, ErrorObjectOwned> {
@ -228,11 +228,11 @@ impl indexer_service_rpc::RpcServer for MockIndexerService {
.ok_or_else(|| ErrorObjectOwned::owned(-32001, "Account not found", None::<()>))
}
async fn get_transaction(&self, tx_hash: HashType) -> Result<Transaction, ErrorObjectOwned> {
self.transactions
.get(&tx_hash)
.map(|(tx, _)| tx.clone())
.ok_or_else(|| ErrorObjectOwned::owned(-32001, "Transaction not found", None::<()>))
async fn get_transaction(
&self,
tx_hash: HashType,
) -> Result<Option<Transaction>, ErrorObjectOwned> {
Ok(self.transactions.get(&tx_hash).map(|(tx, _)| tx.clone()))
}
async fn get_blocks(

View File

@ -7,7 +7,7 @@ use indexer_core::{IndexerCore, config::IndexerConfig};
use indexer_service_protocol::{Account, AccountId, Block, BlockId, HashType, Transaction};
use jsonrpsee::{
SubscriptionSink,
core::{Serialize, SubscriptionResult},
core::{Serialize, SubscriptionResult, async_trait},
types::{ErrorCode, ErrorObject, ErrorObjectOwned},
};
use log::{debug, error, info, warn};
@ -30,7 +30,7 @@ impl IndexerService {
}
}
#[async_trait::async_trait]
#[async_trait]
impl indexer_service_rpc::RpcServer for IndexerService {
async fn subscribe_to_finalized_blocks(
&self,
@ -52,22 +52,25 @@ impl indexer_service_rpc::RpcServer for IndexerService {
self.indexer.store.get_last_block_id().map_err(db_error)
}
async fn get_block_by_id(&self, block_id: BlockId) -> Result<Block, ErrorObjectOwned> {
async fn get_block_by_id(&self, block_id: BlockId) -> Result<Option<Block>, ErrorObjectOwned> {
Ok(self
.indexer
.store
.get_block_at_id(block_id)
.map_err(db_error)?
.into())
.map(Into::into))
}
async fn get_block_by_hash(&self, block_hash: HashType) -> Result<Block, ErrorObjectOwned> {
async fn get_block_by_hash(
&self,
block_hash: HashType,
) -> Result<Option<Block>, ErrorObjectOwned> {
Ok(self
.indexer
.store
.get_block_by_hash(block_hash.0)
.map_err(db_error)?
.into())
.map(Into::into))
}
async fn get_account(&self, account_id: AccountId) -> Result<Account, ErrorObjectOwned> {
@ -80,13 +83,16 @@ impl indexer_service_rpc::RpcServer for IndexerService {
.into())
}
async fn get_transaction(&self, tx_hash: HashType) -> Result<Transaction, ErrorObjectOwned> {
async fn get_transaction(
&self,
tx_hash: HashType,
) -> Result<Option<Transaction>, ErrorObjectOwned> {
Ok(self
.indexer
.store
.get_transaction_by_hash(tx_hash.0)
.map_err(db_error)?
.into())
.map(Into::into))
}
async fn get_blocks(

View File

@ -11,7 +11,7 @@ workspace = true
nssa_core = { workspace = true, features = ["host"] }
nssa.workspace = true
sequencer_core = { workspace = true, features = ["default", "testnet"] }
sequencer_runner.workspace = true
sequencer_service.workspace = true
wallet.workspace = true
common.workspace = true
key_protocol.workspace = true
@ -19,6 +19,7 @@ indexer_service.workspace = true
serde_json.workspace = true
token_core.workspace = true
indexer_service_rpc.workspace = true
sequencer_service_rpc = { workspace = true, features = ["client"] }
wallet-ffi.workspace = true
url.workspace = true
@ -26,11 +27,9 @@ url.workspace = true
anyhow.workspace = true
env_logger.workspace = true
log.workspace = true
base64.workspace = true
tokio = { workspace = true, features = ["rt-multi-thread", "macros"] }
hex.workspace = true
tempfile.workspace = true
borsh.workspace = true
bytesize.workspace = true
futures.workspace = true
testcontainers = { version = "0.27.0", features = ["docker-compose"] }

View File

@ -59,11 +59,11 @@ impl InitialData {
let mut private_charlie_key_chain = KeyChain::new_os_random();
let mut private_charlie_account_id =
AccountId::from(&private_charlie_key_chain.nullifer_public_key);
AccountId::from(&private_charlie_key_chain.nullifier_public_key);
let mut private_david_key_chain = KeyChain::new_os_random();
let mut private_david_account_id =
AccountId::from(&private_david_key_chain.nullifer_public_key);
AccountId::from(&private_david_key_chain.nullifier_public_key);
// Ensure consistent ordering
if private_charlie_account_id > private_david_account_id {
@ -120,7 +120,7 @@ impl InitialData {
self.private_accounts
.iter()
.map(|(key_chain, account)| CommitmentsInitialData {
npk: key_chain.nullifer_public_key.clone(),
npk: key_chain.nullifier_public_key.clone(),
account: account.clone(),
})
.collect()
@ -138,7 +138,7 @@ impl InitialData {
})
})
.chain(self.private_accounts.iter().map(|(key_chain, account)| {
let account_id = AccountId::from(&key_chain.nullifer_public_key);
let account_id = AccountId::from(&key_chain.nullifier_public_key);
InitialAccountData::Private(Box::new(InitialAccountDataPrivate {
account_id,
account: account.clone(),
@ -204,7 +204,6 @@ pub fn sequencer_config(
Ok(SequencerConfig {
home,
override_rust_log: None,
genesis_id: 1,
is_genesis_random: true,
max_num_tx_in_block,
@ -212,7 +211,6 @@ pub fn sequencer_config(
mempool_max_size,
block_create_timeout,
retry_pending_blocks_timeout: Duration::from_mins(2),
port: 0,
initial_accounts: initial_data.sequencer_initial_accounts(),
initial_commitments: initial_data.sequencer_initial_commitments(),
signing_key: [37; 32],
@ -236,7 +234,6 @@ pub fn wallet_config(
initial_data: &InitialData,
) -> Result<WalletConfig> {
Ok(WalletConfig {
override_rust_log: None,
sequencer_addr: addr_to_url(UrlProtocol::Http, sequencer_addr)
.context("Failed to convert sequencer addr to URL")?,
seq_poll_timeout: Duration::from_secs(30),

View File

@ -3,15 +3,15 @@
use std::{net::SocketAddr, path::PathBuf, sync::LazyLock};
use anyhow::{Context as _, Result, bail};
use base64::{Engine as _, engine::general_purpose::STANDARD as BASE64};
use common::{HashType, sequencer_client::SequencerClient, transaction::NSSATransaction};
use common::{HashType, transaction::NSSATransaction};
use futures::FutureExt as _;
use indexer_service::IndexerHandle;
use log::{debug, error, warn};
use nssa::{AccountId, PrivacyPreservingTransaction};
use nssa_core::Commitment;
use sequencer_core::indexer_client::{IndexerClient, IndexerClientTrait as _};
use sequencer_runner::SequencerHandle;
use sequencer_service::SequencerHandle;
use sequencer_service_rpc::{RpcClient as _, SequencerClient, SequencerClientBuilder};
use tempfile::TempDir;
use testcontainers::compose::DockerCompose;
use wallet::{WalletCore, config::WalletConfigOverrides};
@ -38,7 +38,8 @@ pub struct TestContext {
indexer_client: IndexerClient,
wallet: WalletCore,
wallet_password: String,
sequencer_handle: SequencerHandle,
/// Optional to move out value in Drop.
sequencer_handle: Option<SequencerHandle>,
indexer_handle: IndexerHandle,
bedrock_compose: DockerCompose,
_temp_indexer_dir: TempDir,
@ -90,8 +91,9 @@ impl TestContext {
.context("Failed to convert sequencer addr to URL")?;
let indexer_url = config::addr_to_url(config::UrlProtocol::Ws, indexer_handle.addr())
.context("Failed to convert indexer addr to URL")?;
let sequencer_client =
SequencerClient::new(sequencer_url).context("Failed to create sequencer client")?;
let sequencer_client = SequencerClientBuilder::default()
.build(sequencer_url)
.context("Failed to create sequencer client")?;
let indexer_client = IndexerClient::new(&indexer_url)
.await
.context("Failed to create indexer client")?;
@ -102,7 +104,7 @@ impl TestContext {
wallet,
wallet_password,
bedrock_compose,
sequencer_handle,
sequencer_handle: Some(sequencer_handle),
indexer_handle,
_temp_indexer_dir: temp_indexer_dir,
_temp_sequencer_dir: temp_sequencer_dir,
@ -229,7 +231,7 @@ impl TestContext {
)
.context("Failed to create Sequencer config")?;
let sequencer_handle = sequencer_runner::startup_sequencer(config).await?;
let sequencer_handle = sequencer_service::run(config, 0).await?;
Ok((sequencer_handle, temp_sequencer_dir))
}
@ -333,18 +335,20 @@ impl Drop for TestContext {
wallet_password: _,
} = self;
if sequencer_handle.is_finished() {
let Err(err) = self
.sequencer_handle
.run_forever()
let sequencer_handle = sequencer_handle
.take()
.expect("Sequencer handle should be present in TestContext drop");
if !sequencer_handle.is_healthy() {
let Err(err) = sequencer_handle
.failed()
.now_or_never()
.expect("Future is finished and should be ready");
.expect("Sequencer handle should not be running");
error!(
"Sequencer handle has unexpectedly finished before TestContext drop with error: {err:#}"
"Sequencer handle has unexpectedly stopped before TestContext drop with error: {err:#}"
);
}
if indexer_handle.is_stopped() {
if !indexer_handle.is_healthy() {
error!("Indexer handle has unexpectedly stopped before TestContext drop");
}
@ -459,15 +463,8 @@ pub async fn fetch_privacy_preserving_tx(
seq_client: &SequencerClient,
tx_hash: HashType,
) -> PrivacyPreservingTransaction {
let transaction_encoded = seq_client
.get_transaction_by_hash(tx_hash)
.await
.unwrap()
.transaction
.unwrap();
let tx = seq_client.get_transaction(tx_hash).await.unwrap().unwrap();
let tx_bytes = BASE64.decode(transaction_encoded).unwrap();
let tx = borsh::from_slice(&tx_bytes).unwrap();
match tx {
NSSATransaction::PrivacyPreserving(privacy_preserving_transaction) => {
privacy_preserving_transaction
@ -480,8 +477,10 @@ pub async fn verify_commitment_is_in_state(
commitment: Commitment,
seq_client: &SequencerClient,
) -> bool {
matches!(
seq_client.get_proof_for_commitment(commitment).await,
Ok(Some(_))
)
seq_client
.get_proof_for_commitment(commitment)
.await
.ok()
.flatten()
.is_some()
}

View File

@ -7,6 +7,7 @@ use anyhow::Result;
use integration_tests::TestContext;
use log::info;
use nssa::program::Program;
use sequencer_service_rpc::RpcClient as _;
use tokio::test;
use wallet::cli::{
Command,
@ -21,8 +22,7 @@ async fn get_existing_account() -> Result<()> {
let account = ctx
.sequencer_client()
.get_account(ctx.existing_public_accounts()[0])
.await?
.account;
.await?;
assert_eq!(
account.program_owner,

View File

@ -9,6 +9,7 @@ use std::time::Duration;
use anyhow::Result;
use integration_tests::{TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, format_public_account_id};
use log::info;
use sequencer_service_rpc::RpcClient as _;
use tokio::test;
use wallet::cli::{
Command, SubcommandReturnValue,
@ -194,20 +195,14 @@ async fn amm_public() -> Result<()> {
let user_holding_a_acc = ctx
.sequencer_client()
.get_account(recipient_account_id_1)
.await?
.account;
.await?;
let user_holding_b_acc = ctx
.sequencer_client()
.get_account(recipient_account_id_2)
.await?
.account;
.await?;
let user_holding_lp_acc = ctx
.sequencer_client()
.get_account(user_holding_lp)
.await?
.account;
let user_holding_lp_acc = ctx.sequencer_client().get_account(user_holding_lp).await?;
assert_eq!(
u128::from_le_bytes(user_holding_a_acc.data[33..].try_into().unwrap()),
@ -243,20 +238,14 @@ async fn amm_public() -> Result<()> {
let user_holding_a_acc = ctx
.sequencer_client()
.get_account(recipient_account_id_1)
.await?
.account;
.await?;
let user_holding_b_acc = ctx
.sequencer_client()
.get_account(recipient_account_id_2)
.await?
.account;
.await?;
let user_holding_lp_acc = ctx
.sequencer_client()
.get_account(user_holding_lp)
.await?
.account;
let user_holding_lp_acc = ctx.sequencer_client().get_account(user_holding_lp).await?;
assert_eq!(
u128::from_le_bytes(user_holding_a_acc.data[33..].try_into().unwrap()),
@ -292,20 +281,14 @@ async fn amm_public() -> Result<()> {
let user_holding_a_acc = ctx
.sequencer_client()
.get_account(recipient_account_id_1)
.await?
.account;
.await?;
let user_holding_b_acc = ctx
.sequencer_client()
.get_account(recipient_account_id_2)
.await?
.account;
.await?;
let user_holding_lp_acc = ctx
.sequencer_client()
.get_account(user_holding_lp)
.await?
.account;
let user_holding_lp_acc = ctx.sequencer_client().get_account(user_holding_lp).await?;
assert_eq!(
u128::from_le_bytes(user_holding_a_acc.data[33..].try_into().unwrap()),
@ -342,20 +325,14 @@ async fn amm_public() -> Result<()> {
let user_holding_a_acc = ctx
.sequencer_client()
.get_account(recipient_account_id_1)
.await?
.account;
.await?;
let user_holding_b_acc = ctx
.sequencer_client()
.get_account(recipient_account_id_2)
.await?
.account;
.await?;
let user_holding_lp_acc = ctx
.sequencer_client()
.get_account(user_holding_lp)
.await?
.account;
let user_holding_lp_acc = ctx.sequencer_client().get_account(user_holding_lp).await?;
assert_eq!(
u128::from_le_bytes(user_holding_a_acc.data[33..].try_into().unwrap()),
@ -392,20 +369,14 @@ async fn amm_public() -> Result<()> {
let user_holding_a_acc = ctx
.sequencer_client()
.get_account(recipient_account_id_1)
.await?
.account;
.await?;
let user_holding_b_acc = ctx
.sequencer_client()
.get_account(recipient_account_id_2)
.await?
.account;
.await?;
let user_holding_lp_acc = ctx
.sequencer_client()
.get_account(user_holding_lp)
.await?
.account;
let user_holding_lp_acc = ctx.sequencer_client().get_account(user_holding_lp).await?;
assert_eq!(
u128::from_le_bytes(user_holding_a_acc.data[33..].try_into().unwrap()),

View File

@ -8,6 +8,7 @@ use integration_tests::{
use log::info;
use nssa::{AccountId, program::Program};
use nssa_core::{NullifierPublicKey, encryption::shared_key_derivation::Secp256k1Point};
use sequencer_service_rpc::RpcClient as _;
use tokio::test;
use wallet::cli::{
Command, SubcommandReturnValue,
@ -135,7 +136,7 @@ async fn deshielded_transfer_to_public_account() -> Result<()> {
let acc_2_balance = ctx.sequencer_client().get_account_balance(to).await?;
assert_eq!(from_acc.balance, 9900);
assert_eq!(acc_2_balance.balance, 20100);
assert_eq!(acc_2_balance, 20100);
info!("Successfully deshielded transfer to public account");
@ -175,7 +176,7 @@ async fn private_transfer_to_owned_account_using_claiming_path() -> Result<()> {
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: format_private_account_id(from),
to: None,
to_npk: Some(hex::encode(to_keys.nullifer_public_key.0)),
to_npk: Some(hex::encode(to_keys.nullifier_public_key.0)),
to_vpk: Some(hex::encode(to_keys.viewing_public_key.0)),
amount: 100,
});
@ -245,7 +246,7 @@ async fn shielded_transfer_to_owned_private_account() -> Result<()> {
let acc_from_balance = ctx.sequencer_client().get_account_balance(from).await?;
assert_eq!(acc_from_balance.balance, 9900);
assert_eq!(acc_from_balance, 9900);
assert_eq!(acc_to.balance, 20100);
info!("Successfully shielded transfer to owned private account");
@ -290,7 +291,7 @@ async fn shielded_transfer_to_foreign_account() -> Result<()> {
.await
);
assert_eq!(acc_1_balance.balance, 9900);
assert_eq!(acc_1_balance, 9900);
info!("Successfully shielded transfer to foreign account");
@ -335,7 +336,7 @@ async fn private_transfer_to_owned_account_continuous_run_path() -> Result<()> {
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: format_private_account_id(from),
to: None,
to_npk: Some(hex::encode(to_keys.nullifer_public_key.0)),
to_npk: Some(hex::encode(to_keys.nullifier_public_key.0)),
to_vpk: Some(hex::encode(to_keys.viewing_public_key.0)),
amount: 100,
});

View File

@ -4,6 +4,7 @@ use anyhow::Result;
use integration_tests::{TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, format_public_account_id};
use log::info;
use nssa::program::Program;
use sequencer_service_rpc::RpcClient as _;
use tokio::test;
use wallet::cli::{
Command, SubcommandReturnValue,
@ -41,8 +42,8 @@ async fn successful_transfer_to_existing_account() -> Result<()> {
info!("Balance of sender: {acc_1_balance:#?}");
info!("Balance of receiver: {acc_2_balance:#?}");
assert_eq!(acc_1_balance.balance, 9900);
assert_eq!(acc_2_balance.balance, 20100);
assert_eq!(acc_1_balance, 9900);
assert_eq!(acc_2_balance, 20100);
Ok(())
}
@ -97,8 +98,8 @@ pub async fn successful_transfer_to_new_account() -> Result<()> {
info!("Balance of sender: {acc_1_balance:#?}");
info!("Balance of receiver: {acc_2_balance:#?}");
assert_eq!(acc_1_balance.balance, 9900);
assert_eq!(acc_2_balance.balance, 100);
assert_eq!(acc_1_balance, 9900);
assert_eq!(acc_2_balance, 100);
Ok(())
}
@ -134,8 +135,8 @@ async fn failed_transfer_with_insufficient_balance() -> Result<()> {
info!("Balance of sender: {acc_1_balance:#?}");
info!("Balance of receiver: {acc_2_balance:#?}");
assert_eq!(acc_1_balance.balance, 10000);
assert_eq!(acc_2_balance.balance, 20000);
assert_eq!(acc_1_balance, 10000);
assert_eq!(acc_2_balance, 20000);
Ok(())
}
@ -171,8 +172,8 @@ async fn two_consecutive_successful_transfers() -> Result<()> {
info!("Balance of sender: {acc_1_balance:#?}");
info!("Balance of receiver: {acc_2_balance:#?}");
assert_eq!(acc_1_balance.balance, 9900);
assert_eq!(acc_2_balance.balance, 20100);
assert_eq!(acc_1_balance, 9900);
assert_eq!(acc_2_balance, 20100);
info!("First TX Success!");
@ -203,8 +204,8 @@ async fn two_consecutive_successful_transfers() -> Result<()> {
info!("Balance of sender: {acc_1_balance:#?}");
info!("Balance of receiver: {acc_2_balance:#?}");
assert_eq!(acc_1_balance.balance, 9800);
assert_eq!(acc_2_balance.balance, 20200);
assert_eq!(acc_1_balance, 9800);
assert_eq!(acc_2_balance, 20200);
info!("Second TX Success!");
@ -230,11 +231,7 @@ async fn initialize_public_account() -> Result<()> {
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
info!("Checking correct execution");
let account = ctx
.sequencer_client()
.get_account(account_id)
.await?
.account;
let account = ctx.sequencer_client().get_account(account_id).await?;
assert_eq!(
account.program_owner,

View File

@ -8,11 +8,12 @@ use std::time::Duration;
use anyhow::Result;
use bytesize::ByteSize;
use common::{block::HashableBlockData, transaction::NSSATransaction};
use common::transaction::NSSATransaction;
use integration_tests::{
TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, config::SequencerPartialConfig,
};
use nssa::program::Program;
use sequencer_service_rpc::RpcClient as _;
use tokio::test;
#[test]
@ -36,7 +37,10 @@ async fn reject_oversized_transaction() -> Result<()> {
let tx = nssa::ProgramDeploymentTransaction::new(message);
// Try to submit the transaction and expect an error
let result = ctx.sequencer_client().send_tx_program(tx).await;
let result = ctx
.sequencer_client()
.send_transaction(NSSATransaction::ProgramDeployment(tx))
.await;
assert!(
result.is_err(),
@ -74,7 +78,10 @@ async fn accept_transaction_within_limit() -> Result<()> {
let tx = nssa::ProgramDeploymentTransaction::new(message);
// This should succeed
let result = ctx.sequencer_client().send_tx_program(tx).await;
let result = ctx
.sequencer_client()
.send_transaction(NSSATransaction::ProgramDeployment(tx))
.await;
assert!(
result.is_ok(),
@ -112,33 +119,38 @@ async fn transaction_deferred_to_next_block_when_current_full() -> Result<()> {
let burner_id = Program::new(burner_bytecode.clone())?.id();
let chain_caller_id = Program::new(chain_caller_bytecode.clone())?.id();
let initial_block_height = ctx.sequencer_client().get_last_block().await?.last_block;
let initial_block_height = ctx.sequencer_client().get_last_block_id().await?;
// Submit both program deployments
ctx.sequencer_client()
.send_tx_program(nssa::ProgramDeploymentTransaction::new(
nssa::program_deployment_transaction::Message::new(burner_bytecode),
.send_transaction(NSSATransaction::ProgramDeployment(
nssa::ProgramDeploymentTransaction::new(
nssa::program_deployment_transaction::Message::new(burner_bytecode),
),
))
.await?;
ctx.sequencer_client()
.send_tx_program(nssa::ProgramDeploymentTransaction::new(
nssa::program_deployment_transaction::Message::new(chain_caller_bytecode),
.send_transaction(NSSATransaction::ProgramDeployment(
nssa::ProgramDeploymentTransaction::new(
nssa::program_deployment_transaction::Message::new(chain_caller_bytecode),
),
))
.await?;
// Wait for first block
tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
let block1_response = ctx
let block1 = ctx
.sequencer_client()
.get_block(initial_block_height + 1)
.await?;
let block1: HashableBlockData = borsh::from_slice(&block1_response.block)?;
.await?
.unwrap();
// Check which program is in block 1
let get_program_ids = |block: &HashableBlockData| -> Vec<nssa::ProgramId> {
let get_program_ids = |block: &common::block::Block| -> Vec<nssa::ProgramId> {
block
.body
.transactions
.iter()
.filter_map(|tx| {
@ -168,11 +180,11 @@ async fn transaction_deferred_to_next_block_when_current_full() -> Result<()> {
// Wait for second block
tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
let block2_response = ctx
let block2 = ctx
.sequencer_client()
.get_block(initial_block_height + 2)
.await?;
let block2: HashableBlockData = borsh::from_slice(&block2_response.block)?;
.await?
.unwrap();
let block2_program_ids = get_program_ids(&block2);
// The other program should be in block 2

View File

@ -22,12 +22,8 @@ async fn indexer_test_run() -> Result<()> {
// RUN OBSERVATION
tokio::time::sleep(std::time::Duration::from_millis(L2_TO_L1_TIMEOUT_MILLIS)).await;
let last_block_seq = ctx
.sequencer_client()
.get_last_block()
.await
.unwrap()
.last_block;
let last_block_seq =
sequencer_service_rpc::RpcClient::get_last_block_id(ctx.sequencer_client()).await?;
info!("Last block on seq now is {last_block_seq}");
@ -100,20 +96,22 @@ async fn indexer_state_consistency() -> Result<()> {
tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
info!("Checking correct balance move");
let acc_1_balance = ctx
.sequencer_client()
.get_account_balance(ctx.existing_public_accounts()[0])
.await?;
let acc_2_balance = ctx
.sequencer_client()
.get_account_balance(ctx.existing_public_accounts()[1])
.await?;
let acc_1_balance = sequencer_service_rpc::RpcClient::get_account_balance(
ctx.sequencer_client(),
ctx.existing_public_accounts()[0],
)
.await?;
let acc_2_balance = sequencer_service_rpc::RpcClient::get_account_balance(
ctx.sequencer_client(),
ctx.existing_public_accounts()[1],
)
.await?;
info!("Balance of sender: {acc_1_balance:#?}");
info!("Balance of receiver: {acc_2_balance:#?}");
assert_eq!(acc_1_balance.balance, 9900);
assert_eq!(acc_2_balance.balance, 20100);
assert_eq!(acc_1_balance, 9900);
assert_eq!(acc_2_balance, 20100);
// WAIT
info!("Waiting for indexer to parse blocks");
@ -131,16 +129,16 @@ async fn indexer_state_consistency() -> Result<()> {
.unwrap();
info!("Checking correct state transition");
let acc1_seq_state = ctx
.sequencer_client()
.get_account(ctx.existing_public_accounts()[0])
.await?
.account;
let acc2_seq_state = ctx
.sequencer_client()
.get_account(ctx.existing_public_accounts()[1])
.await?
.account;
let acc1_seq_state = sequencer_service_rpc::RpcClient::get_account(
ctx.sequencer_client(),
ctx.existing_public_accounts()[0],
)
.await?;
let acc2_seq_state = sequencer_service_rpc::RpcClient::get_account(
ctx.sequencer_client(),
ctx.existing_public_accounts()[1],
)
.await?;
assert_eq!(acc1_ind_state, acc1_seq_state.into());
assert_eq!(acc2_ind_state, acc2_seq_state.into());

View File

@ -14,6 +14,7 @@ use integration_tests::{
use key_protocol::key_management::key_tree::chain_index::ChainIndex;
use log::info;
use nssa::{AccountId, program::Program};
use sequencer_service_rpc::RpcClient as _;
use tokio::test;
use wallet::cli::{
Command, SubcommandReturnValue,
@ -70,7 +71,7 @@ async fn sync_private_account_with_non_zero_chain_index() -> Result<()> {
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: format_private_account_id(from),
to: None,
to_npk: Some(hex::encode(to_keys.nullifer_public_key.0)),
to_npk: Some(hex::encode(to_keys.nullifier_public_key.0)),
to_vpk: Some(hex::encode(to_keys.viewing_public_key.0)),
amount: 100,
});
@ -305,8 +306,8 @@ async fn restore_keys_from_seed() -> Result<()> {
.get_account_balance(to_account_id4)
.await?;
assert_eq!(acc3.balance, 91); // 102 - 11
assert_eq!(acc4.balance, 114); // 103 + 11
assert_eq!(acc3, 91); // 102 - 11
assert_eq!(acc4, 114); // 103 + 11
info!("Successfully restored keys and verified transactions");

View File

@ -13,6 +13,7 @@ use integration_tests::{
format_public_account_id, verify_commitment_is_in_state,
};
use log::info;
use sequencer_service_rpc::RpcClient as _;
use tokio::test;
use wallet::cli::{
Command, SubcommandReturnValue,
@ -46,8 +47,7 @@ async fn claim_pinata_to_uninitialized_public_account_fails_fast() -> Result<()>
let pinata_balance_pre = ctx
.sequencer_client()
.get_account_balance(PINATA_BASE58.parse().unwrap())
.await?
.balance;
.await?;
let claim_result = wallet::cli::execute_subcommand(
ctx.wallet_mut(),
@ -70,8 +70,7 @@ async fn claim_pinata_to_uninitialized_public_account_fails_fast() -> Result<()>
let pinata_balance_post = ctx
.sequencer_client()
.get_account_balance(PINATA_BASE58.parse().unwrap())
.await?
.balance;
.await?;
assert_eq!(pinata_balance_post, pinata_balance_pre);
@ -102,8 +101,7 @@ async fn claim_pinata_to_uninitialized_private_account_fails_fast() -> Result<()
let pinata_balance_pre = ctx
.sequencer_client()
.get_account_balance(PINATA_BASE58.parse().unwrap())
.await?
.balance;
.await?;
let claim_result = wallet::cli::execute_subcommand(
ctx.wallet_mut(),
@ -126,8 +124,7 @@ async fn claim_pinata_to_uninitialized_private_account_fails_fast() -> Result<()
let pinata_balance_post = ctx
.sequencer_client()
.get_account_balance(PINATA_BASE58.parse().unwrap())
.await?
.balance;
.await?;
assert_eq!(pinata_balance_post, pinata_balance_pre);
@ -146,8 +143,7 @@ async fn claim_pinata_to_existing_public_account() -> Result<()> {
let pinata_balance_pre = ctx
.sequencer_client()
.get_account_balance(PINATA_BASE58.parse().unwrap())
.await?
.balance;
.await?;
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -158,14 +154,12 @@ async fn claim_pinata_to_existing_public_account() -> Result<()> {
let pinata_balance_post = ctx
.sequencer_client()
.get_account_balance(PINATA_BASE58.parse().unwrap())
.await?
.balance;
.await?;
let winner_balance_post = ctx
.sequencer_client()
.get_account_balance(ctx.existing_public_accounts()[0])
.await?
.balance;
.await?;
assert_eq!(pinata_balance_post, pinata_balance_pre - pinata_prize);
assert_eq!(winner_balance_post, 10000 + pinata_prize);
@ -187,8 +181,7 @@ async fn claim_pinata_to_existing_private_account() -> Result<()> {
let pinata_balance_pre = ctx
.sequencer_client()
.get_account_balance(PINATA_BASE58.parse().unwrap())
.await?
.balance;
.await?;
let result = wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
let SubcommandReturnValue::PrivacyPreservingTransfer { tx_hash: _ } = result else {
@ -211,8 +204,7 @@ async fn claim_pinata_to_existing_private_account() -> Result<()> {
let pinata_balance_post = ctx
.sequencer_client()
.get_account_balance(PINATA_BASE58.parse().unwrap())
.await?
.balance;
.await?;
assert_eq!(pinata_balance_post, pinata_balance_pre - pinata_prize);
@ -268,8 +260,7 @@ async fn claim_pinata_to_new_private_account() -> Result<()> {
let pinata_balance_pre = ctx
.sequencer_client()
.get_account_balance(PINATA_BASE58.parse().unwrap())
.await?
.balance;
.await?;
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -285,8 +276,7 @@ async fn claim_pinata_to_new_private_account() -> Result<()> {
let pinata_balance_post = ctx
.sequencer_client()
.get_account_balance(PINATA_BASE58.parse().unwrap())
.await?
.balance;
.await?;
assert_eq!(pinata_balance_post, pinata_balance_pre - pinata_prize);

View File

@ -6,11 +6,13 @@
use std::{path::PathBuf, time::Duration};
use anyhow::Result;
use common::transaction::NSSATransaction;
use integration_tests::{
NSSA_PROGRAM_FOR_TEST_DATA_CHANGER, TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext,
};
use log::info;
use nssa::{AccountId, program::Program};
use sequencer_service_rpc::RpcClient as _;
use tokio::test;
use wallet::cli::Command;
@ -47,18 +49,17 @@ async fn deploy_and_execute_program() -> Result<()> {
)?;
let witness_set = nssa::public_transaction::WitnessSet::for_message(&message, &[]);
let transaction = nssa::PublicTransaction::new(message, witness_set);
let _response = ctx.sequencer_client().send_tx_public(transaction).await?;
let _response = ctx
.sequencer_client()
.send_transaction(NSSATransaction::Public(transaction))
.await?;
info!("Waiting for next block creation");
// Waiting for long time as it may take some time for such a big transaction to be included in a
// block
tokio::time::sleep(Duration::from_secs(2 * TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
let post_state_account = ctx
.sequencer_client()
.get_account(account_id)
.await?
.account;
let post_state_account = ctx.sequencer_client().get_account(account_id).await?;
assert_eq!(post_state_account.program_owner, data_changer.id());
assert_eq!(post_state_account.balance, 0);

View File

@ -14,6 +14,7 @@ use integration_tests::{
use key_protocol::key_management::key_tree::chain_index::ChainIndex;
use log::info;
use nssa::program::Program;
use sequencer_service_rpc::RpcClient as _;
use token_core::{TokenDefinition, TokenHolding};
use tokio::test;
use wallet::cli::{
@ -92,8 +93,7 @@ async fn create_and_transfer_public_token() -> Result<()> {
let definition_acc = ctx
.sequencer_client()
.get_account(definition_account_id)
.await?
.account;
.await?;
let token_definition = TokenDefinition::try_from(&definition_acc.data)?;
assert_eq!(definition_acc.program_owner, Program::token().id());
@ -110,8 +110,7 @@ async fn create_and_transfer_public_token() -> Result<()> {
let supply_acc = ctx
.sequencer_client()
.get_account(supply_account_id)
.await?
.account;
.await?;
// The account must be owned by the token program
assert_eq!(supply_acc.program_owner, Program::token().id());
@ -143,8 +142,7 @@ async fn create_and_transfer_public_token() -> Result<()> {
let supply_acc = ctx
.sequencer_client()
.get_account(supply_account_id)
.await?
.account;
.await?;
assert_eq!(supply_acc.program_owner, Program::token().id());
let token_holding = TokenHolding::try_from(&supply_acc.data)?;
assert_eq!(
@ -159,8 +157,7 @@ async fn create_and_transfer_public_token() -> Result<()> {
let recipient_acc = ctx
.sequencer_client()
.get_account(recipient_account_id)
.await?
.account;
.await?;
assert_eq!(recipient_acc.program_owner, Program::token().id());
let token_holding = TokenHolding::try_from(&recipient_acc.data)?;
assert_eq!(
@ -188,8 +185,7 @@ async fn create_and_transfer_public_token() -> Result<()> {
let definition_acc = ctx
.sequencer_client()
.get_account(definition_account_id)
.await?
.account;
.await?;
let token_definition = TokenDefinition::try_from(&definition_acc.data)?;
assert_eq!(
@ -205,8 +201,7 @@ async fn create_and_transfer_public_token() -> Result<()> {
let recipient_acc = ctx
.sequencer_client()
.get_account(recipient_account_id)
.await?
.account;
.await?;
let token_holding = TokenHolding::try_from(&recipient_acc.data)?;
assert_eq!(
@ -236,8 +231,7 @@ async fn create_and_transfer_public_token() -> Result<()> {
let definition_acc = ctx
.sequencer_client()
.get_account(definition_account_id)
.await?
.account;
.await?;
let token_definition = TokenDefinition::try_from(&definition_acc.data)?;
assert_eq!(
@ -253,8 +247,7 @@ async fn create_and_transfer_public_token() -> Result<()> {
let recipient_acc = ctx
.sequencer_client()
.get_account(recipient_account_id)
.await?
.account;
.await?;
let token_holding = TokenHolding::try_from(&recipient_acc.data)?;
assert_eq!(
@ -341,8 +334,7 @@ async fn create_and_transfer_token_with_private_supply() -> Result<()> {
let definition_acc = ctx
.sequencer_client()
.get_account(definition_account_id)
.await?
.account;
.await?;
let token_definition = TokenDefinition::try_from(&definition_acc.data)?;
assert_eq!(definition_acc.program_owner, Program::token().id());
@ -405,8 +397,7 @@ async fn create_and_transfer_token_with_private_supply() -> Result<()> {
let definition_acc = ctx
.sequencer_client()
.get_account(definition_account_id)
.await?
.account;
.await?;
let token_definition = TokenDefinition::try_from(&definition_acc.data)?;
assert_eq!(
@ -506,8 +497,7 @@ async fn create_token_with_private_definition() -> Result<()> {
let supply_acc = ctx
.sequencer_client()
.get_account(supply_account_id)
.await?
.account;
.await?;
assert_eq!(supply_acc.program_owner, Program::token().id());
let token_holding = TokenHolding::try_from(&supply_acc.data)?;
@ -586,8 +576,7 @@ async fn create_token_with_private_definition() -> Result<()> {
let recipient_acc = ctx
.sequencer_client()
.get_account(recipient_account_id_public)
.await?
.account;
.await?;
let token_holding = TokenHolding::try_from(&recipient_acc.data)?;
assert_eq!(
@ -882,8 +871,7 @@ async fn shielded_token_transfer() -> Result<()> {
let supply_acc = ctx
.sequencer_client()
.get_account(supply_account_id)
.await?
.account;
.await?;
let token_holding = TokenHolding::try_from(&supply_acc.data)?;
assert_eq!(
token_holding,
@ -1026,8 +1014,7 @@ async fn deshielded_token_transfer() -> Result<()> {
let recipient_acc = ctx
.sequencer_client()
.get_account(recipient_account_id)
.await?
.account;
.await?;
let token_holding = TokenHolding::try_from(&recipient_acc.data)?;
assert_eq!(
token_holding,
@ -1123,7 +1110,7 @@ async fn token_claiming_path_with_private_accounts() -> Result<()> {
let subcommand = TokenProgramAgnosticSubcommand::Mint {
definition: format_private_account_id(definition_account_id),
holder: None,
holder_npk: Some(hex::encode(holder_keys.nullifer_public_key.0)),
holder_npk: Some(hex::encode(holder_keys.nullifier_public_key.0)),
holder_vpk: Some(hex::encode(holder_keys.viewing_public_key.0)),
amount: mint_amount,
};

View File

@ -13,6 +13,7 @@ use std::time::{Duration, Instant};
use anyhow::Result;
use bytesize::ByteSize;
use common::transaction::NSSATransaction;
use integration_tests::{
TestContext,
config::{InitialData, SequencerPartialConfig},
@ -30,6 +31,7 @@ use nssa_core::{
account::{AccountWithMetadata, Nonce, data::Data},
encryption::ViewingPublicKey,
};
use sequencer_service_rpc::RpcClient as _;
use tokio::test;
pub(crate) struct TpsTestManager {
@ -153,10 +155,9 @@ pub async fn tps_test() -> Result<()> {
for (i, tx) in txs.into_iter().enumerate() {
let tx_hash = ctx
.sequencer_client()
.send_tx_public(tx)
.send_transaction(NSSATransaction::Public(tx))
.await
.unwrap()
.tx_hash;
.unwrap();
info!("Sent tx {i}");
tx_hashes.push(tx_hash);
}
@ -170,15 +171,13 @@ pub async fn tps_test() -> Result<()> {
let tx_obj = ctx
.sequencer_client()
.get_transaction_by_hash(*tx_hash)
.get_transaction(*tx_hash)
.await
.inspect_err(|err| {
log::warn!("Failed to get transaction by hash {tx_hash} with error: {err:#?}");
});
if let Ok(tx_obj) = tx_obj
&& tx_obj.transaction.is_some()
{
if tx_obj.is_ok_and(|opt| opt.is_some()) {
info!("Found tx {i} with hash {tx_hash}");
break;
}

View File

@ -606,7 +606,7 @@ fn test_wallet_ffi_get_private_account_keys() -> Result<()> {
.unwrap()
.0;
let expected_npk = &key_chain.nullifer_public_key;
let expected_npk = &key_chain.nullifier_public_key;
let expected_vpk = &key_chain.viewing_public_key;
assert_eq!(&keys.npk(), expected_npk);

View File

@ -19,10 +19,12 @@ serde.workspace = true
k256.workspace = true
sha2.workspace = true
rand.workspace = true
base58.workspace = true
hex.workspace = true
aes-gcm.workspace = true
bip39.workspace = true
hmac-sha512.workspace = true
thiserror.workspace = true
itertools.workspace = true
[dev-dependencies]
base58.workspace = true

View File

@ -39,7 +39,7 @@ impl KeyNode for ChildKeysPrivate {
value: (
KeyChain {
secret_spending_key: ssk,
nullifer_public_key: npk,
nullifier_public_key: npk,
viewing_public_key: vpk,
private_key_holder: PrivateKeyHolder {
nullifier_secret_key: nsk,
@ -54,10 +54,7 @@ impl KeyNode for ChildKeysPrivate {
}
fn nth_child(&self, cci: u32) -> Self {
#[expect(
clippy::arithmetic_side_effects,
reason = "Multiplying finite field scalars gives no unexpected side effects"
)]
#[expect(clippy::arithmetic_side_effects, reason = "TODO: fix later")]
let parent_pt =
Scalar::from_repr(self.value.0.private_key_holder.nullifier_secret_key.into())
.expect("Key generated as scalar, must be valid representation")
@ -67,7 +64,8 @@ impl KeyNode for ChildKeysPrivate {
input.extend_from_slice(b"LEE_seed_priv");
input.extend_from_slice(&parent_pt.to_bytes());
input.extend_from_slice(&cci.to_le_bytes());
#[expect(clippy::big_endian_bytes, reason = "BIP-032 uses big endian")]
input.extend_from_slice(&cci.to_be_bytes());
let hash_value = hmac_sha512::HMAC::mac(input, self.ccc);
@ -90,7 +88,7 @@ impl KeyNode for ChildKeysPrivate {
value: (
KeyChain {
secret_spending_key: ssk,
nullifer_public_key: npk,
nullifier_public_key: npk,
viewing_public_key: vpk,
private_key_holder: PrivateKeyHolder {
nullifier_secret_key: nsk,
@ -113,18 +111,26 @@ impl KeyNode for ChildKeysPrivate {
}
fn account_id(&self) -> nssa::AccountId {
nssa::AccountId::from(&self.value.0.nullifer_public_key)
nssa::AccountId::from(&self.value.0.nullifier_public_key)
}
}
impl<'keys> From<&'keys ChildKeysPrivate> for &'keys (KeyChain, nssa::Account) {
fn from(value: &'keys ChildKeysPrivate) -> Self {
#[expect(
clippy::single_char_lifetime_names,
reason = "TODO add meaningful name"
)]
impl<'a> From<&'a ChildKeysPrivate> for &'a (KeyChain, nssa::Account) {
fn from(value: &'a ChildKeysPrivate) -> Self {
&value.value
}
}
impl<'keys> From<&'keys mut ChildKeysPrivate> for &'keys mut (KeyChain, nssa::Account) {
fn from(value: &'keys mut ChildKeysPrivate) -> Self {
#[expect(
clippy::single_char_lifetime_names,
reason = "TODO add meaningful name"
)]
impl<'a> From<&'a mut ChildKeysPrivate> for &'a mut (KeyChain, nssa::Account) {
fn from(value: &'a mut ChildKeysPrivate) -> Self {
&mut value.value
}
}
@ -166,7 +172,7 @@ mod tests {
7, 123, 125, 191, 233, 183, 201, 4, 20, 214, 155, 210, 45, 234, 27, 240, 194, 111, 97,
247, 155, 113, 122, 246, 192, 0, 70, 61, 76, 71, 70, 2,
]);
let expected_vsk: ViewingSecretKey = [
let expected_vsk = [
155, 90, 54, 75, 228, 130, 68, 201, 129, 251, 180, 195, 250, 64, 34, 230, 241, 204,
216, 50, 149, 156, 10, 67, 208, 74, 9, 10, 47, 59, 50, 202,
];
@ -179,7 +185,7 @@ mod tests {
assert!(expected_ssk == keys.value.0.secret_spending_key);
assert!(expected_ccc == keys.ccc);
assert!(expected_nsk == keys.value.0.private_key_holder.nullifier_secret_key);
assert!(expected_npk == keys.value.0.nullifer_public_key);
assert!(expected_npk == keys.value.0.nullifier_public_key);
assert!(expected_vsk == keys.value.0.private_key_holder.viewing_secret_key);
assert!(expected_vpk_as_bytes == keys.value.0.viewing_public_key.to_bytes());
}
@ -197,31 +203,31 @@ mod tests {
let child_node = ChildKeysPrivate::nth_child(&root_node, 42_u32);
let expected_ccc: [u8; 32] = [
145, 59, 225, 32, 54, 168, 14, 45, 60, 253, 57, 202, 31, 86, 142, 234, 51, 57, 154, 88,
132, 200, 92, 191, 220, 144, 42, 184, 108, 35, 226, 146,
27, 73, 133, 213, 214, 63, 217, 184, 164, 17, 172, 140, 223, 95, 255, 157, 11, 0, 58,
53, 82, 147, 121, 120, 199, 50, 30, 28, 103, 24, 121, 187,
];
let expected_nsk: NullifierSecretKey = [
19, 100, 119, 73, 191, 225, 234, 219, 129, 88, 40, 229, 63, 225, 189, 136, 69, 172,
221, 186, 147, 83, 150, 207, 70, 17, 228, 70, 113, 87, 227, 31,
124, 61, 40, 92, 33, 135, 3, 41, 200, 234, 3, 69, 102, 184, 57, 191, 106, 151, 194,
192, 103, 132, 141, 112, 249, 108, 192, 117, 24, 48, 70, 216,
];
let expected_npk = nssa_core::NullifierPublicKey([
133, 235, 223, 151, 12, 69, 26, 222, 60, 125, 235, 125, 167, 212, 201, 168, 101, 242,
111, 239, 1, 228, 12, 252, 146, 53, 75, 17, 187, 255, 122, 181,
116, 231, 246, 189, 145, 240, 37, 59, 219, 223, 216, 246, 116, 171, 223, 55, 197, 200,
134, 192, 221, 40, 218, 167, 239, 5, 11, 95, 147, 247, 162, 226,
]);
let expected_vsk: ViewingSecretKey = [
218, 219, 193, 132, 160, 6, 178, 194, 139, 248, 199, 81, 17, 133, 37, 201, 58, 104, 49,
222, 187, 46, 156, 93, 14, 118, 209, 243, 38, 101, 77, 45,
33, 155, 68, 60, 102, 70, 47, 105, 194, 129, 44, 26, 143, 198, 44, 244, 185, 31, 236,
252, 205, 89, 138, 107, 39, 38, 154, 73, 109, 166, 41, 114,
];
let expected_vpk_as_bytes: [u8; 33] = [
3, 164, 65, 167, 88, 167, 179, 51, 159, 27, 241, 174, 77, 174, 142, 106, 128, 96, 69,
74, 117, 231, 42, 193, 235, 153, 206, 116, 102, 7, 101, 192, 45,
2, 78, 213, 113, 117, 105, 162, 248, 175, 68, 128, 232, 106, 204, 208, 159, 11, 78, 48,
244, 127, 112, 46, 0, 93, 184, 1, 77, 132, 160, 75, 152, 88,
];
assert!(expected_ccc == child_node.ccc);
assert!(expected_nsk == child_node.value.0.private_key_holder.nullifier_secret_key);
assert!(expected_npk == child_node.value.0.nullifer_public_key);
assert!(expected_npk == child_node.value.0.nullifier_public_key);
assert!(expected_vsk == child_node.value.0.private_key_holder.viewing_secret_key);
assert!(expected_vpk_as_bytes == child_node.value.0.viewing_public_key.to_bytes());
}

View File

@ -13,17 +13,25 @@ pub struct ChildKeysPublic {
}
impl ChildKeysPublic {
#[expect(clippy::big_endian_bytes, reason = "BIP-032 uses big endian")]
fn compute_hash_value(&self, cci: u32) -> [u8; 64] {
let mut hash_input = vec![];
if 2_u32.pow(31) > cci {
// Non-harden
hash_input.extend_from_slice(self.cpk.value());
if ((2_u32).pow(31)).cmp(&cci) == std::cmp::Ordering::Greater {
// Non-harden.
// BIP-032 compatibility requires 1-byte header from the public_key;
// Not stored in `self.cpk.value()`.
let sk = secp256k1::SecretKey::from_byte_array(*self.csk.value())
.expect("32 bytes, within curve order");
let pk = secp256k1::PublicKey::from_secret_key(&secp256k1::Secp256k1::new(), &sk);
hash_input.extend_from_slice(&secp256k1::PublicKey::serialize(&pk));
} else {
// Harden
// Harden.
hash_input.extend_from_slice(&[0_u8]);
hash_input.extend_from_slice(self.csk.value());
}
hash_input.extend_from_slice(&cci.to_le_bytes());
hash_input.extend_from_slice(&cci.to_be_bytes());
hmac_sha512::HMAC::mac(hash_input, self.ccc)
}
@ -55,11 +63,13 @@ impl KeyNode for ChildKeysPublic {
)
.unwrap();
let csk = nssa::PrivateKey::try_new(
csk.add_tweak(&Scalar::from_le_bytes(*self.csk.value()).unwrap())
let csk = nssa::PrivateKey::try_new({
let scalar = Scalar::from_be_bytes(*self.csk.value()).unwrap();
csk.add_tweak(&scalar)
.expect("Expect a valid Scalar")
.secret_bytes(),
)
.secret_bytes()
})
.unwrap();
assert!(
@ -94,8 +104,12 @@ impl KeyNode for ChildKeysPublic {
}
}
impl<'keys> From<&'keys ChildKeysPublic> for &'keys nssa::PrivateKey {
fn from(value: &'keys ChildKeysPublic) -> Self {
#[expect(
clippy::single_char_lifetime_names,
reason = "TODO add meaningful name"
)]
impl<'a> From<&'a ChildKeysPublic> for &'a nssa::PrivateKey {
fn from(value: &'a ChildKeysPublic) -> Self {
&value.csk
}
}
@ -126,6 +140,7 @@ mod tests {
202, 148, 181, 228, 35, 222, 58, 84, 156, 24, 146, 86,
])
.unwrap();
let expected_cpk: PublicKey = PublicKey::try_new([
219, 141, 130, 105, 11, 203, 187, 124, 112, 75, 223, 22, 11, 164, 153, 127, 59, 247,
244, 166, 75, 66, 242, 224, 35, 156, 161, 75, 41, 51, 76, 245,
@ -149,26 +164,20 @@ mod tests {
let cci = (2_u32).pow(31) + 13;
let child_keys = ChildKeysPublic::nth_child(&root_keys, cci);
print!(
"{} {}",
child_keys.csk.value()[0],
child_keys.csk.value()[1]
);
let expected_ccc = [
126, 175, 244, 41, 41, 173, 134, 103, 139, 140, 195, 86, 194, 147, 116, 48, 71, 107,
253, 235, 114, 139, 60, 115, 226, 205, 215, 248, 240, 190, 196, 6,
149, 226, 13, 4, 194, 12, 69, 29, 9, 234, 209, 119, 98, 4, 128, 91, 37, 103, 192, 31,
130, 126, 123, 20, 90, 34, 173, 209, 101, 248, 155, 36,
];
let expected_csk: PrivateKey = PrivateKey::try_new([
128, 148, 53, 165, 222, 155, 163, 108, 186, 182, 124, 67, 90, 86, 59, 123, 95, 224,
171, 4, 51, 131, 254, 57, 241, 178, 82, 161, 204, 206, 79, 107,
9, 65, 33, 228, 25, 82, 219, 117, 91, 217, 11, 223, 144, 85, 246, 26, 123, 216, 107,
213, 33, 52, 188, 22, 198, 246, 71, 46, 245, 174, 16, 47,
])
.unwrap();
let expected_cpk: PublicKey = PublicKey::try_new([
149, 240, 55, 15, 178, 67, 245, 254, 44, 141, 95, 223, 238, 62, 85, 11, 248, 9, 11, 40,
69, 211, 116, 13, 189, 35, 8, 95, 233, 154, 129, 58,
142, 143, 238, 159, 105, 165, 224, 252, 108, 62, 53, 209, 176, 219, 249, 38, 90, 241,
201, 81, 194, 146, 236, 5, 83, 152, 238, 243, 138, 16, 229, 15,
])
.unwrap();
@ -189,26 +198,20 @@ mod tests {
let cci = 13;
let child_keys = ChildKeysPublic::nth_child(&root_keys, cci);
print!(
"{} {}",
child_keys.csk.value()[0],
child_keys.csk.value()[1]
);
let expected_ccc = [
50, 29, 113, 102, 49, 130, 64, 0, 247, 95, 135, 187, 118, 162, 65, 65, 194, 53, 189,
242, 66, 178, 168, 2, 51, 193, 155, 72, 209, 2, 207, 251,
79, 228, 242, 119, 211, 203, 198, 175, 95, 36, 4, 234, 139, 45, 137, 138, 54, 211, 187,
16, 28, 79, 80, 232, 216, 101, 145, 19, 101, 220, 217, 141,
];
let expected_csk: PrivateKey = PrivateKey::try_new([
162, 32, 211, 190, 180, 74, 151, 246, 189, 93, 8, 57, 182, 239, 125, 245, 192, 255, 24,
186, 251, 23, 194, 186, 252, 121, 190, 54, 147, 199, 1, 109,
185, 147, 32, 242, 145, 91, 123, 77, 42, 33, 134, 84, 12, 165, 117, 70, 158, 201, 95,
153, 14, 12, 92, 235, 128, 156, 194, 169, 68, 35, 165, 127,
])
.unwrap();
let expected_cpk: PublicKey = PublicKey::try_new([
183, 48, 207, 170, 221, 111, 118, 9, 40, 67, 123, 162, 159, 169, 34, 157, 23, 37, 232,
102, 231, 187, 199, 191, 205, 146, 159, 22, 79, 100, 10, 223,
119, 16, 145, 121, 97, 244, 186, 35, 136, 34, 140, 171, 206, 139, 11, 208, 207, 121,
158, 45, 28, 22, 140, 98, 161, 179, 212, 173, 238, 220, 2, 34,
])
.unwrap();
@ -230,19 +233,19 @@ mod tests {
let child_keys = ChildKeysPublic::nth_child(&root_keys, cci);
let expected_ccc = [
101, 15, 69, 152, 144, 22, 105, 89, 175, 21, 13, 50, 160, 167, 93, 80, 94, 99, 192,
252, 1, 126, 196, 217, 149, 164, 60, 75, 237, 90, 104, 83,
221, 208, 47, 189, 174, 152, 33, 25, 151, 114, 233, 191, 57, 15, 40, 140, 46, 87, 126,
58, 215, 40, 246, 111, 166, 113, 183, 145, 173, 11, 27, 182,
];
let expected_csk: PrivateKey = PrivateKey::try_new([
46, 196, 131, 199, 190, 180, 250, 222, 41, 188, 221, 156, 255, 239, 251, 207, 239, 202,
166, 216, 107, 236, 195, 48, 167, 69, 97, 13, 132, 117, 76, 89,
223, 29, 87, 189, 126, 24, 117, 225, 190, 57, 0, 143, 207, 168, 231, 139, 170, 192, 81,
254, 126, 10, 115, 42, 141, 157, 70, 171, 199, 231, 198, 132,
])
.unwrap();
let expected_cpk: PublicKey = PublicKey::try_new([
93, 151, 154, 238, 175, 198, 53, 146, 255, 43, 37, 52, 214, 165, 69, 161, 38, 20, 68,
166, 143, 80, 149, 216, 124, 203, 240, 114, 168, 111, 33, 83,
96, 123, 245, 51, 214, 216, 215, 205, 70, 145, 105, 221, 166, 169, 122, 27, 94, 112,
228, 110, 249, 177, 85, 173, 180, 248, 185, 199, 112, 246, 83, 33,
])
.unwrap();

View File

@ -1,7 +1,7 @@
use std::{collections::BTreeMap, sync::Arc};
use std::collections::BTreeMap;
use anyhow::Result;
use common::sequencer_client::SequencerClient;
use nssa::{Account, AccountId};
use serde::{Deserialize, Serialize};
use crate::key_management::{
@ -197,40 +197,6 @@ impl<N: KeyNode> KeyTree<N> {
}
impl KeyTree<ChildKeysPrivate> {
/// Cleanup of all non-initialized accounts in a private tree.
///
/// For given `depth` checks children to a tree such that their `ChainIndex::depth(&self) <
/// depth`.
///
/// If account is default, removes them.
///
/// Chain must be parsed for accounts beforehand.
///
/// Fast, leaves gaps between accounts.
pub fn cleanup_tree_remove_uninit_for_depth(&mut self, depth: u32) {
let mut id_stack = vec![ChainIndex::root()];
while let Some(curr_id) = id_stack.pop() {
if let Some(node) = self.key_map.get(&curr_id)
&& node.value.1 == nssa::Account::default()
&& curr_id != ChainIndex::root()
{
let addr = node.account_id();
self.remove(addr);
}
let mut next_id = curr_id.nth_child(0);
while (next_id.depth()) < depth {
id_stack.push(next_id.clone());
next_id = match next_id.next_in_line() {
Some(id) => id,
None => break,
};
}
}
}
/// Cleanup of non-initialized accounts in a private tree.
///
/// If account is default, removes them, stops at first non-default account.
@ -259,56 +225,17 @@ impl KeyTree<ChildKeysPrivate> {
}
impl KeyTree<ChildKeysPublic> {
/// Cleanup of all non-initialized accounts in a public tree.
///
/// For given `depth` checks children to a tree such that their `ChainIndex::depth(&self) <
/// depth`.
///
/// If account is default, removes them.
///
/// Fast, leaves gaps between accounts.
pub async fn cleanup_tree_remove_ininit_for_depth(
&mut self,
depth: u32,
client: Arc<SequencerClient>,
) -> Result<()> {
let mut id_stack = vec![ChainIndex::root()];
while let Some(curr_id) = id_stack.pop() {
if let Some(node) = self.key_map.get(&curr_id) {
let address = node.account_id();
let node_acc = client.get_account(address).await?.account;
if node_acc == nssa::Account::default() && curr_id != ChainIndex::root() {
self.remove(address);
}
}
let mut next_id = curr_id.nth_child(0);
while (next_id.depth()) < depth {
id_stack.push(next_id.clone());
next_id = match next_id.next_in_line() {
Some(id) => id,
None => break,
};
}
}
Ok(())
}
/// Cleanup of non-initialized accounts in a public tree.
///
/// If account is default, removes them, stops at first non-default account.
///
/// Walks through tree in lairs of same depth using `ChainIndex::chain_ids_at_depth()`.
/// Walks through tree in layers of same depth using `ChainIndex::chain_ids_at_depth()`.
///
/// Slow, maintains tree consistency.
pub async fn cleanup_tree_remove_uninit_layered(
pub async fn cleanup_tree_remove_uninit_layered<F: Future<Output = Result<Account>>>(
&mut self,
depth: u32,
client: Arc<SequencerClient>,
get_account: impl Fn(AccountId) -> F,
) -> Result<()> {
let depth = usize::try_from(depth).expect("Depth is expected to fit in usize");
'outer: for i in (1..depth).rev() {
@ -316,7 +243,7 @@ impl KeyTree<ChildKeysPublic> {
for id in ChainIndex::chain_ids_at_depth(i) {
if let Some(node) = self.key_map.get(&id) {
let address = node.account_id();
let node_acc = client.get_account(address).await?.account;
let node_acc = get_account(address).await?;
if node_acc == nssa::Account::default() {
let addr = node.account_id();

View File

@ -16,7 +16,7 @@ pub type PublicAccountSigningKey = [u8; 32];
pub struct KeyChain {
pub secret_spending_key: SecretSpendingKey,
pub private_key_holder: PrivateKeyHolder,
pub nullifer_public_key: NullifierPublicKey,
pub nullifier_public_key: NullifierPublicKey,
pub viewing_public_key: ViewingPublicKey,
}
@ -30,13 +30,13 @@ impl KeyChain {
let private_key_holder = secret_spending_key.produce_private_key_holder(None);
let nullifer_public_key = private_key_holder.generate_nullifier_public_key();
let nullifier_public_key = private_key_holder.generate_nullifier_public_key();
let viewing_public_key = private_key_holder.generate_viewing_public_key();
Self {
secret_spending_key,
private_key_holder,
nullifer_public_key,
nullifier_public_key,
viewing_public_key,
}
}
@ -50,13 +50,13 @@ impl KeyChain {
let private_key_holder = secret_spending_key.produce_private_key_holder(None);
let nullifer_public_key = private_key_holder.generate_nullifier_public_key();
let nullifier_public_key = private_key_holder.generate_nullifier_public_key();
let viewing_public_key = private_key_holder.generate_viewing_public_key();
Self {
secret_spending_key,
private_key_holder,
nullifer_public_key,
nullifier_public_key,
viewing_public_key,
}
}
@ -93,7 +93,7 @@ mod tests {
// Check that key holder fields are initialized with expected types
assert_ne!(
account_id_key_holder.nullifer_public_key.as_ref(),
account_id_key_holder.nullifier_public_key.as_ref(),
&[0_u8; 32]
);
}
@ -119,7 +119,7 @@ mod tests {
let utxo_secret_key_holder = top_secret_key_holder.produce_private_key_holder(None);
let nullifer_public_key = utxo_secret_key_holder.generate_nullifier_public_key();
let nullifier_public_key = utxo_secret_key_holder.generate_nullifier_public_key();
let viewing_public_key = utxo_secret_key_holder.generate_viewing_public_key();
let pub_account_signing_key = nssa::PrivateKey::new_os_random();
@ -150,7 +150,7 @@ mod tests {
println!("Account {:?}", account.value().to_base58());
println!(
"Nulifier public key {:?}",
hex::encode(nullifer_public_key.to_byte_array())
hex::encode(nullifier_public_key.to_byte_array())
);
println!(
"Viewing public key {:?}",
@ -183,7 +183,7 @@ mod tests {
fn non_trivial_chain_index() {
let keys = account_with_chain_index_2_for_tests();
let eph_key_holder = EphemeralKeyHolder::new(&keys.nullifer_public_key);
let eph_key_holder = EphemeralKeyHolder::new(&keys.nullifier_public_key);
let key_sender = eph_key_holder.calculate_shared_secret_sender(&keys.viewing_public_key);
let key_receiver = keys.calculate_shared_secret_receiver(

View File

@ -10,16 +10,16 @@ use sha2::{Digest as _, digest::FixedOutput as _};
const NSSA_ENTROPY_BYTES: [u8; 32] = [0; 32];
#[derive(Debug)]
/// Seed holder. Non-clonable to ensure that different holders use different seeds.
/// Produces `TopSecretKeyHolder` objects.
#[derive(Debug)]
pub struct SeedHolder {
// ToDo: Needs to be vec as serde derives is not implemented for [u8; 64]
pub(crate) seed: Vec<u8>,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
/// Secret spending key object. Can produce `PrivateKeyHolder` objects.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
pub struct SecretSpendingKey(pub(crate) [u8; 32]);
pub type ViewingSecretKey = Scalar;
@ -79,6 +79,7 @@ impl SeedHolder {
impl SecretSpendingKey {
#[must_use]
#[expect(clippy::big_endian_bytes, reason = "BIP-032 uses big endian")]
pub fn generate_nullifier_secret_key(&self, index: Option<u32>) -> NullifierSecretKey {
const PREFIX: &[u8; 8] = b"LEE/keys";
const SUFFIX_1: &[u8; 1] = &[1];
@ -93,13 +94,14 @@ impl SecretSpendingKey {
hasher.update(PREFIX);
hasher.update(self.0);
hasher.update(SUFFIX_1);
hasher.update(index.to_le_bytes());
hasher.update(index.to_be_bytes());
hasher.update(SUFFIX_2);
<NullifierSecretKey>::from(hasher.finalize_fixed())
}
#[must_use]
#[expect(clippy::big_endian_bytes, reason = "BIP-032 uses big endian")]
pub fn generate_viewing_secret_key(&self, index: Option<u32>) -> ViewingSecretKey {
const PREFIX: &[u8; 8] = b"LEE/keys";
const SUFFIX_1: &[u8; 1] = &[2];
@ -114,7 +116,7 @@ impl SecretSpendingKey {
hasher.update(PREFIX);
hasher.update(self.0);
hasher.update(SUFFIX_1);
hasher.update(index.to_le_bytes());
hasher.update(index.to_be_bytes());
hasher.update(SUFFIX_2);
hasher.finalize_fixed().into()

View File

@ -46,7 +46,7 @@ impl NSSAUserData {
) -> bool {
let mut check_res = true;
for (account_id, (key, _)) in accounts_keys_map {
let expected_account_id = nssa::AccountId::from(&key.nullifer_public_key);
let expected_account_id = nssa::AccountId::from(&key.nullifier_public_key);
if expected_account_id != *account_id {
println!("{expected_account_id}, {account_id}");
check_res = false;
@ -66,13 +66,13 @@ impl NSSAUserData {
) -> Result<Self> {
if !Self::valid_public_key_transaction_pairing_check(&default_accounts_keys) {
anyhow::bail!(
"Key transaction pairing check not satisfied, there is account_ids, which is not derived from keys"
"Key transaction pairing check not satisfied, there are public account_ids, which are not derived from keys"
);
}
if !Self::valid_private_key_transaction_pairing_check(&default_accounts_key_chains) {
anyhow::bail!(
"Key transaction pairing check not satisfied, there is account_ids, which is not derived from keys"
"Key transaction pairing check not satisfied, there are private account_ids, which are not derived from keys"
);
}

View File

@ -14,6 +14,7 @@ anyhow.workspace = true
thiserror.workspace = true
risc0-zkvm.workspace = true
serde.workspace = true
serde_with.workspace = true
sha2.workspace = true
rand.workspace = true
borsh.workspace = true
@ -37,4 +38,4 @@ test-case = "3.3.1"
[features]
default = []
prove = ["risc0-zkvm/prove"]
test-utils = []
test-utils = []

View File

@ -1,3 +1,5 @@
use std::str::FromStr;
use borsh::{BorshDeserialize, BorshSerialize};
pub use private_key::PrivateKey;
pub use public_key::PublicKey;
@ -12,11 +14,27 @@ pub struct Signature {
}
impl std::fmt::Debug for Signature {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
std::fmt::Display::fmt(self, f)
}
}
impl std::fmt::Display for Signature {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", hex::encode(self.value))
}
}
impl FromStr for Signature {
type Err = hex::FromHexError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut bytes = [0_u8; 64];
hex::decode_to_slice(s, &mut bytes)?;
Ok(Self { value: bytes })
}
}
impl Signature {
#[must_use]
pub fn new(key: &PrivateKey, message: &[u8]) -> Self {

View File

@ -1,13 +1,37 @@
use std::str::FromStr;
use rand::{Rng as _, rngs::OsRng};
use serde::{Deserialize, Serialize};
use serde_with::{DeserializeFromStr, SerializeDisplay};
use crate::error::NssaError;
// TODO: Remove Debug, Clone, Serialize, Deserialize, PartialEq and Eq for security reasons
// TODO: Implement Zeroize
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[derive(Clone, SerializeDisplay, DeserializeFromStr, PartialEq, Eq)]
pub struct PrivateKey([u8; 32]);
impl std::fmt::Debug for PrivateKey {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
std::fmt::Display::fmt(self, f)
}
}
impl std::fmt::Display for PrivateKey {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", hex::encode(self.0))
}
}
impl FromStr for PrivateKey {
type Err = NssaError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut bytes = [0_u8; 32];
hex::decode_to_slice(s, &mut bytes).map_err(|_err| NssaError::InvalidPrivateKey)?;
Self::try_new(bytes)
}
}
impl PrivateKey {
#[must_use]
pub fn new_os_random() -> Self {

View File

@ -1,19 +1,38 @@
use std::str::FromStr;
use borsh::{BorshDeserialize, BorshSerialize};
use nssa_core::account::AccountId;
use serde::{Deserialize, Serialize};
use serde_with::{DeserializeFromStr, SerializeDisplay};
use sha2::{Digest as _, Sha256};
use crate::{PrivateKey, error::NssaError};
#[derive(Clone, PartialEq, Eq, BorshSerialize, Serialize, Deserialize)]
#[derive(Clone, PartialEq, Eq, BorshSerialize, SerializeDisplay, DeserializeFromStr)]
pub struct PublicKey([u8; 32]);
impl std::fmt::Debug for PublicKey {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
std::fmt::Display::fmt(self, f)
}
}
impl std::fmt::Display for PublicKey {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", hex::encode(self.0))
}
}
impl FromStr for PublicKey {
type Err = NssaError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut bytes = [0_u8; 32];
hex::decode_to_slice(s, &mut bytes)
.map_err(|_err| NssaError::InvalidPublicKey(secp256k1::Error::InvalidPublicKey))?;
Self::try_new(bytes)
}
}
impl BorshDeserialize for PublicKey {
fn deserialize_reader<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
let mut buf = [0_u8; 32];

View File

@ -8,10 +8,9 @@ license = { workspace = true }
workspace = true
[dependencies]
nssa = { workspace = true, optional = true, features = ["test-utils"], default-features = true }
nssa_core.workspace = true
token_core.workspace = true
amm_core.workspace = true
[features]
nssa = ["dep:nssa"]
[dev-dependencies]
nssa = { workspace = true, features = ["test-utils"] }

View File

@ -4,7 +4,6 @@ use amm_core::{
PoolDefinition, compute_liquidity_token_pda, compute_liquidity_token_pda_seed,
compute_pool_pda, compute_vault_pda, compute_vault_pda_seed,
};
#[cfg(feature = "nssa")]
use nssa::{
PrivateKey, PublicKey, PublicTransaction, V02State, program::Program, public_transaction,
};
@ -25,16 +24,15 @@ struct BalanceForTests;
struct ChainedCallForTests;
struct IdForTests;
struct AccountWithMetadataForTests;
#[cfg(feature = "nssa")]
struct PrivateKeysForTests;
#[cfg(feature = "nssa")]
struct IdForExeTests;
#[cfg(feature = "nssa")]
struct BalanceForExeTests;
#[cfg(feature = "nssa")]
struct AccountsForExeTests;
#[cfg(feature = "nssa")]
impl PrivateKeysForTests {
fn user_token_a_key() -> PrivateKey {
PrivateKey::try_new([31; 32]).expect("Keys constructor expects valid private key")
@ -1008,7 +1006,6 @@ impl AccountWithMetadataForTests {
}
}
#[cfg(feature = "nssa")]
impl BalanceForExeTests {
fn user_token_a_holding_init() -> u128 {
10_000
@ -1172,7 +1169,6 @@ impl BalanceForExeTests {
}
}
#[cfg(feature = "nssa")]
impl IdForExeTests {
fn pool_definition_id() -> AccountId {
amm_core::compute_pool_pda(
@ -1229,7 +1225,6 @@ impl IdForExeTests {
}
}
#[cfg(feature = "nssa")]
impl AccountsForExeTests {
fn user_token_a_holding() -> Account {
Account {
@ -2641,7 +2636,6 @@ fn new_definition_lp_symmetric_amounts() {
assert_eq!(chained_call_lp, expected_lp_call);
}
#[cfg(feature = "nssa")]
fn state_for_amm_tests() -> V02State {
let initial_data = [];
let mut state = V02State::new_with_genesis_accounts(&initial_data, &[]);
@ -2685,7 +2679,6 @@ fn state_for_amm_tests() -> V02State {
state
}
#[cfg(feature = "nssa")]
fn state_for_amm_tests_with_new_def() -> V02State {
let initial_data = [];
let mut state = V02State::new_with_genesis_accounts(&initial_data, &[]);
@ -2708,7 +2701,6 @@ fn state_for_amm_tests_with_new_def() -> V02State {
state
}
#[cfg(feature = "nssa")]
#[test]
fn simple_amm_remove() {
let mut state = state_for_amm_tests();
@ -2768,7 +2760,6 @@ fn simple_amm_remove() {
assert_eq!(user_token_lp_post, expected_user_token_lp);
}
#[cfg(feature = "nssa")]
#[test]
fn simple_amm_new_definition_inactive_initialized_pool_and_uninit_user_lp() {
let mut state = state_for_amm_tests_with_new_def();
@ -2849,7 +2840,6 @@ fn simple_amm_new_definition_inactive_initialized_pool_and_uninit_user_lp() {
assert_eq!(user_token_lp_post, expected_user_token_lp);
}
#[cfg(feature = "nssa")]
#[test]
fn simple_amm_new_definition_inactive_initialized_pool_init_user_lp() {
let mut state = state_for_amm_tests_with_new_def();
@ -2934,7 +2924,6 @@ fn simple_amm_new_definition_inactive_initialized_pool_init_user_lp() {
assert_eq!(user_token_lp_post, expected_user_token_lp);
}
#[cfg(feature = "nssa")]
#[test]
fn simple_amm_new_definition_uninitialized_pool() {
let mut state = state_for_amm_tests_with_new_def();
@ -3007,7 +2996,6 @@ fn simple_amm_new_definition_uninitialized_pool() {
assert_eq!(user_token_lp_post, expected_user_token_lp);
}
#[cfg(feature = "nssa")]
#[test]
fn simple_amm_add() {
let mut state = state_for_amm_tests();
@ -3070,7 +3058,6 @@ fn simple_amm_add() {
assert_eq!(user_token_lp_post, expected_user_token_lp);
}
#[cfg(feature = "nssa")]
#[test]
fn simple_amm_swap_1() {
let mut state = state_for_amm_tests();
@ -3122,7 +3109,6 @@ fn simple_amm_swap_1() {
assert_eq!(user_token_b_post, expected_user_token_b);
}
#[cfg(feature = "nssa")]
#[test]
fn simple_amm_swap_2() {
let mut state = state_for_amm_tests();

View File

@ -7,7 +7,7 @@ use common::{
transaction::NSSATransaction,
};
use nssa::V02State;
use storage::sequencer::RocksDBIO;
use storage::{error::DbError, sequencer::RocksDBIO};
pub struct SequencerStore {
dbio: RocksDBIO,
@ -42,8 +42,8 @@ impl SequencerStore {
})
}
pub fn get_block_at_id(&self, id: u64) -> Result<Block> {
Ok(self.dbio.get_block(id)?)
pub fn get_block_at_id(&self, id: u64) -> Result<Option<Block>, DbError> {
self.dbio.get_block(id)
}
pub fn delete_block_at_id(&mut self, block_id: u64) -> Result<()> {
@ -56,16 +56,20 @@ impl SequencerStore {
/// Returns the transaction corresponding to the given hash, if it exists in the blockchain.
pub fn get_transaction_by_hash(&self, hash: HashType) -> Option<NSSATransaction> {
let block_id = self.tx_hash_to_block_map.get(&hash);
let block = block_id.map(|&id| self.get_block_at_id(id));
if let Some(Ok(block)) = block {
for transaction in block.body.transactions {
if transaction.hash() == hash {
return Some(transaction);
}
let block_id = *self.tx_hash_to_block_map.get(&hash)?;
let block = self
.get_block_at_id(block_id)
.ok()
.flatten()
.expect("Block should be present since the hash is in the map");
for transaction in block.body.transactions {
if transaction.hash() == hash {
return Some(transaction);
}
}
None
panic!(
"Transaction hash was in the map but transaction was not found in the block. This should never happen."
);
}
pub fn latest_block_meta(&self) -> Result<BlockMeta> {
@ -244,7 +248,7 @@ mod tests {
node_store.update(&block, [1; 32], &dummy_state).unwrap();
// Verify initial status is Pending
let retrieved_block = node_store.get_block_at_id(block_id).unwrap();
let retrieved_block = node_store.get_block_at_id(block_id).unwrap().unwrap();
assert!(matches!(
retrieved_block.bedrock_status,
common::block::BedrockStatus::Pending
@ -254,7 +258,7 @@ mod tests {
node_store.mark_block_as_finalized(block_id).unwrap();
// Verify status is now Finalized
let finalized_block = node_store.get_block_at_id(block_id).unwrap();
let finalized_block = node_store.get_block_at_id(block_id).unwrap().unwrap();
assert!(matches!(
finalized_block.bedrock_status,
common::block::BedrockStatus::Finalized

View File

@ -22,8 +22,6 @@ use url::Url;
pub struct SequencerConfig {
/// Home dir of sequencer storage.
pub home: PathBuf,
/// Override rust log (env var logging level).
pub override_rust_log: Option<String>,
/// Genesis id.
pub genesis_id: u64,
/// If `True`, then adds random sequence of bytes to genesis block.
@ -41,8 +39,6 @@ pub struct SequencerConfig {
/// Interval in which pending blocks are retried.
#[serde(with = "humantime_serde")]
pub retry_pending_blocks_timeout: Duration,
/// Port to listen.
pub port: u16,
/// List of initial accounts data.
pub initial_accounts: Vec<AccountInitialData>,
/// List of initial commitments.

View File

@ -15,6 +15,7 @@ use logos_blockchain_key_management_system_service::keys::{ED25519_SECRET_KEY_SI
use mempool::{MemPool, MemPoolHandle};
#[cfg(feature = "mock")]
pub use mock::SequencerCoreWithMockClients;
pub use storage::error::DbError;
use crate::{
block_settlement_client::{BlockSettlementClient, BlockSettlementClientTrait, MsgId},
@ -392,14 +393,12 @@ mod tests {
SequencerConfig {
home,
override_rust_log: Some("info".to_owned()),
genesis_id: 1,
is_genesis_random: false,
max_num_tx_in_block: 10,
max_block_size: bytesize::ByteSize::mib(1),
mempool_max_size: 10000,
block_create_timeout: Duration::from_secs(1),
port: 8080,
initial_accounts,
initial_commitments: vec![],
signing_key: *sequencer_sign_key_for_testing().value(),
@ -480,7 +479,6 @@ mod tests {
assert_eq!(sequencer.chain_height, config.genesis_id);
assert_eq!(sequencer.sequencer_config.max_num_tx_in_block, 10);
assert_eq!(sequencer.sequencer_config.port, 8080);
let acc1_account_id = config.initial_accounts[0].account_id;
let acc2_account_id = config.initial_accounts[1].account_id;
@ -698,6 +696,7 @@ mod tests {
let block = sequencer
.store
.get_block_at_id(sequencer.chain_height)
.unwrap()
.unwrap();
// Only one should be included in the block
@ -725,6 +724,7 @@ mod tests {
let block = sequencer
.store
.get_block_at_id(sequencer.chain_height)
.unwrap()
.unwrap();
assert_eq!(block.body.transactions, vec![tx.clone()]);
@ -736,6 +736,7 @@ mod tests {
let block = sequencer
.store
.get_block_at_id(sequencer.chain_height)
.unwrap()
.unwrap();
assert!(block.body.transactions.is_empty());
}
@ -770,6 +771,7 @@ mod tests {
let block = sequencer
.store
.get_block_at_id(sequencer.chain_height)
.unwrap()
.unwrap();
assert_eq!(block.body.transactions, vec![tx.clone()]);
}
@ -888,6 +890,7 @@ mod tests {
let new_block = sequencer
.store
.get_block_at_id(sequencer.chain_height)
.unwrap()
.unwrap();
assert_eq!(

View File

@ -1,5 +1,5 @@
[package]
name = "sequencer_runner"
name = "sequencer_service"
version = "0.1.0"
edition = "2024"
license = { workspace = true }
@ -9,20 +9,25 @@ workspace = true
[dependencies]
common.workspace = true
nssa.workspace = true
mempool.workspace = true
sequencer_core = { workspace = true, features = ["testnet"] }
sequencer_rpc.workspace = true
sequencer_service_protocol.workspace = true
sequencer_service_rpc = { workspace = true, features = ["server"] }
indexer_service_rpc = { workspace = true, features = ["client"] }
clap = { workspace = true, features = ["derive", "env"] }
anyhow.workspace = true
env_logger.workspace = true
log.workspace = true
actix.workspace = true
actix-web.workspace = true
tokio.workspace = true
tokio-util.workspace = true
jsonrpsee.workspace = true
futures.workspace = true
bytesize.workspace = true
borsh.workspace = true
[features]
default = []
# Runs the sequencer in standalone mode without depending on Bedrock and Indexer services.
standalone = ["sequencer_core/mock", "sequencer_rpc/standalone"]
standalone = ["sequencer_core/mock"]

View File

@ -40,7 +40,7 @@ RUN r0vm --version
# Install logos blockchain circuits
RUN curl -sSL https://raw.githubusercontent.com/logos-blockchain/logos-blockchain/main/scripts/setup-logos-blockchain-circuits.sh | bash
WORKDIR /sequencer_runner
WORKDIR /sequencer_service
# Build argument to enable standalone feature (defaults to false)
ARG STANDALONE=false
@ -48,17 +48,17 @@ ARG STANDALONE=false
# Planner stage - generates dependency recipe
FROM chef AS planner
COPY . .
RUN cargo chef prepare --bin sequencer_runner --recipe-path recipe.json
RUN cargo chef prepare --bin sequencer_service --recipe-path recipe.json
# Builder stage - builds dependencies and application
FROM chef AS builder
ARG STANDALONE
COPY --from=planner /sequencer_runner/recipe.json recipe.json
COPY --from=planner /sequencer_service/recipe.json recipe.json
# Build dependencies only (this layer will be cached)
RUN if [ "$STANDALONE" = "true" ]; then \
cargo chef cook --bin sequencer_runner --features standalone --release --recipe-path recipe.json; \
cargo chef cook --bin sequencer_service --features standalone --release --recipe-path recipe.json; \
else \
cargo chef cook --bin sequencer_runner --release --recipe-path recipe.json; \
cargo chef cook --bin sequencer_service --release --recipe-path recipe.json; \
fi
# Copy source code
@ -66,13 +66,13 @@ COPY . .
# Build the actual application
RUN if [ "$STANDALONE" = "true" ]; then \
cargo build --release --features standalone --bin sequencer_runner; \
cargo build --release --features standalone --bin sequencer_service; \
else \
cargo build --release --bin sequencer_runner; \
cargo build --release --bin sequencer_service; \
fi
# Strip debug symbols to reduce binary size
RUN strip /sequencer_runner/target/release/sequencer_runner
RUN strip /sequencer_service/target/release/sequencer_service
# Runtime stage - minimal image
FROM debian:trixie-slim
@ -84,11 +84,11 @@ RUN apt-get update \
# Create non-root user for security
RUN useradd -m -u 1000 -s /bin/bash sequencer_user && \
mkdir -p /sequencer_runner /etc/sequencer_runner && \
chown -R sequencer_user:sequencer_user /sequencer_runner /etc/sequencer_runner
mkdir -p /sequencer_service /etc/sequencer_service && \
chown -R sequencer_user:sequencer_user /sequencer_service /etc/sequencer_service
# Copy binary from builder
COPY --from=builder --chown=sequencer_user:sequencer_user /sequencer_runner/target/release/sequencer_runner /usr/local/bin/sequencer_runner
COPY --from=builder --chown=sequencer_user:sequencer_user /sequencer_service/target/release/sequencer_service /usr/local/bin/sequencer_service
# Copy r0vm binary from builder
COPY --from=builder --chown=sequencer_user:sequencer_user /usr/local/bin/r0vm /usr/local/bin/r0vm
@ -97,7 +97,7 @@ COPY --from=builder --chown=sequencer_user:sequencer_user /usr/local/bin/r0vm /u
COPY --from=builder --chown=sequencer_user:sequencer_user /root/.logos-blockchain-circuits /home/sequencer_user/.logos-blockchain-circuits
# Copy entrypoint script
COPY sequencer_runner/docker-entrypoint.sh /docker-entrypoint.sh
COPY sequencer/service/docker-entrypoint.sh /docker-entrypoint.sh
RUN chmod +x /docker-entrypoint.sh
# Expose default port
@ -124,5 +124,5 @@ USER root
ENTRYPOINT ["/docker-entrypoint.sh"]
WORKDIR /sequencer_runner
CMD ["sequencer_runner", "/etc/sequencer_runner"]
WORKDIR /sequencer_service
CMD ["sequencer_service", "/etc/sequencer_service/sequencer_config.json"]

View File

@ -1,6 +1,5 @@
{
"home": ".",
"override_rust_log": null,
"genesis_id": 1,
"is_genesis_random": true,
"max_num_tx_in_block": 20,
@ -8,7 +7,6 @@
"mempool_max_size": 1000,
"block_create_timeout": "15s",
"retry_pending_blocks_timeout": "5s",
"port": 3040,
"bedrock_config": {
"backoff": {
"start_delay": "100ms",
@ -20,50 +18,50 @@
"indexer_rpc_url": "ws://localhost:8779",
"initial_accounts": [
{
"account_id": "6iArKUXxhUJqS7kCaPNhwMWt3ro71PDyBj7jwAyE2VQV",
"account_id": "CbgR6tj5kWx5oziiFptM7jMvrQeYY3Mzaao6ciuhSr2r",
"balance": 10000
},
{
"account_id": "7wHg9sbJwc6h3NP1S9bekfAzB8CHifEcxKswCKUt3YQo",
"account_id": "2RHZhw9h534Zr3eq2RGhQete2Hh667foECzXPmSkGni2",
"balance": 20000
}
],
"initial_commitments": [
{
"npk":[
177,
64,
1,
"npk": [
139,
19,
158,
11,
87,
38,
254,
159,
155,
231,
165,
1,
94,
64,
137,
243,
76,
249,
101,
251,
129,
33,
101,
189,
30,
42,
11,
191,
34,
103,
186,
227,
230
] ,
85,
206,
132,
228,
220,
114,
145,
89,
113,
156,
238,
142,
242,
74,
182,
91,
43,
100,
6,
190,
31,
15,
31,
88,
96,
204
],
"account": {
"program_owner": [
0,
@ -82,38 +80,38 @@
},
{
"npk": [
32,
67,
72,
164,
106,
53,
66,
239,
141,
15,
52,
230,
136,
177,
2,
236,
207,
243,
173,
134,
135,
210,
143,
87,
232,
33,
223,
54,
226,
10,
71,
215,
128,
194,
120,
113,
224,
4,
165
254,
143,
172,
24,
244,
243,
208,
65,
112,
118,
70,
217,
240,
69,
100,
129,
3,
121,
25,
213,
132,
42,
45
],
"account": {
"program_owner": [
@ -166,4 +164,4 @@
37,
37
]
}
}

View File

@ -1,13 +1,11 @@
{
"home": "/var/lib/sequencer_runner",
"override_rust_log": null,
"home": "/var/lib/sequencer_service",
"genesis_id": 1,
"is_genesis_random": true,
"max_num_tx_in_block": 20,
"max_block_size": "1 MiB",
"mempool_max_size": 10000,
"block_create_timeout": "10s",
"port": 3040,
"retry_pending_blocks_timeout": "7s",
"bedrock_config": {
"backoff": {
@ -20,49 +18,49 @@
"indexer_rpc_url": "ws://localhost:8779",
"initial_accounts": [
{
"account_id": "6iArKUXxhUJqS7kCaPNhwMWt3ro71PDyBj7jwAyE2VQV",
"account_id": "CbgR6tj5kWx5oziiFptM7jMvrQeYY3Mzaao6ciuhSr2r",
"balance": 10000
},
{
"account_id": "7wHg9sbJwc6h3NP1S9bekfAzB8CHifEcxKswCKUt3YQo",
"account_id": "2RHZhw9h534Zr3eq2RGhQete2Hh667foECzXPmSkGni2",
"balance": 20000
}
],
"initial_commitments": [
{
"npk": [
63,
202,
178,
139,
19,
158,
11,
155,
231,
183,
82,
237,
212,
216,
221,
215,
255,
153,
101,
177,
161,
254,
210,
128,
122,
54,
190,
230,
151,
183,
64,
225,
229,
113,
1,
85,
206,
132,
228,
97
220,
114,
145,
89,
113,
156,
238,
142,
242,
74,
182,
91,
43,
100,
6,
190,
31,
15,
31,
88,
96,
204
],
"account": {
"program_owner": [
@ -82,38 +80,38 @@
},
{
"npk": [
192,
251,
166,
243,
167,
236,
84,
249,
35,
136,
130,
173,
134,
33,
223,
54,
226,
10,
71,
215,
254,
143,
172,
219,
225,
161,
139,
229,
89,
243,
125,
194,
213,
209,
30,
23,
174,
100,
24,
244,
124,
74,
140,
47
243,
208,
65,
112,
118,
70,
217,
240,
69,
100,
129,
3,
121,
25,
213,
132,
42,
45
],
"account": {
"program_owner": [

View File

@ -0,0 +1,14 @@
services:
sequencer_service:
image: lssa/sequencer_service
build:
context: ../..
dockerfile: sequencer/service/Dockerfile
container_name: sequencer_service
ports:
- "3040:3040"
volumes:
# Mount configuration file
- ./configs/docker/sequencer_config.json:/etc/sequencer_service/sequencer_config.json
# Mount data folder
- ./data:/var/lib/sequencer_service

View File

@ -1,11 +1,11 @@
#!/bin/sh
# This is an entrypoint script for the sequencer_runner Docker container,
# This is an entrypoint script for the sequencer_service Docker container,
# it's not meant to be executed outside of the container.
set -e
CONFIG="/etc/sequencer_runner/sequencer_config.json"
CONFIG="/etc/sequencer/service/sequencer_config.json"
# Check config file exists
if [ ! -f "$CONFIG" ]; then

View File

@ -0,0 +1,13 @@
[package]
name = "sequencer_service_protocol"
version = "0.1.0"
edition = "2024"
license = { workspace = true }
[lints]
workspace = true
[dependencies]
common.workspace = true
nssa.workspace = true
nssa_core.workspace = true

View File

@ -0,0 +1,9 @@
//! Reexports of types used by sequencer rpc specification.
pub use common::{
HashType,
block::{Block, BlockId},
transaction::NSSATransaction,
};
pub use nssa::{Account, AccountId, ProgramId};
pub use nssa_core::{Commitment, MembershipProof, account::Nonce};

View File

@ -0,0 +1,17 @@
[package]
name = "sequencer_service_rpc"
version = "0.1.0"
edition = "2024"
license = { workspace = true }
[lints]
workspace = true
[dependencies]
sequencer_service_protocol.workspace = true
jsonrpsee = { workspace = true, features = ["macros"] }
[features]
client = ["jsonrpsee/client"]
server = ["jsonrpsee/server"]

View File

@ -0,0 +1,92 @@
use std::collections::BTreeMap;
use jsonrpsee::proc_macros::rpc;
#[cfg(feature = "server")]
use jsonrpsee::types::ErrorObjectOwned;
#[cfg(feature = "client")]
pub use jsonrpsee::{core::ClientError, http_client::HttpClientBuilder as SequencerClientBuilder};
use sequencer_service_protocol::{
Account, AccountId, Block, BlockId, Commitment, HashType, MembershipProof, NSSATransaction,
Nonce, ProgramId,
};
#[cfg(all(not(feature = "server"), not(feature = "client")))]
compile_error!("At least one of `server` or `client` features must be enabled.");
/// Type alias for RPC client. Only available when `client` feature is enabled.
///
/// It's cheap to clone this client, so it can be cloned and shared across the application.
///
/// # Example
///
/// ```no_run
/// use common::transaction::NSSATransaction;
/// use sequencer_service_rpc::{RpcClient as _, SequencerClientBuilder};
///
/// let url = "http://localhost:3040".parse()?;
/// let client = SequencerClientBuilder::default().build(url)?;
///
/// let tx: NSSATransaction = unimplemented!("Construct your transaction here");
/// let tx_hash = client.send_transaction(tx).await?;
/// ```
#[cfg(feature = "client")]
pub type SequencerClient = jsonrpsee::http_client::HttpClient;
#[cfg_attr(all(feature = "server", not(feature = "client")), rpc(server))]
#[cfg_attr(all(feature = "client", not(feature = "server")), rpc(client))]
#[cfg_attr(all(feature = "server", feature = "client"), rpc(server, client))]
pub trait Rpc {
#[method(name = "sendTransaction")]
async fn send_transaction(&self, tx: NSSATransaction) -> Result<HashType, ErrorObjectOwned>;
// TODO: expand healthcheck response into some kind of report
#[method(name = "checkHealth")]
async fn check_health(&self) -> Result<(), ErrorObjectOwned>;
// TODO: These functions should be removed after wallet starts using indexer
// for this type of queries.
//
// =============================================================================================
#[method(name = "getBlock")]
async fn get_block(&self, block_id: BlockId) -> Result<Option<Block>, ErrorObjectOwned>;
#[method(name = "getBlockRange")]
async fn get_block_range(
&self,
start_block_id: BlockId,
end_block_id: BlockId,
) -> Result<Vec<Block>, ErrorObjectOwned>;
#[method(name = "getLastBlockId")]
async fn get_last_block_id(&self) -> Result<BlockId, ErrorObjectOwned>;
#[method(name = "getAccountBalance")]
async fn get_account_balance(&self, account_id: AccountId) -> Result<u128, ErrorObjectOwned>;
#[method(name = "getTransaction")]
async fn get_transaction(
&self,
tx_hash: HashType,
) -> Result<Option<NSSATransaction>, ErrorObjectOwned>;
#[method(name = "getAccountsNonces")]
async fn get_accounts_nonces(
&self,
account_ids: Vec<AccountId>,
) -> Result<Vec<Nonce>, ErrorObjectOwned>;
#[method(name = "getProofForCommitment")]
async fn get_proof_for_commitment(
&self,
commitment: Commitment,
) -> Result<Option<MembershipProof>, ErrorObjectOwned>;
#[method(name = "getAccount")]
async fn get_account(&self, account_id: AccountId) -> Result<Account, ErrorObjectOwned>;
#[method(name = "getProgramIds")]
async fn get_program_ids(&self) -> Result<BTreeMap<String, ProgramId>, ErrorObjectOwned>;
// =============================================================================================
}

View File

@ -1,59 +1,75 @@
use std::{net::SocketAddr, path::PathBuf, sync::Arc, time::Duration};
use std::{net::SocketAddr, sync::Arc, time::Duration};
use actix_web::dev::ServerHandle;
use anyhow::{Context as _, Result};
use clap::Parser;
use common::rpc_primitives::RpcConfig;
use futures::{FutureExt as _, never::Never};
use anyhow::{Context as _, Result, anyhow};
use bytesize::ByteSize;
use common::transaction::NSSATransaction;
use futures::never::Never;
use jsonrpsee::server::ServerHandle;
#[cfg(not(feature = "standalone"))]
use log::warn;
use log::{error, info};
use mempool::MemPoolHandle;
#[cfg(feature = "standalone")]
use sequencer_core::SequencerCoreWithMockClients as SequencerCore;
use sequencer_core::config::SequencerConfig;
pub use sequencer_core::config::*;
#[cfg(not(feature = "standalone"))]
use sequencer_core::{SequencerCore, block_settlement_client::BlockSettlementClientTrait as _};
use sequencer_rpc::new_http_server;
use sequencer_service_rpc::RpcServer as _;
use tokio::{sync::Mutex, task::JoinHandle};
pub const RUST_LOG: &str = "RUST_LOG";
pub mod service;
#[derive(Parser, Debug)]
#[clap(version)]
struct Args {
/// Path to configs.
home_dir: PathBuf,
}
const REQUEST_BODY_MAX_SIZE: ByteSize = ByteSize::mib(10);
/// Handle to manage the sequencer and its tasks.
///
/// Implements `Drop` to ensure all tasks are aborted and the HTTP server is stopped when dropped.
/// Implements `Drop` to ensure all tasks are aborted and the RPC server is stopped when dropped.
pub struct SequencerHandle {
addr: SocketAddr,
http_server_handle: ServerHandle,
/// Option because of `Drop` which forbids to simply move out of `self` in `stopped()`.
server_handle: Option<ServerHandle>,
main_loop_handle: JoinHandle<Result<Never>>,
retry_pending_blocks_loop_handle: JoinHandle<Result<Never>>,
listen_for_bedrock_blocks_loop_handle: JoinHandle<Result<Never>>,
}
impl SequencerHandle {
/// Runs the sequencer indefinitely, monitoring its tasks.
///
/// If no error occurs, this function will never return.
const fn new(
addr: SocketAddr,
server_handle: ServerHandle,
main_loop_handle: JoinHandle<Result<Never>>,
retry_pending_blocks_loop_handle: JoinHandle<Result<Never>>,
listen_for_bedrock_blocks_loop_handle: JoinHandle<Result<Never>>,
) -> Self {
Self {
addr,
server_handle: Some(server_handle),
main_loop_handle,
retry_pending_blocks_loop_handle,
listen_for_bedrock_blocks_loop_handle,
}
}
/// Wait for any of the sequencer tasks to fail and return the error.
#[expect(
clippy::integer_division_remainder_used,
reason = "Generated by select! macro, can't be easily rewritten to avoid this lint"
)]
pub async fn run_forever(&mut self) -> Result<Never> {
pub async fn failed(mut self) -> Result<Never> {
let Self {
addr: _,
http_server_handle: _,
server_handle,
main_loop_handle,
retry_pending_blocks_loop_handle,
listen_for_bedrock_blocks_loop_handle,
} = self;
} = &mut self;
let server_handle = server_handle.take().expect("Server handle is set");
tokio::select! {
() = server_handle.stopped() => {
Err(anyhow!("RPC Server stopped"))
}
res = main_loop_handle => {
res
.context("Main loop task panicked")?
@ -72,11 +88,25 @@ impl SequencerHandle {
}
}
/// Check if all Sequencer tasks are still running.
///
/// Return `false` if any of the tasks has failed and `true` otherwise.
/// Error of the failed task can be retrieved by awaiting on [`Self::failed()`].
#[must_use]
pub fn is_finished(&self) -> bool {
self.main_loop_handle.is_finished()
|| self.retry_pending_blocks_loop_handle.is_finished()
|| self.listen_for_bedrock_blocks_loop_handle.is_finished()
pub fn is_healthy(&self) -> bool {
let Self {
addr: _,
server_handle,
main_loop_handle,
retry_pending_blocks_loop_handle,
listen_for_bedrock_blocks_loop_handle,
} = self;
let stopped = server_handle.as_ref().is_none_or(ServerHandle::is_stopped)
|| main_loop_handle.is_finished()
|| retry_pending_blocks_loop_handle.is_finished()
|| listen_for_bedrock_blocks_loop_handle.is_finished();
!stopped
}
#[must_use]
@ -89,7 +119,7 @@ impl Drop for SequencerHandle {
fn drop(&mut self) {
let Self {
addr: _,
http_server_handle,
server_handle,
main_loop_handle,
retry_pending_blocks_loop_handle,
listen_for_bedrock_blocks_loop_handle,
@ -99,31 +129,35 @@ impl Drop for SequencerHandle {
retry_pending_blocks_loop_handle.abort();
listen_for_bedrock_blocks_loop_handle.abort();
// Can't wait here as Drop can't be async, but anyway stop signal should be sent
http_server_handle.stop(true).now_or_never();
let Some(handle) = server_handle else {
return;
};
if let Err(err) = handle.stop() {
error!("An error occurred while stopping Sequencer RPC server: {err}");
}
}
}
pub async fn startup_sequencer(app_config: SequencerConfig) -> Result<SequencerHandle> {
let block_timeout = app_config.block_create_timeout;
let retry_pending_blocks_timeout = app_config.retry_pending_blocks_timeout;
let port = app_config.port;
pub async fn run(config: SequencerConfig, port: u16) -> Result<SequencerHandle> {
let block_timeout = config.block_create_timeout;
let retry_pending_blocks_timeout = config.retry_pending_blocks_timeout;
let max_block_size = config.max_block_size;
let (sequencer_core, mempool_handle) = SequencerCore::start_from_config(app_config).await;
let (sequencer_core, mempool_handle) = SequencerCore::start_from_config(config).await;
info!("Sequencer core set up");
let seq_core_wrapped = Arc::new(Mutex::new(sequencer_core));
let (http_server, addr) = new_http_server(
RpcConfig::with_port(port),
let (server_handle, addr) = run_server(
Arc::clone(&seq_core_wrapped),
mempool_handle,
port,
max_block_size.as_u64(),
)
.await?;
info!("HTTP server started");
let http_server_handle = http_server.handle();
tokio::spawn(http_server);
info!("RPC server started");
#[cfg(not(feature = "standalone"))]
{
@ -146,13 +180,42 @@ pub async fn startup_sequencer(app_config: SequencerConfig) -> Result<SequencerH
let listen_for_bedrock_blocks_loop_handle =
tokio::spawn(listen_for_bedrock_blocks_loop(seq_core_wrapped));
Ok(SequencerHandle {
Ok(SequencerHandle::new(
addr,
http_server_handle,
server_handle,
main_loop_handle,
retry_pending_blocks_loop_handle,
listen_for_bedrock_blocks_loop_handle,
})
))
}
async fn run_server(
sequencer: Arc<Mutex<SequencerCore>>,
mempool_handle: MemPoolHandle<NSSATransaction>,
port: u16,
max_block_size: u64,
) -> Result<(ServerHandle, SocketAddr)> {
let server = jsonrpsee::server::ServerBuilder::with_config(
jsonrpsee::server::ServerConfigBuilder::new()
.max_request_body_size(
u32::try_from(REQUEST_BODY_MAX_SIZE.as_u64())
.expect("REQUEST_BODY_MAX_SIZE should be less than u32::MAX"),
)
.build(),
)
.build(SocketAddr::from(([0, 0, 0, 0], port)))
.await
.context("Failed to build RPC server")?;
let addr = server
.local_addr()
.context("Failed to get local address of RPC server")?;
info!("Starting Sequencer Service RPC server on {addr}");
let service = service::SequencerService::new(sequencer, mempool_handle, max_block_size);
let handle = server.start(service.into_rpc());
Ok((handle, addr))
}
async fn main_loop(seq_core: Arc<Mutex<SequencerCore>>, block_timeout: Duration) -> Result<Never> {
@ -210,7 +273,7 @@ async fn retry_pending_blocks(seq_core: &Arc<Mutex<SequencerCore>>) -> Result<()
.create_inscribe_tx(block)
.context("Failed to create inscribe tx for pending block")?;
debug!(">>>> Create inscribe: {:?}", now.elapsed());
debug!("Create inscribe: {:?}", now.elapsed());
let now = Instant::now();
if let Err(e) = block_settlement_client
@ -222,7 +285,7 @@ async fn retry_pending_blocks(seq_core: &Arc<Mutex<SequencerCore>>) -> Result<()
block.header.block_id
);
}
debug!(">>>> Post: {:?}", now.elapsed());
debug!("Post: {:?}", now.elapsed());
}
Ok(())
}
@ -287,33 +350,3 @@ async fn retry_pending_blocks_loop(
) -> Result<Never> {
std::future::pending::<Result<Never>>().await
}
pub async fn main_runner() -> Result<()> {
env_logger::init();
let args = Args::parse();
let Args { home_dir } = args;
let app_config = SequencerConfig::from_path(&home_dir.join("sequencer_config.json"))?;
if let Some(rust_log) = &app_config.override_rust_log {
info!("RUST_LOG env var set to {rust_log:?}");
// SAFETY: there is no other threads running at this point
unsafe {
std::env::set_var(RUST_LOG, rust_log);
}
}
// ToDo: Add restart on failures
let mut sequencer_handle = startup_sequencer(app_config).await?;
info!("Sequencer running. Monitoring concurrent tasks...");
let Err(err) = sequencer_handle.run_forever().await;
error!("Sequencer failed: {err:#}");
info!("Shutting down sequencer...");
Ok(())
}

View File

@ -0,0 +1,60 @@
use std::path::PathBuf;
use anyhow::Result;
use clap::Parser;
use log::{error, info};
use tokio_util::sync::CancellationToken;
#[derive(Debug, Parser)]
#[clap(version)]
struct Args {
#[clap(name = "config")]
config_path: PathBuf,
#[clap(short, long, default_value = "3040")]
port: u16,
}
#[tokio::main]
#[expect(
clippy::integer_division_remainder_used,
reason = "Generated by select! macro, can't be easily rewritten to avoid this lint"
)]
async fn main() -> Result<()> {
env_logger::init();
let Args { config_path, port } = Args::parse();
let cancellation_token = listen_for_shutdown_signal();
let config = sequencer_service::SequencerConfig::from_path(&config_path)?;
let sequencer_handle = sequencer_service::run(config, port).await?;
tokio::select! {
() = cancellation_token.cancelled() => {
info!("Shutting down sequencer...");
}
Err(err) = sequencer_handle.failed() => {
error!("Sequencer failed unexpectedly: {err}");
}
}
info!("Sequencer shutdown complete");
Ok(())
}
fn listen_for_shutdown_signal() -> CancellationToken {
let cancellation_token = CancellationToken::new();
let cancellation_token_clone = cancellation_token.clone();
tokio::spawn(async move {
if let Err(err) = tokio::signal::ctrl_c().await {
error!("Failed to listen for Ctrl-C signal: {err}");
return;
}
info!("Received Ctrl-C signal");
cancellation_token_clone.cancel();
});
cancellation_token
}

View File

@ -0,0 +1,183 @@
use std::{collections::BTreeMap, sync::Arc};
use common::transaction::NSSATransaction;
use jsonrpsee::{
core::async_trait,
types::{ErrorCode, ErrorObjectOwned},
};
use log::warn;
use mempool::MemPoolHandle;
use nssa::{self, program::Program};
use sequencer_core::{
DbError, SequencerCore, block_settlement_client::BlockSettlementClientTrait,
indexer_client::IndexerClientTrait,
};
use sequencer_service_protocol::{
Account, AccountId, Block, BlockId, Commitment, HashType, MembershipProof, Nonce, ProgramId,
};
use tokio::sync::Mutex;
const NOT_FOUND_ERROR_CODE: i32 = -31999;
pub struct SequencerService<BC: BlockSettlementClientTrait, IC: IndexerClientTrait> {
sequencer: Arc<Mutex<SequencerCore<BC, IC>>>,
mempool_handle: MemPoolHandle<NSSATransaction>,
max_block_size: u64,
}
impl<BC: BlockSettlementClientTrait, IC: IndexerClientTrait> SequencerService<BC, IC> {
pub const fn new(
sequencer: Arc<Mutex<SequencerCore<BC, IC>>>,
mempool_handle: MemPoolHandle<NSSATransaction>,
max_block_size: u64,
) -> Self {
Self {
sequencer,
mempool_handle,
max_block_size,
}
}
}
#[async_trait]
impl<BC: BlockSettlementClientTrait + Send + 'static, IC: IndexerClientTrait + Send + 'static>
    sequencer_service_rpc::RpcServer for SequencerService<BC, IC>
{
    /// Validate a transaction and enqueue it into the mempool.
    ///
    /// Rejects transactions whose borsh-encoded size would not fit into a
    /// block (max block size minus a fixed header allowance) and those that
    /// fail the stateless pre-check. Returns the transaction hash on success.
    async fn send_transaction(&self, tx: NSSATransaction) -> Result<HashType, ErrorObjectOwned> {
        // Reserve ~200 bytes for block header overhead
        const BLOCK_HEADER_OVERHEAD: u64 = 200;
        let tx_hash = tx.hash();
        let encoded_tx =
            borsh::to_vec(&tx).expect("Transaction borsh serialization should not fail");
        let tx_size = u64::try_from(encoded_tx.len()).expect("Transaction size should fit in u64");
        // saturating_sub guards against a configured block size smaller than the overhead.
        let max_tx_size = self.max_block_size.saturating_sub(BLOCK_HEADER_OVERHEAD);
        if tx_size > max_tx_size {
            return Err(ErrorObjectOwned::owned(
                ErrorCode::InvalidParams.code(),
                format!("Transaction too large: size {tx_size}, max {max_tx_size}"),
                None::<()>,
            ));
        }
        let authenticated_tx = tx
            .transaction_stateless_check()
            .inspect_err(|err| warn!("Error at pre_check {err:#?}"))
            .map_err(|err| {
                ErrorObjectOwned::owned(
                    ErrorCode::InvalidParams.code(),
                    format!("{err:?}"),
                    None::<()>,
                )
            })?;
        self.mempool_handle
            .push(authenticated_tx)
            .await
            .expect("Mempool is closed, this is a bug");
        Ok(tx_hash)
    }

    /// Liveness probe; always succeeds.
    async fn check_health(&self) -> Result<(), ErrorObjectOwned> {
        Ok(())
    }

    /// Fetch a single block by id; `Ok(None)` when the block does not exist.
    async fn get_block(&self, block_id: BlockId) -> Result<Option<Block>, ErrorObjectOwned> {
        let sequencer = self.sequencer.lock().await;
        sequencer
            .block_store()
            .get_block_at_id(block_id)
            .map_err(|err| internal_error(&err))
    }

    /// Fetch the inclusive range of blocks `[start_block_id, end_block_id]`.
    ///
    /// Errors with `NOT_FOUND_ERROR_CODE` if any block in the range is missing.
    async fn get_block_range(
        &self,
        start_block_id: BlockId,
        end_block_id: BlockId,
    ) -> Result<Vec<Block>, ErrorObjectOwned> {
        let sequencer = self.sequencer.lock().await;
        (start_block_id..=end_block_id)
            .map(|block_id| {
                let block = sequencer
                    .block_store()
                    .get_block_at_id(block_id)
                    .map_err(|err| internal_error(&err))?;
                block.ok_or_else(|| {
                    ErrorObjectOwned::owned(
                        NOT_FOUND_ERROR_CODE,
                        format!("Block with id {block_id} not found"),
                        None::<()>,
                    )
                })
            })
            .collect::<Result<Vec<_>, _>>()
    }

    /// Current chain height as reported by the sequencer core.
    async fn get_last_block_id(&self) -> Result<BlockId, ErrorObjectOwned> {
        let sequencer = self.sequencer.lock().await;
        Ok(sequencer.chain_height())
    }

    /// Balance of the account with the given id.
    async fn get_account_balance(&self, account_id: AccountId) -> Result<u128, ErrorObjectOwned> {
        let sequencer = self.sequencer.lock().await;
        let account = sequencer.state().get_account_by_id(account_id);
        Ok(account.balance)
    }

    /// Look up a stored transaction by its hash; `Ok(None)` when unknown.
    async fn get_transaction(
        &self,
        tx_hash: HashType,
    ) -> Result<Option<NSSATransaction>, ErrorObjectOwned> {
        let sequencer = self.sequencer.lock().await;
        Ok(sequencer.block_store().get_transaction_by_hash(tx_hash))
    }

    /// Current nonces for the given accounts, in the same order as the input ids.
    async fn get_accounts_nonces(
        &self,
        account_ids: Vec<AccountId>,
    ) -> Result<Vec<Nonce>, ErrorObjectOwned> {
        let sequencer = self.sequencer.lock().await;
        let nonces = account_ids
            .into_iter()
            .map(|account_id| sequencer.state().get_account_by_id(account_id).nonce)
            .collect();
        Ok(nonces)
    }

    /// Membership proof for a commitment, if the state can produce one.
    async fn get_proof_for_commitment(
        &self,
        commitment: Commitment,
    ) -> Result<Option<MembershipProof>, ErrorObjectOwned> {
        let sequencer = self.sequencer.lock().await;
        Ok(sequencer.state().get_proof_for_commitment(&commitment))
    }

    /// Full account record for the given account id.
    async fn get_account(&self, account_id: AccountId) -> Result<Account, ErrorObjectOwned> {
        let sequencer = self.sequencer.lock().await;
        Ok(sequencer.state().get_account_by_id(account_id))
    }

    /// Ids of the built-in programs, keyed by their well-known names.
    async fn get_program_ids(&self) -> Result<BTreeMap<String, ProgramId>, ErrorObjectOwned> {
        let mut program_ids = BTreeMap::new();
        program_ids.insert(
            "authenticated_transfer".to_owned(),
            Program::authenticated_transfer_program().id(),
        );
        program_ids.insert("token".to_owned(), Program::token().id());
        program_ids.insert("pinata".to_owned(), Program::pinata().id());
        program_ids.insert("amm".to_owned(), Program::amm().id());
        program_ids.insert(
            "privacy_preserving_circuit".to_owned(),
            nssa::PRIVACY_PRESERVING_CIRCUIT_ID,
        );
        Ok(program_ids)
    }
}
/// Convert a database error into a JSON-RPC internal-error response object.
fn internal_error(err: &DbError) -> ErrorObjectOwned {
    let message = err.to_string();
    ErrorObjectOwned::owned(ErrorCode::InternalError.code(), message, None::<()>)
}

View File

@ -1,39 +0,0 @@
[package]
name = "sequencer_rpc"
version = "0.1.0"
edition = "2024"
license = { workspace = true }
[lints]
workspace = true
[dependencies]
nssa.workspace = true
common.workspace = true
mempool.workspace = true
sequencer_core = { workspace = true }
bedrock_client.workspace = true
anyhow.workspace = true
serde_json.workspace = true
log.workspace = true
serde.workspace = true
actix-cors.workspace = true
futures.workspace = true
base58.workspace = true
hex.workspace = true
tempfile.workspace = true
base64.workspace = true
itertools.workspace = true
actix-web.workspace = true
tokio.workspace = true
borsh.workspace = true
bytesize.workspace = true
[dev-dependencies]
sequencer_core = { workspace = true, features = ["mock"] }
[features]
default = []
# Includes types to run the sequencer in standalone mode
standalone = ["sequencer_core/mock"]

View File

@ -1,55 +0,0 @@
use std::sync::Arc;
use common::{
rpc_primitives::errors::{RpcError, RpcErrorKind},
transaction::NSSATransaction,
};
use mempool::MemPoolHandle;
pub use net_utils::*;
#[cfg(feature = "standalone")]
use sequencer_core::mock::{MockBlockSettlementClient, MockIndexerClient};
use sequencer_core::{
SequencerCore,
block_settlement_client::{BlockSettlementClient, BlockSettlementClientTrait},
indexer_client::{IndexerClient, IndexerClientTrait},
};
use serde::Serialize;
use serde_json::Value;
use tokio::sync::Mutex;
use self::types::err_rpc::RpcErr;
pub mod net_utils;
pub mod process;
pub mod types;
#[cfg(feature = "standalone")]
pub type JsonHandlerWithMockClients = JsonHandler<MockBlockSettlementClient, MockIndexerClient>;
// ToDo: Add necessary fields
/// Shared state for the JSON-RPC request handler, generic over the
/// block-settlement and indexer client backends (real clients by default).
pub struct JsonHandler<
    BC: BlockSettlementClientTrait = BlockSettlementClient,
    IC: IndexerClientTrait = IndexerClient,
> {
    // Shared sequencer core guarded by an async mutex.
    sequencer_state: Arc<Mutex<SequencerCore<BC, IC>>>,
    // Handle used to push transactions into the mempool.
    mempool_handle: MemPoolHandle<NSSATransaction>,
    // Configured maximum block size, in bytes.
    max_block_size: usize,
}
/// Serialize a successful handler result into a JSON value response.
fn respond<T: Serialize>(val: T) -> Result<Value, RpcErr> {
    let value = serde_json::to_value(val)?;
    Ok(value)
}
/// Move the structured error payload of an [`RpcError`] into its `data`
/// field, leaving `error_struct` empty.
///
/// (The public name keeps its historical spelling for caller compatibility.)
#[must_use]
pub fn rpc_error_responce_inverter(err: RpcError) -> RpcError {
    // Flatten whichever structured kind is present into a plain JSON value.
    let data = match err.error_struct {
        Some(RpcErrorKind::HandlerError(value) | RpcErrorKind::InternalError(value)) => Some(value),
        Some(RpcErrorKind::RequestValidationError(validation)) => {
            Some(serde_json::to_value(validation).unwrap())
        }
        None => None,
    };
    RpcError {
        error_struct: None,
        code: err.code,
        message: err.message,
        data,
    }
}

View File

@ -1,104 +0,0 @@
use std::{io, net::SocketAddr, sync::Arc};
use actix_cors::Cors;
use actix_web::{App, Error as HttpError, HttpResponse, HttpServer, http, middleware, web};
use common::{
rpc_primitives::{RpcConfig, message::Message},
transaction::NSSATransaction,
};
use futures::{Future, FutureExt as _};
use log::info;
use mempool::MemPoolHandle;
#[cfg(not(feature = "standalone"))]
use sequencer_core::SequencerCore;
#[cfg(feature = "standalone")]
use sequencer_core::SequencerCoreWithMockClients as SequencerCore;
use tokio::sync::Mutex;
#[cfg(not(feature = "standalone"))]
use super::JsonHandler;
use crate::process::Process;
pub const SHUTDOWN_TIMEOUT_SECS: u64 = 10;
pub const NETWORK: &str = "network";
#[cfg(feature = "standalone")]
type JsonHandler = super::JsonHandlerWithMockClients;
/// Actix route handler: feed the incoming JSON-RPC message through the
/// process implementation and wrap the result in an HTTP 200 response.
pub(crate) fn rpc_handler<P: Process>(
    message: web::Json<Message>,
    handler: web::Data<P>,
) -> impl Future<Output = Result<HttpResponse, HttpError>> {
    async move {
        let processed = handler.process(message.0).await?;
        Ok(HttpResponse::Ok().json(&processed))
    }
    .boxed()
}
/// Build the CORS middleware from the configured allowed origins.
///
/// A configuration consisting of a single `"*"` keeps the fully permissive
/// default; any other list restricts requests to exactly those origins.
fn get_cors(cors_allowed_origins: &[String]) -> Cors {
    let mut cors = Cors::permissive();
    let allow_any_origin = cors_allowed_origins == ["*".to_owned()];
    if !allow_any_origin {
        for origin in cors_allowed_origins {
            cors = cors.allowed_origin(origin);
        }
    }
    cors.allowed_methods(vec!["GET", "POST"])
        .allowed_headers(vec![http::header::AUTHORIZATION, http::header::ACCEPT])
        .allowed_header(http::header::CONTENT_TYPE)
        .max_age(3600)
}
/// Start the sequencer JSON-RPC HTTP server.
///
/// Binds to `config.addr`, installs CORS and logging middleware plus the
/// JSON-RPC POST handler, and returns the (not-yet-awaited) actix server
/// together with the actual bound socket address.
///
/// # Panics
/// Panics if the configured sizes do not fit into `usize` or if more than one
/// address ends up bound.
///
/// Fix: the second parameter was misspelled `seuquencer_core`; renamed to
/// `sequencer_core` (positional argument, so callers are unaffected).
pub async fn new_http_server(
    config: RpcConfig,
    sequencer_core: Arc<Mutex<SequencerCore>>,
    mempool_handle: MemPoolHandle<NSSATransaction>,
) -> io::Result<(actix_web::dev::Server, SocketAddr)> {
    let RpcConfig {
        addr,
        cors_allowed_origins,
        limits_config,
    } = config;
    info!(target:NETWORK, "Starting HTTP server at {addr}");
    let max_block_size = sequencer_core
        .lock()
        .await
        .sequencer_config()
        .max_block_size
        .as_u64()
        .try_into()
        .expect("`max_block_size` is expected to fit into usize");
    let handler = web::Data::new(JsonHandler {
        sequencer_state: Arc::clone(&sequencer_core),
        mempool_handle,
        max_block_size,
    });
    // HTTP server
    let http_server = HttpServer::new(move || {
        let json_limit = limits_config
            .json_payload_max_size
            .as_u64()
            .try_into()
            .expect("`json_payload_max_size` is expected to fit into usize");
        App::new()
            .wrap(get_cors(&cors_allowed_origins))
            .app_data(handler.clone())
            .app_data(web::JsonConfig::default().limit(json_limit))
            .wrap(middleware::Logger::default())
            .service(web::resource("/").route(web::post().to(rpc_handler::<JsonHandler>)))
    })
    .bind(addr)?
    .shutdown_timeout(SHUTDOWN_TIMEOUT_SECS)
    .disable_signals();
    let [final_addr] = http_server
        .addrs()
        .try_into()
        .expect("Exactly one address bound is expected for sequencer HTTP server");
    info!(target:NETWORK, "HTTP server started at {final_addr}");
    Ok((http_server.run(), final_addr))
}

View File

@ -1,49 +0,0 @@
use common::{
rpc_primitives::errors::{RpcError, RpcParseError},
transaction::TransactionMalformationError,
};
/// Implements [`RpcErrKind`] for error types that already convert into
/// [`RpcError`] via an existing `Into`/`From` impl.
macro_rules! standard_rpc_err_kind {
    ($type_name:path) => {
        impl RpcErrKind for $type_name {
            fn into_rpc_err(self) -> RpcError {
                self.into()
            }
        }
    };
}
/// Newtype wrapper around [`RpcError`] used as the handler error type.
pub struct RpcErr(pub RpcError);

/// Catch-all internal error type for handler plumbing.
pub type RpcErrInternal = anyhow::Error;

/// Conversion contract from a concrete error type into an [`RpcError`].
pub trait RpcErrKind: 'static {
    fn into_rpc_err(self) -> RpcError;
}

// Any `RpcErrKind` converts into the wrapper automatically, so `?` works on
// every supported error type inside handlers returning `Result<_, RpcErr>`.
impl<T: RpcErrKind> From<T> for RpcErr {
    fn from(e: T) -> Self {
        Self(e.into_rpc_err())
    }
}
standard_rpc_err_kind!(RpcError);
standard_rpc_err_kind!(RpcParseError);

// JSON (de)serialization failures surface as serialization errors.
impl RpcErrKind for serde_json::Error {
    fn into_rpc_err(self) -> RpcError {
        RpcError::serialization_error(&self.to_string())
    }
}

// Anyhow errors become opaque internal errors carrying the debug representation.
impl RpcErrKind for RpcErrInternal {
    fn into_rpc_err(self) -> RpcError {
        RpcError::new_internal_error(None, &format!("{self:#?}"))
    }
}

// Malformed transactions map to invalid-params with the serialized details.
impl RpcErrKind for TransactionMalformationError {
    fn into_rpc_err(self) -> RpcError {
        RpcError::invalid_params(Some(serde_json::to_value(self).unwrap()))
    }
}

View File

@ -1 +0,0 @@
pub mod err_rpc;

View File

@ -1,14 +0,0 @@
services:
sequencer_runner:
image: lssa/sequencer_runner
build:
context: ..
dockerfile: sequencer_runner/Dockerfile
container_name: sequencer_runner
ports:
- "3040:3040"
volumes:
# Mount configuration folder
- ./configs/docker:/etc/sequencer_runner
# Mount data folder
- ./data:/var/lib/sequencer_runner

View File

@ -1,16 +0,0 @@
use anyhow::Result;
use sequencer_runner::main_runner;
/// Number of tokio worker threads used by the sequencer runtime.
pub const NUM_THREADS: usize = 4;

// TODO: Why does this require the config as a directory and not as a file?
fn main() -> Result<()> {
    // Build a multi-threaded tokio runtime for the actix system to drive.
    let build_runtime = || {
        tokio::runtime::Builder::new_multi_thread()
            .worker_threads(NUM_THREADS)
            .enable_all()
            .build()
            .unwrap()
    };
    actix::System::with_tokio_rt(build_runtime).block_on(main_runner())
}

View File

@ -514,7 +514,7 @@ impl RocksDBIO {
Ok(())
}
pub fn get_block(&self, block_id: u64) -> DbResult<Block> {
pub fn get_block(&self, block_id: u64) -> DbResult<Option<Block>> {
let cf_block = self.block_column();
let res = self
.db
@ -530,16 +530,14 @@ impl RocksDBIO {
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(borsh::from_slice::<Block>(&data).map_err(|serr| {
Ok(Some(borsh::from_slice::<Block>(&data).map_err(|serr| {
DbError::borsh_cast_message(
serr,
Some("Failed to deserialize block data".to_owned()),
)
})?)
})?))
} else {
Err(DbError::db_interaction_error(
"Block on this id not found".to_owned(),
))
Ok(None)
}
}
@ -618,7 +616,7 @@ impl RocksDBIO {
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))
}
pub fn get_breakpoint(&self, br_id: u64) -> DbResult<V02State> {
fn get_breakpoint(&self, br_id: u64) -> DbResult<V02State> {
let cf_br = self.breakpoint_column();
let res = self
.db
@ -641,6 +639,8 @@ impl RocksDBIO {
)
})?)
} else {
// Note: this is not a `DbError::NotFound` case, because we expect that all searched
// breakpoints will be present in db as this is an internal method.
Err(DbError::db_interaction_error(
"Breakpoint on this id not found".to_owned(),
))
@ -665,7 +665,9 @@ impl RocksDBIO {
};
for id in start..=block_id {
let block = self.get_block(id)?;
let block = self.get_block(id)?.ok_or_else(|| {
DbError::db_interaction_error(format!("Block with id {id} not found"))
})?;
for transaction in block.body.transactions {
transaction
@ -686,9 +688,9 @@ impl RocksDBIO {
Ok(breakpoint)
} else {
Err(DbError::db_interaction_error(
"Block on this id not found".to_owned(),
))
Err(DbError::db_interaction_error(format!(
"Block with id {block_id} not found"
)))
}
}
@ -720,7 +722,7 @@ impl RocksDBIO {
// Mappings
pub fn get_block_id_by_hash(&self, hash: [u8; 32]) -> DbResult<u64> {
pub fn get_block_id_by_hash(&self, hash: [u8; 32]) -> DbResult<Option<u64>> {
let cf_hti = self.hash_to_id_column();
let res = self
.db
@ -736,17 +738,15 @@ impl RocksDBIO {
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(borsh::from_slice::<u64>(&data).map_err(|serr| {
Ok(Some(borsh::from_slice::<u64>(&data).map_err(|serr| {
DbError::borsh_cast_message(serr, Some("Failed to deserialize block id".to_owned()))
})?)
})?))
} else {
Err(DbError::db_interaction_error(
"Block on this hash not found".to_owned(),
))
Ok(None)
}
}
pub fn get_block_id_by_tx_hash(&self, tx_hash: [u8; 32]) -> DbResult<u64> {
pub fn get_block_id_by_tx_hash(&self, tx_hash: [u8; 32]) -> DbResult<Option<u64>> {
let cf_tti = self.tx_hash_to_id_column();
let res = self
.db
@ -762,13 +762,11 @@ impl RocksDBIO {
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(borsh::from_slice::<u64>(&data).map_err(|serr| {
Ok(Some(borsh::from_slice::<u64>(&data).map_err(|serr| {
DbError::borsh_cast_message(serr, Some("Failed to deserialize block id".to_owned()))
})?)
})?))
} else {
Err(DbError::db_interaction_error(
"Block for this tx hash not found".to_owned(),
))
Ok(None)
}
}
@ -921,8 +919,14 @@ impl RocksDBIO {
let mut tx_batch = vec![];
for tx_hash in self.get_acc_transaction_hashes(acc_id, offset, limit)? {
let block_id = self.get_block_id_by_tx_hash(tx_hash)?;
let block = self.get_block(block_id)?;
let block_id = self.get_block_id_by_tx_hash(tx_hash)?.ok_or_else(|| {
DbError::db_interaction_error(format!(
"Block id not found for tx hash {tx_hash:#?}"
))
})?;
let block = self.get_block(block_id)?.ok_or_else(|| {
DbError::db_interaction_error(format!("Block with id {block_id} not found"))
})?;
let transaction = block
.body
@ -1019,7 +1023,7 @@ mod tests {
let first_id = dbio.get_meta_first_block_in_db().unwrap();
let is_first_set = dbio.get_meta_is_first_block_set().unwrap();
let last_br_id = dbio.get_meta_last_breakpoint_id().unwrap();
let last_block = dbio.get_block(1).unwrap();
let last_block = dbio.get_block(1).unwrap().unwrap();
let breakpoint = dbio.get_breakpoint(0).unwrap();
let final_state = dbio.final_state().unwrap();
@ -1056,7 +1060,7 @@ mod tests {
let first_id = dbio.get_meta_first_block_in_db().unwrap();
let is_first_set = dbio.get_meta_is_first_block_set().unwrap();
let last_br_id = dbio.get_meta_last_breakpoint_id().unwrap();
let last_block = dbio.get_block(last_id).unwrap();
let last_block = dbio.get_block(last_id).unwrap().unwrap();
let breakpoint = dbio.get_breakpoint(0).unwrap();
let final_state = dbio.final_state().unwrap();
@ -1087,7 +1091,7 @@ mod tests {
for i in 1..BREAKPOINT_INTERVAL {
let last_id = dbio.get_meta_last_block_in_db().unwrap();
let last_block = dbio.get_block(last_id).unwrap();
let last_block = dbio.get_block(last_id).unwrap().unwrap();
let prev_hash = last_block.header.hash;
let transfer_tx = transfer(1, u128::from(i - 1), true);
@ -1103,7 +1107,7 @@ mod tests {
let first_id = dbio.get_meta_first_block_in_db().unwrap();
let is_first_set = dbio.get_meta_is_first_block_set().unwrap();
let last_br_id = dbio.get_meta_last_breakpoint_id().unwrap();
let last_block = dbio.get_block(last_id).unwrap();
let last_block = dbio.get_block(last_id).unwrap().unwrap();
let prev_breakpoint = dbio.get_breakpoint(0).unwrap();
let breakpoint = dbio.get_breakpoint(1).unwrap();
let final_state = dbio.final_state().unwrap();
@ -1142,7 +1146,7 @@ mod tests {
RocksDBIO::open_or_create(temdir_path, &genesis_block(), &initial_state()).unwrap();
let last_id = dbio.get_meta_last_block_in_db().unwrap();
let last_block = dbio.get_block(last_id).unwrap();
let last_block = dbio.get_block(last_id).unwrap().unwrap();
let prev_hash = last_block.header.hash;
let transfer_tx = transfer(1, 0, true);
@ -1153,7 +1157,7 @@ mod tests {
dbio.put_block(&block, [1; 32]).unwrap();
let last_id = dbio.get_meta_last_block_in_db().unwrap();
let last_block = dbio.get_block(last_id).unwrap();
let last_block = dbio.get_block(last_id).unwrap().unwrap();
let prev_hash = last_block.header.hash;
let transfer_tx = transfer(1, 1, true);
@ -1164,7 +1168,7 @@ mod tests {
dbio.put_block(&block, [2; 32]).unwrap();
let last_id = dbio.get_meta_last_block_in_db().unwrap();
let last_block = dbio.get_block(last_id).unwrap();
let last_block = dbio.get_block(last_id).unwrap().unwrap();
let prev_hash = last_block.header.hash;
let transfer_tx = transfer(1, 2, true);
@ -1175,7 +1179,7 @@ mod tests {
dbio.put_block(&block, [3; 32]).unwrap();
let last_id = dbio.get_meta_last_block_in_db().unwrap();
let last_block = dbio.get_block(last_id).unwrap();
let last_block = dbio.get_block(last_id).unwrap().unwrap();
let prev_hash = last_block.header.hash;
let transfer_tx = transfer(1, 3, true);
@ -1185,10 +1189,16 @@ mod tests {
let block = common::test_utils::produce_dummy_block(5, Some(prev_hash), vec![transfer_tx]);
dbio.put_block(&block, [4; 32]).unwrap();
let control_block_id1 = dbio.get_block_id_by_hash(control_hash1.0).unwrap();
let control_block_id2 = dbio.get_block_id_by_hash(control_hash2.0).unwrap();
let control_block_id3 = dbio.get_block_id_by_tx_hash(control_tx_hash1.0).unwrap();
let control_block_id4 = dbio.get_block_id_by_tx_hash(control_tx_hash2.0).unwrap();
let control_block_id1 = dbio.get_block_id_by_hash(control_hash1.0).unwrap().unwrap();
let control_block_id2 = dbio.get_block_id_by_hash(control_hash2.0).unwrap().unwrap();
let control_block_id3 = dbio
.get_block_id_by_tx_hash(control_tx_hash1.0)
.unwrap()
.unwrap();
let control_block_id4 = dbio
.get_block_id_by_tx_hash(control_tx_hash2.0)
.unwrap()
.unwrap();
assert_eq!(control_block_id1, 2);
assert_eq!(control_block_id2, 3);
@ -1207,7 +1217,7 @@ mod tests {
RocksDBIO::open_or_create(temdir_path, &genesis_block(), &initial_state()).unwrap();
let last_id = dbio.get_meta_last_block_in_db().unwrap();
let last_block = dbio.get_block(last_id).unwrap();
let last_block = dbio.get_block(last_id).unwrap().unwrap();
let prev_hash = last_block.header.hash;
let transfer_tx = transfer(1, 0, true);
@ -1217,7 +1227,7 @@ mod tests {
dbio.put_block(&block, [1; 32]).unwrap();
let last_id = dbio.get_meta_last_block_in_db().unwrap();
let last_block = dbio.get_block(last_id).unwrap();
let last_block = dbio.get_block(last_id).unwrap().unwrap();
let prev_hash = last_block.header.hash;
let transfer_tx = transfer(1, 1, true);
@ -1227,7 +1237,7 @@ mod tests {
dbio.put_block(&block, [2; 32]).unwrap();
let last_id = dbio.get_meta_last_block_in_db().unwrap();
let last_block = dbio.get_block(last_id).unwrap();
let last_block = dbio.get_block(last_id).unwrap().unwrap();
let prev_hash = last_block.header.hash;
let transfer_tx = transfer(1, 2, true);
@ -1237,7 +1247,7 @@ mod tests {
dbio.put_block(&block, [3; 32]).unwrap();
let last_id = dbio.get_meta_last_block_in_db().unwrap();
let last_block = dbio.get_block(last_id).unwrap();
let last_block = dbio.get_block(last_id).unwrap().unwrap();
let prev_hash = last_block.header.hash;
let transfer_tx = transfer(1, 3, true);
@ -1285,7 +1295,7 @@ mod tests {
RocksDBIO::open_or_create(temdir_path, &genesis_block(), &initial_state()).unwrap();
let last_id = dbio.get_meta_last_block_in_db().unwrap();
let last_block = dbio.get_block(last_id).unwrap();
let last_block = dbio.get_block(last_id).unwrap().unwrap();
let prev_hash = last_block.header.hash;
let transfer_tx = transfer(1, 0, true);
@ -1297,7 +1307,7 @@ mod tests {
dbio.put_block(&block, [1; 32]).unwrap();
let last_id = dbio.get_meta_last_block_in_db().unwrap();
let last_block = dbio.get_block(last_id).unwrap();
let last_block = dbio.get_block(last_id).unwrap().unwrap();
let prev_hash = last_block.header.hash;
let transfer_tx = transfer(1, 1, true);
@ -1309,7 +1319,7 @@ mod tests {
dbio.put_block(&block, [2; 32]).unwrap();
let last_id = dbio.get_meta_last_block_in_db().unwrap();
let last_block = dbio.get_block(last_id).unwrap();
let last_block = dbio.get_block(last_id).unwrap().unwrap();
let prev_hash = last_block.header.hash;
let transfer_tx = transfer(1, 2, true);
@ -1321,7 +1331,7 @@ mod tests {
dbio.put_block(&block, [3; 32]).unwrap();
let last_id = dbio.get_meta_last_block_in_db().unwrap();
let last_block = dbio.get_block(last_id).unwrap();
let last_block = dbio.get_block(last_id).unwrap().unwrap();
let prev_hash = last_block.header.hash;
let transfer_tx = transfer(1, 3, true);

View File

@ -442,7 +442,7 @@ impl RocksDBIO {
Ok(())
}
pub fn get_block(&self, block_id: u64) -> DbResult<Block> {
pub fn get_block(&self, block_id: u64) -> DbResult<Option<Block>> {
let cf_block = self.block_column();
let res = self
.db
@ -458,16 +458,14 @@ impl RocksDBIO {
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(borsh::from_slice::<Block>(&data).map_err(|serr| {
Ok(Some(borsh::from_slice::<Block>(&data).map_err(|serr| {
DbError::borsh_cast_message(
serr,
Some("Failed to deserialize block data".to_owned()),
)
})?)
})?))
} else {
Err(DbError::db_interaction_error(
"Block on this id not found".to_owned(),
))
Ok(None)
}
}
@ -495,7 +493,7 @@ impl RocksDBIO {
})?)
} else {
Err(DbError::db_interaction_error(
"Block on this id not found".to_owned(),
"NSSA state not found".to_owned(),
))
}
}
@ -512,9 +510,9 @@ impl RocksDBIO {
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?
.is_none()
{
return Err(DbError::db_interaction_error(
"Block on this id not found".to_owned(),
));
return Err(DbError::db_interaction_error(format!(
"Block with id {block_id} not found"
)));
}
self.db
@ -525,7 +523,9 @@ impl RocksDBIO {
}
pub fn mark_block_as_finalized(&self, block_id: u64) -> DbResult<()> {
let mut block = self.get_block(block_id)?;
let mut block = self.get_block(block_id)?.ok_or_else(|| {
DbError::db_interaction_error(format!("Block with id {block_id} not found"))
})?;
block.bedrock_status = BedrockStatus::Finalized;
let cf_block = self.block_column();

View File

@ -13,8 +13,8 @@ crate-type = ["rlib", "cdylib", "staticlib"]
[dependencies]
wallet.workspace = true
nssa.workspace = true
common.workspace = true
nssa_core.workspace = true
sequencer_service_rpc = { workspace = true, features = ["client"] }
tokio.workspace = true
[build-dependencies]

View File

@ -123,7 +123,7 @@ pub unsafe extern "C" fn wallet_ffi_get_private_account_keys(
};
// NPK is a 32-byte array
let npk_bytes = key_chain.nullifer_public_key.0;
let npk_bytes = key_chain.nullifier_public_key.0;
// VPK is a compressed secp256k1 point (33 bytes)
let vpk_bytes = key_chain.viewing_public_key.to_bytes();

View File

@ -28,7 +28,7 @@
use std::sync::OnceLock;
use common::error::ExecutionFailureKind;
use ::wallet::ExecutionFailureKind;
// Re-export public types for cbindgen
pub use error::WalletFfiError as FfiError;
use tokio::runtime::Handle;

View File

@ -75,10 +75,9 @@ pub unsafe extern "C" fn wallet_ffi_claim_pinata(
let pinata = Pinata(&wallet);
match block_on(pinata.claim(pinata_id, winner_id, solution)) {
Ok(response) => {
let tx_hash = CString::new(response.tx_hash.to_string())
Ok(tx_hash) => {
let tx_hash = CString::new(tx_hash.to_string())
.map_or(ptr::null_mut(), std::ffi::CString::into_raw);
unsafe {
(*out_result).tx_hash = tx_hash;
(*out_result).success = true;
@ -181,8 +180,8 @@ pub unsafe extern "C" fn wallet_ffi_claim_pinata_private_owned_already_initializ
pinata
.claim_private_owned_account_already_initialized(pinata_id, winner_id, solution, proof),
) {
Ok((response, _shared_key)) => {
let tx_hash = CString::new(response.tx_hash.to_string())
Ok((tx_hash, _shared_key)) => {
let tx_hash = CString::new(tx_hash.to_string())
.map_or(ptr::null_mut(), std::ffi::CString::into_raw);
unsafe {
@ -266,8 +265,8 @@ pub unsafe extern "C" fn wallet_ffi_claim_pinata_private_owned_not_initialized(
let pinata = Pinata(&wallet);
match block_on(pinata.claim_private_owned_account(pinata_id, winner_id, solution)) {
Ok((response, _shared_key)) => {
let tx_hash = CString::new(response.tx_hash.to_string())
Ok((tx_hash, _shared_key)) => {
let tx_hash = CString::new(tx_hash.to_string())
.map_or(ptr::null_mut(), std::ffi::CString::into_raw);
unsafe {

View File

@ -1,5 +1,7 @@
//! Block synchronization functions.
use sequencer_service_rpc::RpcClient as _;
use crate::{
block_on,
error::{print_error, WalletFfiError},
@ -134,10 +136,10 @@ pub unsafe extern "C" fn wallet_ffi_get_current_block_height(
}
};
match block_on(wallet.sequencer_client.get_last_block()) {
Ok(response) => {
match block_on(wallet.sequencer_client.get_last_block_id()) {
Ok(last_block_id) => {
unsafe {
*out_block_height = response.last_block;
*out_block_height = last_block_id;
}
WalletFfiError::Success
}

Some files were not shown because too many files have changed in this diff Show More