Merge branch 'main' into schouhy/add-block-context

This commit is contained in:
Sergio Chouhy 2026-03-20 09:49:50 -03:00
commit 607a34058d
126 changed files with 2226 additions and 3575 deletions

View File

@ -12,12 +12,12 @@ jobs:
strategy: strategy:
matrix: matrix:
include: include:
- name: sequencer_runner - name: sequencer_service
dockerfile: ./sequencer_runner/Dockerfile dockerfile: ./sequencer/service/Dockerfile
build_args: | build_args: |
STANDALONE=false STANDALONE=false
- name: sequencer_runner-standalone - name: sequencer_service-standalone
dockerfile: ./sequencer_runner/Dockerfile dockerfile: ./sequencer/service/Dockerfile
build_args: | build_args: |
STANDALONE=true STANDALONE=true
- name: indexer_service - name: indexer_service

2
.gitignore vendored
View File

@ -6,7 +6,7 @@ data/
.idea/ .idea/
.vscode/ .vscode/
rocksdb rocksdb
sequencer_runner/data/ sequencer/service/data/
storage.json storage.json
result result
wallet-ffi/wallet_ffi.h wallet-ffi/wallet_ffi.h

431
Cargo.lock generated
View File

@ -2,229 +2,6 @@
# It is not intended for manual editing. # It is not intended for manual editing.
version = 4 version = 4
[[package]]
name = "actix"
version = "0.13.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "de7fa236829ba0841304542f7614c42b80fca007455315c45c785ccfa873a85b"
dependencies = [
"actix-macros",
"actix-rt",
"actix_derive",
"bitflags 2.11.0",
"bytes",
"crossbeam-channel",
"futures-core",
"futures-sink",
"futures-task",
"futures-util",
"log",
"once_cell",
"parking_lot",
"pin-project-lite",
"smallvec",
"tokio",
"tokio-util",
]
[[package]]
name = "actix-codec"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f7b0a21988c1bf877cf4759ef5ddaac04c1c9fe808c9142ecb78ba97d97a28a"
dependencies = [
"bitflags 2.11.0",
"bytes",
"futures-core",
"futures-sink",
"memchr",
"pin-project-lite",
"tokio",
"tokio-util",
"tracing",
]
[[package]]
name = "actix-cors"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "daa239b93927be1ff123eebada5a3ff23e89f0124ccb8609234e5103d5a5ae6d"
dependencies = [
"actix-utils",
"actix-web",
"derive_more",
"futures-util",
"log",
"once_cell",
"smallvec",
]
[[package]]
name = "actix-http"
version = "3.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f860ee6746d0c5b682147b2f7f8ef036d4f92fe518251a3a35ffa3650eafdf0e"
dependencies = [
"actix-codec",
"actix-rt",
"actix-service",
"actix-utils",
"bitflags 2.11.0",
"bytes",
"bytestring",
"derive_more",
"encoding_rs",
"foldhash",
"futures-core",
"http 0.2.12",
"httparse",
"httpdate",
"itoa",
"language-tags",
"mime",
"percent-encoding",
"pin-project-lite",
"smallvec",
"tokio",
"tokio-util",
"tracing",
]
[[package]]
name = "actix-macros"
version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e01ed3140b2f8d422c68afa1ed2e85d996ea619c988ac834d255db32138655cb"
dependencies = [
"quote",
"syn 2.0.117",
]
[[package]]
name = "actix-router"
version = "0.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "14f8c75c51892f18d9c46150c5ac7beb81c95f78c8b83a634d49f4ca32551fe7"
dependencies = [
"bytestring",
"cfg-if",
"http 0.2.12",
"regex-lite",
"serde",
"tracing",
]
[[package]]
name = "actix-rt"
version = "2.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "92589714878ca59a7626ea19734f0e07a6a875197eec751bb5d3f99e64998c63"
dependencies = [
"futures-core",
"tokio",
]
[[package]]
name = "actix-server"
version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a65064ea4a457eaf07f2fba30b4c695bf43b721790e9530d26cb6f9019ff7502"
dependencies = [
"actix-rt",
"actix-service",
"actix-utils",
"futures-core",
"futures-util",
"mio",
"socket2 0.5.10",
"tokio",
"tracing",
]
[[package]]
name = "actix-service"
version = "2.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e46f36bf0e5af44bdc4bdb36fbbd421aa98c79a9bce724e1edeb3894e10dc7f"
dependencies = [
"futures-core",
"pin-project-lite",
]
[[package]]
name = "actix-utils"
version = "3.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "88a1dcdff1466e3c2488e1cb5c36a71822750ad43839937f85d2f4d9f8b705d8"
dependencies = [
"local-waker",
"pin-project-lite",
]
[[package]]
name = "actix-web"
version = "4.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff87453bc3b56e9b2b23c1cc0b1be8797184accf51d2abe0f8a33ec275d316bf"
dependencies = [
"actix-codec",
"actix-http",
"actix-macros",
"actix-router",
"actix-rt",
"actix-server",
"actix-service",
"actix-utils",
"actix-web-codegen",
"bytes",
"bytestring",
"cfg-if",
"derive_more",
"encoding_rs",
"foldhash",
"futures-core",
"futures-util",
"impl-more",
"itoa",
"language-tags",
"log",
"mime",
"once_cell",
"pin-project-lite",
"regex-lite",
"serde",
"serde_json",
"serde_urlencoded",
"smallvec",
"socket2 0.6.3",
"time",
"tracing",
"url",
]
[[package]]
name = "actix-web-codegen"
version = "4.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f591380e2e68490b5dfaf1dd1aa0ebe78d84ba7067078512b4ea6e4492d622b8"
dependencies = [
"actix-router",
"proc-macro2",
"quote",
"syn 2.0.117",
]
[[package]]
name = "actix_derive"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6ac1e58cded18cb28ddc17143c4dea5345b3ad575e14f32f66e4054a56eb271"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.117",
]
[[package]] [[package]]
name = "addchain" name = "addchain"
version = "0.2.1" version = "0.2.1"
@ -1011,7 +788,7 @@ dependencies = [
"axum-core 0.4.5", "axum-core 0.4.5",
"bytes", "bytes",
"futures-util", "futures-util",
"http 1.4.0", "http",
"http-body", "http-body",
"http-body-util", "http-body-util",
"hyper", "hyper",
@ -1045,7 +822,7 @@ dependencies = [
"bytes", "bytes",
"form_urlencoded", "form_urlencoded",
"futures-util", "futures-util",
"http 1.4.0", "http",
"http-body", "http-body",
"http-body-util", "http-body-util",
"hyper", "hyper",
@ -1080,7 +857,7 @@ dependencies = [
"async-trait", "async-trait",
"bytes", "bytes",
"futures-util", "futures-util",
"http 1.4.0", "http",
"http-body", "http-body",
"http-body-util", "http-body-util",
"mime", "mime",
@ -1099,7 +876,7 @@ checksum = "08c78f31d7b1291f7ee735c1c6780ccde7785daae9a9206026862dab7d8792d1"
dependencies = [ dependencies = [
"bytes", "bytes",
"futures-core", "futures-core",
"http 1.4.0", "http",
"http-body", "http-body",
"http-body-util", "http-body-util",
"mime", "mime",
@ -1313,7 +1090,7 @@ dependencies = [
"futures-util", "futures-util",
"hex", "hex",
"home", "home",
"http 1.4.0", "http",
"http-body-util", "http-body-util",
"hyper", "hyper",
"hyper-named-pipe", "hyper-named-pipe",
@ -1466,15 +1243,6 @@ dependencies = [
"serde_core", "serde_core",
] ]
[[package]]
name = "bytestring"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "113b4343b5f6617e7ad401ced8de3cc8b012e73a594347c307b90db3e9271289"
dependencies = [
"bytes",
]
[[package]] [[package]]
name = "bzip2-sys" name = "bzip2-sys"
version = "0.1.13+1.0.8" version = "0.1.13+1.0.8"
@ -1732,20 +1500,15 @@ dependencies = [
"anyhow", "anyhow",
"base64 0.22.1", "base64 0.22.1",
"borsh", "borsh",
"bytesize",
"hex", "hex",
"log", "log",
"logos-blockchain-common-http-client", "logos-blockchain-common-http-client",
"nssa", "nssa",
"nssa_core", "nssa_core",
"reqwest",
"serde", "serde",
"serde_json",
"serde_with", "serde_with",
"sha2", "sha2",
"thiserror 2.0.18", "thiserror 2.0.18",
"tokio-retry",
"url",
] ]
[[package]] [[package]]
@ -1877,15 +1640,6 @@ dependencies = [
"unicode-segmentation", "unicode-segmentation",
] ]
[[package]]
name = "convert_case"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "633458d4ef8c78b72454de2d54fd6ab2e60f9e02be22f3c6104cdc8a4e0fceb9"
dependencies = [
"unicode-segmentation",
]
[[package]] [[package]]
name = "convert_case" name = "convert_case"
version = "0.11.0" version = "0.11.0"
@ -1992,15 +1746,6 @@ version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b" checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b"
[[package]]
name = "crossbeam-channel"
version = "0.5.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2"
dependencies = [
"crossbeam-utils",
]
[[package]] [[package]]
name = "crossbeam-deque" name = "crossbeam-deque"
version = "0.8.6" version = "0.8.6"
@ -2297,7 +2042,6 @@ version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "799a97264921d8623a957f6c3b9011f3b5492f557bbb7a5a19b7fa6d06ba8dcb" checksum = "799a97264921d8623a957f6c3b9011f3b5492f557bbb7a5a19b7fa6d06ba8dcb"
dependencies = [ dependencies = [
"convert_case 0.10.0",
"proc-macro2", "proc-macro2",
"quote", "quote",
"rustc_version", "rustc_version",
@ -3099,7 +2843,7 @@ dependencies = [
"futures-core", "futures-core",
"futures-sink", "futures-sink",
"gloo-utils", "gloo-utils",
"http 1.4.0", "http",
"js-sys", "js-sys",
"pin-project", "pin-project",
"serde", "serde",
@ -3163,7 +2907,7 @@ dependencies = [
"fnv", "fnv",
"futures-core", "futures-core",
"futures-sink", "futures-sink",
"http 1.4.0", "http",
"indexmap 2.13.0", "indexmap 2.13.0",
"slab", "slab",
"tokio", "tokio",
@ -3318,17 +3062,6 @@ dependencies = [
"utf8-width", "utf8-width",
] ]
[[package]]
name = "http"
version = "0.2.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1"
dependencies = [
"bytes",
"fnv",
"itoa",
]
[[package]] [[package]]
name = "http" name = "http"
version = "1.4.0" version = "1.4.0"
@ -3346,7 +3079,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184"
dependencies = [ dependencies = [
"bytes", "bytes",
"http 1.4.0", "http",
] ]
[[package]] [[package]]
@ -3357,7 +3090,7 @@ checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a"
dependencies = [ dependencies = [
"bytes", "bytes",
"futures-core", "futures-core",
"http 1.4.0", "http",
"http-body", "http-body",
"pin-project-lite", "pin-project-lite",
] ]
@ -3432,7 +3165,7 @@ dependencies = [
"futures-channel", "futures-channel",
"futures-core", "futures-core",
"h2", "h2",
"http 1.4.0", "http",
"http-body", "http-body",
"httparse", "httparse",
"httpdate", "httpdate",
@ -3465,7 +3198,7 @@ version = "0.27.7"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58"
dependencies = [ dependencies = [
"http 1.4.0", "http",
"hyper", "hyper",
"hyper-util", "hyper-util",
"log", "log",
@ -3516,14 +3249,14 @@ dependencies = [
"bytes", "bytes",
"futures-channel", "futures-channel",
"futures-util", "futures-util",
"http 1.4.0", "http",
"http-body", "http-body",
"hyper", "hyper",
"ipnet", "ipnet",
"libc", "libc",
"percent-encoding", "percent-encoding",
"pin-project-lite", "pin-project-lite",
"socket2 0.6.3", "socket2",
"system-configuration", "system-configuration",
"tokio", "tokio",
"tower-service", "tower-service",
@ -3684,12 +3417,6 @@ dependencies = [
"icu_properties", "icu_properties",
] ]
[[package]]
name = "impl-more"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8a5a9a0ff0086c7a148acb942baaabeadf9504d10400b5a05645853729b9cd2"
[[package]] [[package]]
name = "include_bytes_aligned" name = "include_bytes_aligned"
version = "0.1.4" version = "0.1.4"
@ -3725,7 +3452,6 @@ version = "0.1.0"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"arc-swap", "arc-swap",
"async-trait",
"clap", "clap",
"env_logger", "env_logger",
"futures", "futures",
@ -3825,8 +3551,6 @@ name = "integration_tests"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"base64 0.22.1",
"borsh",
"bytesize", "bytesize",
"common", "common",
"env_logger", "env_logger",
@ -3839,7 +3563,8 @@ dependencies = [
"nssa", "nssa",
"nssa_core", "nssa_core",
"sequencer_core", "sequencer_core",
"sequencer_runner", "sequencer_service",
"sequencer_service_rpc",
"serde_json", "serde_json",
"tempfile", "tempfile",
"testcontainers", "testcontainers",
@ -4048,7 +3773,7 @@ dependencies = [
"futures-channel", "futures-channel",
"futures-util", "futures-util",
"gloo-net", "gloo-net",
"http 1.4.0", "http",
"jsonrpsee-core", "jsonrpsee-core",
"pin-project", "pin-project",
"rustls", "rustls",
@ -4073,7 +3798,7 @@ dependencies = [
"bytes", "bytes",
"futures-timer", "futures-timer",
"futures-util", "futures-util",
"http 1.4.0", "http",
"http-body", "http-body",
"http-body-util", "http-body-util",
"jsonrpsee-types", "jsonrpsee-types",
@ -4134,7 +3859,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c51b7c290bb68ce3af2d029648148403863b982f138484a73f02a9dd52dbd7f" checksum = "4c51b7c290bb68ce3af2d029648148403863b982f138484a73f02a9dd52dbd7f"
dependencies = [ dependencies = [
"futures-util", "futures-util",
"http 1.4.0", "http",
"http-body", "http-body",
"http-body-util", "http-body-util",
"hyper", "hyper",
@ -4160,7 +3885,7 @@ version = "0.26.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc88ff4688e43cc3fa9883a8a95c6fa27aa2e76c96e610b737b6554d650d7fd5" checksum = "bc88ff4688e43cc3fa9883a8a95c6fa27aa2e76c96e610b737b6554d650d7fd5"
dependencies = [ dependencies = [
"http 1.4.0", "http",
"serde", "serde",
"serde_json", "serde_json",
"thiserror 2.0.18", "thiserror 2.0.18",
@ -4184,7 +3909,7 @@ version = "0.26.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b6fceceeb05301cc4c065ab3bd2fa990d41ff4eb44e4ca1b30fa99c057c3e79" checksum = "9b6fceceeb05301cc4c065ab3bd2fa990d41ff4eb44e4ca1b30fa99c057c3e79"
dependencies = [ dependencies = [
"http 1.4.0", "http",
"jsonrpsee-client-transport", "jsonrpsee-client-transport",
"jsonrpsee-core", "jsonrpsee-core",
"jsonrpsee-types", "jsonrpsee-types",
@ -4238,12 +3963,6 @@ dependencies = [
"thiserror 2.0.18", "thiserror 2.0.18",
] ]
[[package]]
name = "language-tags"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d4345964bb142484797b161f473a503a434de77149dd8c7427788c6e13379388"
[[package]] [[package]]
name = "lazy-regex" name = "lazy-regex"
version = "3.6.0" version = "3.6.0"
@ -4620,12 +4339,6 @@ version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77"
[[package]]
name = "local-waker"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4d873d7c67ce09b42110d801813efbc9364414e356be9935700d368351657487"
[[package]] [[package]]
name = "lock_api" name = "lock_api"
version = "0.4.14" version = "0.4.14"
@ -5384,7 +5097,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc"
dependencies = [ dependencies = [
"libc", "libc",
"log",
"wasi", "wasi",
"windows-sys 0.61.2", "windows-sys 0.61.2",
] ]
@ -5398,7 +5110,7 @@ dependencies = [
"bytes", "bytes",
"encoding_rs", "encoding_rs",
"futures-util", "futures-util",
"http 1.4.0", "http",
"httparse", "httparse",
"memchr", "memchr",
"mime", "mime",
@ -5545,6 +5257,7 @@ dependencies = [
"risc0-zkvm", "risc0-zkvm",
"secp256k1", "secp256k1",
"serde", "serde",
"serde_with",
"sha2", "sha2",
"test-case", "test-case",
"test_program_methods", "test_program_methods",
@ -6148,8 +5861,10 @@ name = "program_deployment"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"clap", "clap",
"common",
"nssa", "nssa",
"nssa_core", "nssa_core",
"sequencer_service_rpc",
"tokio", "tokio",
"wallet", "wallet",
] ]
@ -6270,7 +5985,7 @@ dependencies = [
"quinn-udp", "quinn-udp",
"rustc-hash", "rustc-hash",
"rustls", "rustls",
"socket2 0.6.3", "socket2",
"thiserror 2.0.18", "thiserror 2.0.18",
"tokio", "tokio",
"tracing", "tracing",
@ -6307,7 +6022,7 @@ dependencies = [
"cfg_aliases", "cfg_aliases",
"libc", "libc",
"once_cell", "once_cell",
"socket2 0.6.3", "socket2",
"tracing", "tracing",
"windows-sys 0.60.2", "windows-sys 0.60.2",
] ]
@ -6581,12 +6296,6 @@ dependencies = [
"regex-syntax", "regex-syntax",
] ]
[[package]]
name = "regex-lite"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cab834c73d247e67f4fae452806d17d3c7501756d98c8808d7c9c7aa7d18f973"
[[package]] [[package]]
name = "regex-syntax" name = "regex-syntax"
version = "0.8.10" version = "0.8.10"
@ -6606,7 +6315,7 @@ dependencies = [
"futures-core", "futures-core",
"futures-util", "futures-util",
"h2", "h2",
"http 1.4.0", "http",
"http-body", "http-body",
"http-body-util", "http-body-util",
"hyper", "hyper",
@ -7453,47 +7162,43 @@ dependencies = [
] ]
[[package]] [[package]]
name = "sequencer_rpc" name = "sequencer_service"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"actix-cors",
"actix-web",
"anyhow", "anyhow",
"base58",
"base64 0.22.1",
"bedrock_client",
"borsh", "borsh",
"bytesize", "bytesize",
"common",
"futures",
"hex",
"itertools 0.14.0",
"log",
"mempool",
"nssa",
"sequencer_core",
"serde",
"serde_json",
"tempfile",
"tokio",
]
[[package]]
name = "sequencer_runner"
version = "0.1.0"
dependencies = [
"actix",
"actix-web",
"anyhow",
"clap", "clap",
"common", "common",
"env_logger", "env_logger",
"futures", "futures",
"indexer_service_rpc", "indexer_service_rpc",
"jsonrpsee",
"log", "log",
"mempool",
"nssa",
"sequencer_core", "sequencer_core",
"sequencer_rpc", "sequencer_service_protocol",
"sequencer_service_rpc",
"tokio", "tokio",
"tokio-util",
]
[[package]]
name = "sequencer_service_protocol"
version = "0.1.0"
dependencies = [
"common",
"nssa",
"nssa_core",
]
[[package]]
name = "sequencer_service_rpc"
version = "0.1.0"
dependencies = [
"jsonrpsee",
"sequencer_service_protocol",
] ]
[[package]] [[package]]
@ -7689,7 +7394,7 @@ dependencies = [
"const_format", "const_format",
"futures", "futures",
"gloo-net", "gloo-net",
"http 1.4.0", "http",
"http-body-util", "http-body-util",
"hyper", "hyper",
"inventory", "inventory",
@ -7826,16 +7531,6 @@ dependencies = [
"tokio", "tokio",
] ]
[[package]]
name = "socket2"
version = "0.5.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678"
dependencies = [
"libc",
"windows-sys 0.52.0",
]
[[package]] [[package]]
name = "socket2" name = "socket2"
version = "0.6.3" version = "0.6.3"
@ -7855,7 +7550,7 @@ dependencies = [
"base64 0.22.1", "base64 0.22.1",
"bytes", "bytes",
"futures", "futures",
"http 1.4.0", "http",
"httparse", "httparse",
"log", "log",
"rand 0.8.5", "rand 0.8.5",
@ -8161,7 +7856,7 @@ dependencies = [
"etcetera", "etcetera",
"ferroid", "ferroid",
"futures", "futures",
"http 1.4.0", "http",
"itertools 0.14.0", "itertools 0.14.0",
"log", "log",
"memchr", "memchr",
@ -8321,7 +8016,7 @@ dependencies = [
"parking_lot", "parking_lot",
"pin-project-lite", "pin-project-lite",
"signal-hook-registry", "signal-hook-registry",
"socket2 0.6.3", "socket2",
"tokio-macros", "tokio-macros",
"windows-sys 0.61.2", "windows-sys 0.61.2",
] ]
@ -8518,7 +8213,7 @@ dependencies = [
"base64 0.22.1", "base64 0.22.1",
"bytes", "bytes",
"h2", "h2",
"http 1.4.0", "http",
"http-body", "http-body",
"http-body-util", "http-body-util",
"hyper", "hyper",
@ -8526,7 +8221,7 @@ dependencies = [
"hyper-util", "hyper-util",
"percent-encoding", "percent-encoding",
"pin-project", "pin-project",
"socket2 0.6.3", "socket2",
"sync_wrapper", "sync_wrapper",
"tokio", "tokio",
"tokio-stream", "tokio-stream",
@ -8576,7 +8271,7 @@ dependencies = [
"bytes", "bytes",
"futures-core", "futures-core",
"futures-util", "futures-util",
"http 1.4.0", "http",
"http-body", "http-body",
"http-body-util", "http-body-util",
"http-range-header", "http-range-header",
@ -8678,7 +8373,7 @@ checksum = "8628dcc84e5a09eb3d8423d6cb682965dea9133204e8fb3efee74c2a0c259442"
dependencies = [ dependencies = [
"bytes", "bytes",
"data-encoding", "data-encoding",
"http 1.4.0", "http",
"httparse", "httparse",
"log", "log",
"rand 0.9.2", "rand 0.9.2",
@ -8860,7 +8555,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d81f9efa9df032be5934a46a068815a10a042b494b6a58cb0a1a97bb5467ed6f" checksum = "d81f9efa9df032be5934a46a068815a10a042b494b6a58cb0a1a97bb5467ed6f"
dependencies = [ dependencies = [
"base64 0.22.1", "base64 0.22.1",
"http 1.4.0", "http",
"httparse", "httparse",
"log", "log",
] ]
@ -8955,8 +8650,6 @@ dependencies = [
"anyhow", "anyhow",
"async-stream", "async-stream",
"base58", "base58",
"base64 0.22.1",
"borsh",
"clap", "clap",
"common", "common",
"env_logger", "env_logger",
@ -8972,9 +8665,11 @@ dependencies = [
"nssa_core", "nssa_core",
"optfield", "optfield",
"rand 0.8.5", "rand 0.8.5",
"sequencer_service_rpc",
"serde", "serde",
"serde_json", "serde_json",
"sha2", "sha2",
"thiserror 2.0.18",
"token_core", "token_core",
"tokio", "tokio",
"url", "url",
@ -8985,9 +8680,9 @@ name = "wallet-ffi"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"cbindgen", "cbindgen",
"common",
"nssa", "nssa",
"nssa_core", "nssa_core",
"sequencer_service_rpc",
"tempfile", "tempfile",
"tokio", "tokio",
"wallet", "wallet",

View File

@ -17,9 +17,10 @@ members = [
"programs/amm", "programs/amm",
"programs/token/core", "programs/token/core",
"programs/token", "programs/token",
"sequencer_core", "sequencer/core",
"sequencer_rpc", "sequencer/service",
"sequencer_runner", "sequencer/service/protocol",
"sequencer/service/rpc",
"indexer/core", "indexer/core",
"indexer/service", "indexer/service",
"indexer/service/protocol", "indexer/service/protocol",
@ -42,9 +43,10 @@ common = { path = "common" }
mempool = { path = "mempool" } mempool = { path = "mempool" }
storage = { path = "storage" } storage = { path = "storage" }
key_protocol = { path = "key_protocol" } key_protocol = { path = "key_protocol" }
sequencer_core = { path = "sequencer_core" } sequencer_core = { path = "sequencer/core" }
sequencer_rpc = { path = "sequencer_rpc" } sequencer_service_protocol = { path = "sequencer/service/protocol" }
sequencer_runner = { path = "sequencer_runner" } sequencer_service_rpc = { path = "sequencer/service/rpc" }
sequencer_service = { path = "sequencer/service" }
indexer_core = { path = "indexer/core" } indexer_core = { path = "indexer/core" }
indexer_service = { path = "indexer/service" } indexer_service = { path = "indexer/service" }
indexer_service_protocol = { path = "indexer/service/protocol" } indexer_service_protocol = { path = "indexer/service/protocol" }

View File

@ -30,10 +30,10 @@ run-bedrock:
docker compose up docker compose up
# Run Sequencer # Run Sequencer
[working-directory: 'sequencer_runner'] [working-directory: 'sequencer/service']
run-sequencer: run-sequencer:
@echo "🧠 Running sequencer" @echo "🧠 Running sequencer"
RUST_LOG=info RISC0_DEV_MODE=1 cargo run --release -p sequencer_runner configs/debug RUST_LOG=info RISC0_DEV_MODE=1 cargo run --release -p sequencer_service configs/debug/sequencer_config.json
# Run Indexer # Run Indexer
[working-directory: 'indexer/service'] [working-directory: 'indexer/service']
@ -62,8 +62,8 @@ run-wallet +args:
# Clean runtime data # Clean runtime data
clean: clean:
@echo "🧹 Cleaning run artifacts" @echo "🧹 Cleaning run artifacts"
rm -rf sequencer_runner/bedrock_signing_key rm -rf sequencer/service/bedrock_signing_key
rm -rf sequencer_runner/rocksdb rm -rf sequencer/service/rocksdb
rm -rf indexer/service/rocksdb rm -rf indexer/service/rocksdb
rm -rf wallet/configs/debug/storage.json rm -rf wallet/configs/debug/storage.json
rm -rf rocksdb rm -rf rocksdb

View File

@ -161,7 +161,7 @@ The sequencer and logos blockchain node can be run locally:
- `RUST_LOG=info cargo run -p indexer_service indexer/service/configs/indexer_config.json` - `RUST_LOG=info cargo run -p indexer_service indexer/service/configs/indexer_config.json`
3. On another terminal go to the `logos-blockchain/lssa` repo and run the sequencer: 3. On another terminal go to the `logos-blockchain/lssa` repo and run the sequencer:
- `RUST_LOG=info cargo run -p sequencer_runner sequencer_runner/configs/debug` - `RUST_LOG=info cargo run -p sequencer_service sequencer/service/configs/debug/sequencer_config.json`
4. (To run the explorer): on another terminal go to `logos-blockchain/lssa/explorer_service` and run the following: 4. (To run the explorer): on another terminal go to `logos-blockchain/lssa/explorer_service` and run the following:
- `cargo install cargo-leptos` - `cargo install cargo-leptos`
- `cargo leptos build --release` - `cargo leptos build --release`
@ -171,8 +171,8 @@ The sequencer and logos blockchain node can be run locally:
After stopping services above you need to remove 3 folders to start cleanly: After stopping services above you need to remove 3 folders to start cleanly:
1. In the `logos-blockchain/logos-blockchain` folder `state` (not needed in case of docker setup) 1. In the `logos-blockchain/logos-blockchain` folder `state` (not needed in case of docker setup)
2. In the `lssa` folder `sequencer_runner/rocksdb` 2. In the `lssa` folder `sequencer/service/rocksdb`
3. In the `lssa` file `sequencer_runner/bedrock_signing_key` 3. In the `lssa` file `sequencer/service/bedrock_signing_key`
4. In the `lssa` folder `indexer/service/rocksdb` 4. In the `lssa` folder `indexer/service/rocksdb`
### Normal mode (`just` commands) ### Normal mode (`just` commands)
@ -220,7 +220,7 @@ This will use a wallet binary built from this repo and not the one installed in
### Standalone mode ### Standalone mode
The sequencer can be run in standalone mode with: The sequencer can be run in standalone mode with:
```bash ```bash
RUST_LOG=info cargo run --features standalone -p sequencer_runner sequencer_runner/configs/debug RUST_LOG=info cargo run --features standalone -p sequencer_service sequencer/service/configs/debug
``` ```
## Running with Docker ## Running with Docker

View File

@ -13,16 +13,11 @@ nssa_core.workspace = true
anyhow.workspace = true anyhow.workspace = true
thiserror.workspace = true thiserror.workspace = true
serde_json.workspace = true
serde.workspace = true serde.workspace = true
serde_with.workspace = true serde_with.workspace = true
reqwest.workspace = true base64.workspace = true
sha2.workspace = true sha2.workspace = true
log.workspace = true log.workspace = true
hex.workspace = true hex.workspace = true
borsh.workspace = true borsh.workspace = true
bytesize.workspace = true
base64.workspace = true
url.workspace = true
logos-blockchain-common-http-client.workspace = true logos-blockchain-common-http-client.workspace = true
tokio-retry.workspace = true

View File

@ -60,6 +60,18 @@ pub struct Block {
pub bedrock_parent_id: MantleMsgId, pub bedrock_parent_id: MantleMsgId,
} }
/// Serializes a `Block` for JSON transports as a single Base64 string of its
/// Borsh encoding, delegating to the shared `borsh_base64` helper module.
impl Serialize for Block {
    fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        crate::borsh_base64::serialize(self, serializer)
    }
}
/// Deserializes a `Block` from the Base64-of-Borsh representation produced by
/// the matching `Serialize` impl, via the shared `borsh_base64` helper module.
impl<'de> Deserialize<'de> for Block {
    fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        crate::borsh_base64::deserialize(deserializer)
    }
}
#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)] #[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
pub struct HashableBlockData { pub struct HashableBlockData {
pub block_id: BlockId, pub block_id: BlockId,

View File

@ -0,0 +1,25 @@
//! This module provides utilities for serializing and deserializing data by combining Borsh and
//! Base64 encodings.
use base64::{Engine as _, engine::general_purpose::STANDARD};
use borsh::{BorshDeserialize, BorshSerialize};
use serde::{Deserialize, Serialize};
pub fn serialize<T: BorshSerialize, S: serde::Serializer>(
value: &T,
serializer: S,
) -> Result<S::Ok, S::Error> {
let borsh_encoded = borsh::to_vec(value).map_err(serde::ser::Error::custom)?;
let base64_encoded = STANDARD.encode(&borsh_encoded);
Serialize::serialize(&base64_encoded, serializer)
}
pub fn deserialize<'de, T: BorshDeserialize, D: serde::Deserializer<'de>>(
deserializer: D,
) -> Result<T, D::Error> {
let base64_encoded = <String as Deserialize>::deserialize(deserializer)?;
let borsh_encoded = STANDARD
.decode(base64_encoded.as_bytes())
.map_err(serde::de::Error::custom)?;
borsh::from_slice(&borsh_encoded).map_err(serde::de::Error::custom)
}

View File

@ -1,43 +0,0 @@
use nssa::AccountId;
use serde::Deserialize;
use crate::rpc_primitives::errors::RpcError;
/// Error envelope returned by the sequencer's JSON-RPC server.
#[derive(Debug, Clone, Deserialize)]
pub struct SequencerRpcError {
    // JSON-RPC protocol version string from the response.
    pub jsonrpc: String,
    // The actual error payload.
    pub error: RpcError,
    // Id of the request this error answers.
    pub id: u64,
}
/// Errors that can occur while talking to the sequencer over HTTP/JSON-RPC.
#[derive(thiserror::Error, Debug)]
pub enum SequencerClientError {
    /// Transport-level failure from the underlying HTTP client.
    #[error("HTTP error")]
    HTTPError(#[from] reqwest::Error),
    /// Failure to (de)serialize a request or response body.
    #[error("Serde error")]
    SerdeError(#[from] serde_json::Error),
    /// The sequencer answered with a JSON-RPC error envelope.
    #[error("Internal error: {0:?}")]
    InternalError(SequencerRpcError),
}
// Lets a JSON-RPC error envelope be lifted into the client error type with `?`.
impl From<SequencerRpcError> for SequencerClientError {
    fn from(value: SequencerRpcError) -> Self {
        Self::InternalError(value)
    }
}
/// High-level reasons a wallet/client operation against the sequencer can fail.
#[derive(Debug, thiserror::Error)]
pub enum ExecutionFailureKind {
    /// Could not fetch required data from the sequencer.
    #[error("Failed to get data from sequencer")]
    SequencerError(#[source] anyhow::Error),
    /// The sum of input amounts does not equal the sum of output amounts.
    #[error("Inputs amounts does not match outputs")]
    AmountMismatchError,
    /// A required account key was not found.
    #[error("Accounts key not found")]
    KeyNotFoundError,
    /// The sequencer client itself reported an error (HTTP, serde, or RPC).
    #[error("Sequencer client error: {0:?}")]
    SequencerClientError(#[from] SequencerClientError),
    /// The payer account cannot cover the cost of the operation.
    #[error("Can not pay for operation")]
    InsufficientFundsError,
    /// The stored data for the given account failed validation.
    #[error("Account {0} data is invalid")]
    AccountDataError(AccountId),
}

View File

@ -4,10 +4,8 @@ use borsh::{BorshDeserialize, BorshSerialize};
use serde_with::{DeserializeFromStr, SerializeDisplay}; use serde_with::{DeserializeFromStr, SerializeDisplay};
pub mod block; pub mod block;
mod borsh_base64;
pub mod config; pub mod config;
pub mod error;
pub mod rpc_primitives;
pub mod sequencer_client;
pub mod transaction; pub mod transaction;
// Module for tests utility functions // Module for tests utility functions

View File

@ -1,194 +0,0 @@
use std::fmt;
use serde_json::{Value, to_value};
/// Newtype carrying the message of a request-parse failure so it can be
/// serialized into an RPC error payload.
#[derive(serde::Serialize)]
pub struct RpcParseError(pub String);
/// This struct may be returned from JSON RPC server in case of error.
///
/// It is expected that this struct has `From<_>` impls for all other RPC errors
/// like [`RpcBlockError`](crate::types::blocks::RpcBlockError).
#[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Eq)]
#[serde(deny_unknown_fields)]
pub struct RpcError {
    // Structured error detail; flattened into the JSON object when present.
    #[serde(flatten)]
    pub error_struct: Option<RpcErrorKind>,
    /// Deprecated; please use the `error_struct` instead.
    pub code: i64,
    /// Deprecated; please use the `error_struct` instead.
    pub message: String,
    /// Deprecated; please use the `error_struct` instead.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub data: Option<Value>,
}
#[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Eq)]
#[serde(tag = "name", content = "cause", rename_all = "SCREAMING_SNAKE_CASE")]
pub enum RpcErrorKind {
RequestValidationError(RpcRequestValidationErrorKind),
HandlerError(Value),
InternalError(Value),
}
#[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Eq)]
#[serde(tag = "name", content = "info", rename_all = "SCREAMING_SNAKE_CASE")]
pub enum RpcRequestValidationErrorKind {
MethodNotFound { method_name: String },
ParseError { error_message: String },
}
/// A general Server Error.
#[derive(serde::Serialize, serde::Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum ServerError {
Timeout,
Closed,
}
impl RpcError {
    /// A generic constructor.
    ///
    /// Mostly for completeness, doesn't do anything but filling in the corresponding fields.
    #[must_use]
    pub const fn new(code: i64, message: String, data: Option<Value>) -> Self {
        Self {
            code,
            message,
            data,
            error_struct: None,
        }
    }
    /// Create an Invalid Param error.
    ///
    /// Falls back to a server error if `data` itself cannot be serialized to JSON.
    pub fn invalid_params(data: impl serde::Serialize) -> Self {
        let value = match to_value(data) {
            Ok(value) => value,
            Err(err) => {
                return Self::server_error(Some(format!(
                    "Failed to serialize invalid parameters error: {:?}",
                    err.to_string()
                )));
            }
        };
        // -32602 is the JSON-RPC 2.0 "Invalid params" code.
        Self::new(-32_602, "Invalid params".to_owned(), Some(value))
    }
    /// Create a server error.
    ///
    /// Panics if `e` is not representable as JSON.
    pub fn server_error<E: serde::Serialize>(e: Option<E>) -> Self {
        Self::new(
            -32_000,
            "Server error".to_owned(),
            e.map(|v| to_value(v).expect("Must be representable in JSON")),
        )
    }
    /// Create a parse error.
    ///
    /// Uses the JSON-RPC 2.0 "Parse error" code (-32700) and fills both the
    /// deprecated `data` field and the structured `error_struct`.
    #[must_use]
    pub fn parse_error(e: String) -> Self {
        Self {
            code: -32_700,
            message: "Parse error".to_owned(),
            data: Some(Value::String(e.clone())),
            error_struct: Some(RpcErrorKind::RequestValidationError(
                RpcRequestValidationErrorKind::ParseError { error_message: e },
            )),
        }
    }
    /// Create an internal error describing a serialization failure.
    #[must_use]
    pub fn serialization_error(e: &str) -> Self {
        Self::new_internal_error(Some(Value::String(e.to_owned())), e)
    }
    /// Helper method to define extract `INTERNAL_ERROR` in separate `RpcErrorKind`
    /// Returns `HANDLER_ERROR` if the error is not internal one.
    #[must_use]
    pub fn new_internal_or_handler_error(error_data: Option<Value>, error_struct: Value) -> Self {
        // Dispatch on the "name" tag of the already-serialized error value.
        if error_struct["name"] == "INTERNAL_ERROR" {
            let error_message = match error_struct["info"].get("error_message") {
                Some(Value::String(error_message)) => error_message.as_str(),
                _ => "InternalError happened during serializing InternalError",
            };
            Self::new_internal_error(error_data, error_message)
        } else {
            Self::new_handler_error(error_data, error_struct)
        }
    }
    /// Build an `INTERNAL_ERROR` with the given message under `info.error_message`.
    #[must_use]
    pub fn new_internal_error(error_data: Option<Value>, info: &str) -> Self {
        Self {
            code: -32_000,
            message: "Server error".to_owned(),
            data: error_data,
            error_struct: Some(RpcErrorKind::InternalError(serde_json::json!({
                "name": "INTERNAL_ERROR",
                "info": serde_json::json!({"error_message": info})
            }))),
        }
    }
    /// Build a `HandlerError` wrapping the already-serialized error value.
    fn new_handler_error(error_data: Option<Value>, error_struct: Value) -> Self {
        Self {
            code: -32_000,
            message: "Server error".to_owned(),
            data: error_data,
            error_struct: Some(RpcErrorKind::HandlerError(error_struct)),
        }
    }
    /// Create a method not found error.
    ///
    /// Uses the JSON-RPC 2.0 "Method not found" code (-32601).
    #[must_use]
    pub fn method_not_found(method: String) -> Self {
        Self {
            code: -32_601,
            message: "Method not found".to_owned(),
            data: Some(Value::String(method.clone())),
            error_struct: Some(RpcErrorKind::RequestValidationError(
                RpcRequestValidationErrorKind::MethodNotFound {
                    method_name: method,
                },
            )),
        }
    }
}
/// The human-readable form is simply the `Debug` representation.
impl fmt::Display for RpcError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{:?}", self)
    }
}
impl From<RpcParseError> for RpcError {
fn from(parse_error: RpcParseError) -> Self {
Self::parse_error(parse_error.0)
}
}
impl From<std::convert::Infallible> for RpcError {
    /// `Infallible` is an uninhabited type, so a value of it can never exist and
    /// this conversion can never actually run. Matching on the value with zero
    /// arms is statically exhaustive: the compiler proves unreachability for us,
    /// with no `unsafe` and no runtime cost.
    fn from(infallible: std::convert::Infallible) -> Self {
        match infallible {}
    }
}
/// Renders as `ServerError: Timeout` / `ServerError: Closed`.
impl fmt::Display for ServerError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let kind = match self {
            Self::Timeout => "Timeout",
            Self::Closed => "Closed",
        };
        write!(f, "ServerError: {kind}")
    }
}
/// Convert a transport-level server error into an internal RPC error, attaching
/// the serialized error value as data when serialization succeeds.
impl From<ServerError> for RpcError {
    fn from(e: ServerError) -> Self {
        match to_value(&e) {
            Ok(error_data) => Self::new_internal_error(Some(error_data), e.to_string().as_str()),
            Err(_) => Self::new_internal_error(None, "Failed to serialize ServerError"),
        }
    }
}

View File

@ -1,588 +0,0 @@
// Copyright 2017 tokio-jsonrpc Developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! JSON-RPC 2.0 messages.
//!
//! The main entrypoint here is the [Message](enum.Message.html). The others are just building
//! blocks and you should generally work with `Message` instead.
use std::fmt::{Formatter, Result as FmtResult};
use serde::{
de::{Deserializer, Error, Unexpected, Visitor},
ser::{SerializeStruct as _, Serializer},
};
use serde_json::{Result as JsonResult, Value};
use super::errors::RpcError;
/// Result of parsing raw bytes/text: a valid message, or a protocol-level failure.
pub type Parsed = Result<Message, Broken>;
/// Marker for the JSON-RPC protocol version; always serializes as the string "2.0"
/// and refuses to deserialize anything else.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
struct Version;
impl serde::Serialize for Version {
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        serializer.serialize_str("2.0")
    }
}
impl<'de> serde::Deserialize<'de> for Version {
    #[expect(
        clippy::renamed_function_params,
        reason = "More readable than original serde parameter names"
    )]
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        // Visitor that only accepts the exact string "2.0".
        struct VersionVisitor;
        impl Visitor<'_> for VersionVisitor {
            type Value = Version;
            fn expecting(&self, formatter: &mut Formatter<'_>) -> FmtResult {
                formatter.write_str("a version string")
            }
            fn visit_str<E: Error>(self, value: &str) -> Result<Version, E> {
                match value {
                    "2.0" => Ok(Version),
                    _ => Err(E::invalid_value(Unexpected::Str(value), &"value 2.0")),
                }
            }
        }
        deserializer.deserialize_str(VersionVisitor)
    }
}
/// An RPC request.
#[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Eq)]
#[serde(deny_unknown_fields)]
#[expect(
    clippy::partial_pub_fields,
    reason = "We don't want to allow access to the version, but the others are public for ease of use"
)]
pub struct Request {
    jsonrpc: Version,
    pub method: String,
    /// Omitted from the wire format when null.
    #[serde(default, skip_serializing_if = "Value::is_null")]
    pub params: Value,
    /// Arbitrary JSON value chosen by the caller; echoed back in the response.
    pub id: Value,
}
impl Request {
#[must_use]
pub fn from_payload_version_2_0(method: String, payload: serde_json::Value) -> Self {
Self {
jsonrpc: Version,
method,
params: payload,
// ToDo: Correct checking of id
id: 1.into(),
}
}
/// Answer the request with a (positive) reply.
///
/// The ID is taken from the request.
#[must_use]
pub fn reply(&self, reply: Value) -> Message {
Message::Response(Response {
jsonrpc: Version,
result: Ok(reply),
id: self.id.clone(),
})
}
/// Answer the request with an error.
#[must_use]
pub fn error(&self, error: RpcError) -> Message {
Message::Response(Response {
jsonrpc: Version,
result: Err(error),
id: self.id.clone(),
})
}
}
/// A response to an RPC.
///
/// It is created by the methods on [Request](struct.Request.html).
#[expect(
    clippy::partial_pub_fields,
    reason = "We don't want to allow access to the version, but the others are public for ease of use"
)]
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Response {
    jsonrpc: Version,
    /// Success payload, or the error to report; serialized as either the
    /// `result` or the `error` field (see the manual `Serialize` impl).
    pub result: Result<Value, RpcError>,
    /// Echo of the originating request's id.
    pub id: Value,
}
/// Serializes as `{"jsonrpc": "2.0", "result": …, "id": …}` on success, or with
/// an `"error"` field instead of `"result"` on failure.
impl serde::Serialize for Response {
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        let mut state = serializer.serialize_struct("Response", 3)?;
        state.serialize_field("jsonrpc", &self.jsonrpc)?;
        match &self.result {
            Ok(value) => state.serialize_field("result", value)?,
            Err(err) => state.serialize_field("error", err)?,
        }
        state.serialize_field("id", &self.id)?;
        state.end()
    }
}
/// A helper trick for deserialization.
#[derive(serde::Deserialize)]
#[serde(deny_unknown_fields)]
struct WireResponse {
    // It is actually used to eat and sanity check the deserialized text
    #[serde(rename = "jsonrpc")]
    _jsonrpc: Version,
    // Make sure we accept null as Some(Value::Null), instead of going to None
    #[serde(default, deserialize_with = "some_value")]
    result: Option<Value>,
    error: Option<RpcError>,
    id: Value,
}
// Implementing deserialize is hard. We sidestep the difficulty by deserializing a similar
// structure that directly corresponds to whatever is on the wire and then convert it to our more
// convenient representation.
impl<'de> serde::Deserialize<'de> for Response {
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        let wr: WireResponse = serde::Deserialize::deserialize(deserializer)?;
        // Exactly one of `result`/`error` must be present on the wire.
        let result = match (wr.result, wr.error) {
            (Some(res), None) => Ok(res),
            (None, Some(err)) => Err(err),
            _ => {
                let err = D::Error::custom("Either 'error' or 'result' is expected, but not both");
                return Err(err);
            }
        };
        Ok(Self {
            jsonrpc: Version,
            result,
            id: wr.id,
        })
    }
}
/// A notification (doesn't expect an answer).
#[expect(
    clippy::partial_pub_fields,
    reason = "We don't want to allow access to the version, but the others are public for ease of use"
)]
#[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Eq)]
#[serde(deny_unknown_fields)]
pub struct Notification {
    jsonrpc: Version,
    pub method: String,
    /// Omitted from the wire format when null.
    #[serde(default, skip_serializing_if = "Value::is_null")]
    pub params: Value,
}
/// One message of the JSON RPC protocol.
///
/// One message, directly mapped from the structures of the protocol. See the
/// [specification](http://www.jsonrpc.org/specification) for more details.
///
/// Since the protocol allows one endpoint to be both client and server at the same time, the
/// message can decode and encode both directions of the protocol.
///
/// The `Batch` variant is supposed to be created directly, without a constructor.
///
/// The `UnmatchedSub` variant is used when a request is an array and some of the subrequests
/// aren't recognized as valid json rpc 2.0 messages. This is never returned as a top-level
/// element, it is returned as `Err(Broken::Unmatched)`.
#[derive(Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize)]
#[serde(untagged)]
pub enum Message {
    /// An RPC request.
    Request(Request),
    /// A response to a Request.
    Response(Response),
    /// A notification.
    Notification(Notification),
    /// A batch of more requests or responses.
    ///
    /// The protocol allows bundling multiple requests, notifications or responses to a single
    /// message.
    ///
    /// This variant has no direct constructor and is expected to be constructed manually.
    Batch(Vec<Self>),
    /// An unmatched sub entry in a `Batch`.
    ///
    /// When there's a `Batch` and an element doesn't conform to the JSONRPC 2.0 format, that one
    /// is represented by this. This is never produced as a top-level value when parsing, the
    /// `Err(Broken::Unmatched)` is used instead. It is not possible to serialize.
    #[serde(skip_serializing)]
    UnmatchedSub(Value),
}
impl Message {
/// A constructor for a request.
///
/// The ID is auto-set to dontcare.
#[must_use]
pub fn request(method: String, params: Value) -> Self {
let id = Value::from("dontcare");
Self::Request(Request {
jsonrpc: Version,
method,
params,
id,
})
}
/// Create a top-level error (without an ID).
#[must_use]
pub const fn error(error: RpcError) -> Self {
Self::Response(Response {
jsonrpc: Version,
result: Err(error),
id: Value::Null,
})
}
/// A constructor for a notification.
#[must_use]
pub const fn notification(method: String, params: Value) -> Self {
Self::Notification(Notification {
jsonrpc: Version,
method,
params,
})
}
/// A constructor for a response.
#[must_use]
pub const fn response(id: Value, result: Result<Value, RpcError>) -> Self {
Self::Response(Response {
jsonrpc: Version,
result,
id,
})
}
/// Returns id or Null if there is no id.
#[must_use]
pub fn id(&self) -> Value {
match self {
Self::Request(req) => req.id.clone(),
Self::Response(response) => response.id.clone(),
Self::Notification(_) | Self::Batch(_) | Self::UnmatchedSub(_) => Value::Null,
}
}
}
impl From<Message> for String {
fn from(val: Message) -> Self {
::serde_json::ser::to_string(&val).expect("message serialization to json should not fail")
}
}
impl From<Message> for Vec<u8> {
fn from(val: Message) -> Self {
::serde_json::ser::to_vec(&val)
.expect("message serialization to json bytes should not fail")
}
}
/// A broken message.
///
/// Protocol-level errors.
#[derive(Debug, Clone, PartialEq, Eq, serde::Deserialize)]
#[serde(untagged)]
pub enum Broken {
    /// It was valid JSON, but doesn't match the form of a JSONRPC 2.0 message.
    Unmatched(Value),
    /// Invalid JSON.
    /// Never produced by deserialization itself; constructed from the serde error
    /// in [`decoded_to_parsed`].
    #[serde(skip_deserializing)]
    SyntaxError(String),
}
impl Broken {
/// Generate an appropriate error message.
///
/// The error message for these things are specified in the RFC, so this just creates an error
/// with the right values.
#[must_use]
pub fn reply(&self) -> Message {
match self {
Self::Unmatched(_) => Message::error(RpcError::parse_error(
"JSON RPC Request format was expected".to_owned(),
)),
Self::SyntaxError(e) => Message::error(RpcError::parse_error(e.clone())),
}
}
}
/// A trick to easily deserialize and detect valid JSON, but invalid Message.
/// The untagged repr tries `Message` first, then falls back to `Broken`.
#[derive(serde::Deserialize)]
#[serde(untagged)]
pub enum WireMessage {
    Message(Message),
    Broken(Broken),
}
/// Normalize a raw serde decode result into [`Parsed`]: a top-level
/// `UnmatchedSub` becomes `Broken::Unmatched`, and a JSON syntax error becomes
/// `Broken::SyntaxError`.
pub fn decoded_to_parsed(res: JsonResult<WireMessage>) -> Parsed {
    match res {
        Ok(WireMessage::Message(message)) => match message {
            Message::UnmatchedSub(value) => Err(Broken::Unmatched(value)),
            valid => Ok(valid),
        },
        Ok(WireMessage::Broken(broken)) => Err(broken),
        Err(e) => Err(Broken::SyntaxError(e.to_string())),
    }
}
/// Read a [Message](enum.Message.html) from a slice.
///
/// Invalid JSON or JSONRPC messages are reported as [Broken](enum.Broken.html).
pub fn from_slice(s: &[u8]) -> Parsed {
decoded_to_parsed(::serde_json::de::from_slice(s))
}
/// Read a [Message](enum.Message.html) from a string.
///
/// Invalid JSON or JSONRPC messages are reported as [Broken](enum.Broken.html).
pub fn from_str(s: &str) -> Parsed {
from_slice(s.as_bytes())
}
/// Deserializer for `Option<Value>` that produces `Some(Value::Null)`.
///
/// The usual one produces None in that case. But we need to know the difference between
/// `{x: null}` and `{}`.
fn some_value<'de, D: Deserializer<'de>>(deserializer: D) -> Result<Option<Value>, D::Error> {
    let value = <Value as serde::Deserialize>::deserialize(deserializer)?;
    Ok(Some(value))
}
#[cfg(test)]
mod tests {
    use serde_json::{Value, de::from_slice, json, ser::to_vec};
    use super::*;
    /// Test serialization and deserialization of the Message.
    ///
    /// We first deserialize it from a string. That way we check deserialization works.
    /// But since serialization doesn't have to produce the exact same result (order, spaces, …),
    /// we then serialize and deserialize the thing again and check it matches.
    #[test]
    fn message_serde() {
        // A helper for running one message test
        fn one(input: &str, expected: &Message) {
            let parsed: Message = from_str(input).unwrap();
            assert_eq!(*expected, parsed);
            let serialized = to_vec(&parsed).unwrap();
            let deserialized: Message = from_slice(&serialized).unwrap();
            assert_eq!(parsed, deserialized);
        }
        // A request without parameters
        one(
            r#"{"jsonrpc": "2.0", "method": "call", "id": 1}"#,
            &Message::Request(Request {
                jsonrpc: Version,
                method: "call".to_owned(),
                params: Value::Null,
                id: json!(1),
            }),
        );
        // A request with parameters
        one(
            r#"{"jsonrpc": "2.0", "method": "call", "params": [1, 2, 3], "id": 2}"#,
            &Message::Request(Request {
                jsonrpc: Version,
                method: "call".to_owned(),
                params: json!([1, 2, 3]),
                id: json!(2),
            }),
        );
        // A notification (with parameters)
        one(
            r#"{"jsonrpc": "2.0", "method": "notif", "params": {"x": "y"}}"#,
            &Message::Notification(Notification {
                jsonrpc: Version,
                method: "notif".to_owned(),
                params: json!({"x": "y"}),
            }),
        );
        // A successful response
        one(
            r#"{"jsonrpc": "2.0", "result": 42, "id": 3}"#,
            &Message::Response(Response {
                jsonrpc: Version,
                result: Ok(json!(42)),
                id: json!(3),
            }),
        );
        // A successful response
        one(
            r#"{"jsonrpc": "2.0", "result": null, "id": 3}"#,
            &Message::Response(Response {
                jsonrpc: Version,
                result: Ok(Value::Null),
                id: json!(3),
            }),
        );
        // An error
        one(
            r#"{"jsonrpc": "2.0", "error": {"code": 42, "message": "Wrong!"}, "id": null}"#,
            &Message::Response(Response {
                jsonrpc: Version,
                result: Err(RpcError::new(42, "Wrong!".to_owned(), None)),
                id: Value::Null,
            }),
        );
        // A batch
        one(
            r#"[
                {"jsonrpc": "2.0", "method": "notif"},
                {"jsonrpc": "2.0", "method": "call", "id": 42}
            ]"#,
            &Message::Batch(vec![
                Message::Notification(Notification {
                    jsonrpc: Version,
                    method: "notif".to_owned(),
                    params: Value::Null,
                }),
                Message::Request(Request {
                    jsonrpc: Version,
                    method: "call".to_owned(),
                    params: Value::Null,
                    id: json!(42),
                }),
            ]),
        );
        // Some handling of broken messages inside a batch
        let parsed = from_str(
            r#"[
                {"jsonrpc": "2.0", "method": "notif"},
                {"jsonrpc": "2.0", "method": "call", "id": 42},
                true
            ]"#,
        )
        .unwrap();
        assert_eq!(
            Message::Batch(vec![
                Message::Notification(Notification {
                    jsonrpc: Version,
                    method: "notif".to_owned(),
                    params: Value::Null,
                }),
                Message::Request(Request {
                    jsonrpc: Version,
                    method: "call".to_owned(),
                    params: Value::Null,
                    id: json!(42),
                }),
                Message::UnmatchedSub(Value::Bool(true)),
            ]),
            parsed
        );
        // UnmatchedSub is marked skip_serializing, so serializing it must fail.
        to_vec(&Message::UnmatchedSub(Value::Null)).unwrap_err();
    }
    /// A helper for the `broken` test.
    ///
    /// Check that the given JSON string parses, but is not recognized as a valid RPC message.
    ///
    /// Test things that are almost but not entirely JSONRPC are rejected.
    ///
    /// The reject is done by returning it as Unmatched.
    #[test]
    fn broken() {
        // A helper with one test
        fn one(input: &str) {
            let msg = from_str(input);
            match msg {
                Err(Broken::Unmatched(_)) => (),
                _ => panic!("{input} recognized as an RPC message: {msg:?}!"),
            }
        }
        // Missing the version
        one(r#"{"method": "notif"}"#);
        // Wrong version
        one(r#"{"jsonrpc": 2.0, "method": "notif"}"#);
        // A response with both result and error
        one(r#"{"jsonrpc": "2.0", "result": 42, "error": {"code": 42, "message": "!"}, "id": 1}"#);
        // A response without an id
        one(r#"{"jsonrpc": "2.0", "result": 42}"#);
        // An extra field
        one(r#"{"jsonrpc": "2.0", "method": "weird", "params": 42, "others": 43, "id": 2}"#);
        // Something completely different
        one(r#"{"x": [1, 2, 3]}"#);
        match from_str("{]") {
            Err(Broken::SyntaxError(_)) => (),
            other => panic!("Something unexpected: {other:?}"),
        }
    }
    /// Test some non-trivial aspects of the constructors.
    ///
    /// This doesn't have a full coverage, because there's not much to actually test there.
    /// Most of it is related to the ids.
    // NOTE(review): this test asserts the two requests have distinct, string ids,
    // but `Message::request` assigns the fixed id "dontcare" to every request, so
    // `assert_ne!(msg1, msg2)` would fail — presumably why the test is ignored.
    // TODO confirm intent and either fix the test or the constructor docs.
    #[test]
    #[ignore = "Not a full coverage test"]
    fn constructors() {
        let msg1 = Message::request("call".to_owned(), json!([1, 2, 3]));
        let msg2 = Message::request("call".to_owned(), json!([1, 2, 3]));
        // They differ, even when created with the same parameters
        assert_ne!(msg1, msg2);
        // And, specifically, they differ in the ID's
        let (req1, req2) = if let (Message::Request(req1), Message::Request(req2)) = (msg1, msg2) {
            assert_ne!(req1.id, req2.id);
            assert!(req1.id.is_string());
            assert!(req2.id.is_string());
            (req1, req2)
        } else {
            panic!("Non-request received");
        };
        let id1 = req1.id.clone();
        // When we answer a message, we get the same ID
        if let Message::Response(resp) = req1.reply(json!([1, 2, 3])) {
            assert_eq!(
                resp,
                Response {
                    jsonrpc: Version,
                    result: Ok(json!([1, 2, 3])),
                    id: id1
                }
            );
        } else {
            panic!("Not a response");
        }
        let id2 = req2.id.clone();
        // The same with an error
        if let Message::Response(resp) = req2.error(RpcError::new(42, "Wrong!".to_owned(), None)) {
            assert_eq!(
                resp,
                Response {
                    jsonrpc: Version,
                    result: Err(RpcError::new(42, "Wrong!".to_owned(), None)),
                    id: id2,
                }
            );
        } else {
            panic!("Not a response");
        }
        // When we have unmatched, we generate a top-level error with Null id.
        if let Message::Response(resp) =
            Message::error(RpcError::new(43, "Also wrong!".to_owned(), None))
        {
            assert_eq!(
                resp,
                Response {
                    jsonrpc: Version,
                    result: Err(RpcError::new(43, "Also wrong!".to_owned(), None)),
                    id: Value::Null,
                }
            );
        } else {
            panic!("Not a response");
        }
    }
}

View File

@ -1,57 +0,0 @@
use bytesize::ByteSize;
use serde::{Deserialize, Serialize};
pub mod errors;
pub mod message;
pub mod parser;
pub mod requests;
/// Resource limits applied to incoming RPC requests.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct RpcLimitsConfig {
    /// Maximum byte size of the json payload.
    pub json_payload_max_size: ByteSize,
}
impl Default for RpcLimitsConfig {
    // Default payload cap: 10 MiB.
    fn default() -> Self {
        Self {
            json_payload_max_size: ByteSize::mib(10),
        }
    }
}
/// Top-level configuration for the JSON RPC server.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct RpcConfig {
    /// Socket address to bind, e.g. "0.0.0.0:3040".
    pub addr: String,
    /// Allowed CORS origins; defaults to the wildcard "*".
    pub cors_allowed_origins: Vec<String>,
    #[serde(default)]
    pub limits_config: RpcLimitsConfig,
}
impl Default for RpcConfig {
    // Binds all interfaces on port 3040, allows any origin, default limits.
    fn default() -> Self {
        Self {
            addr: "0.0.0.0:3040".to_owned(),
            cors_allowed_origins: vec!["*".to_owned()],
            limits_config: RpcLimitsConfig::default(),
        }
    }
}
impl RpcConfig {
    /// Config bound to the given socket address; every other field keeps its default.
    #[must_use]
    pub fn new(addr: &str) -> Self {
        Self {
            addr: addr.to_owned(),
            ..Self::default()
        }
    }
    /// Config bound to `0.0.0.0:{port}`; every other field keeps its default.
    #[must_use]
    pub fn with_port(port: u16) -> Self {
        Self::new(&format!("0.0.0.0:{port}"))
    }
}

View File

@ -1,29 +0,0 @@
use serde::de::DeserializeOwned;
use serde_json::Value;
use super::errors::RpcParseError;
/// Implements [`RpcRequest`] for a request type by delegating to `parse_params`.
/// The expansion expects `RpcRequest`, `RpcParseError`, `Value` and `parse_params`
/// to be in scope at the call site.
#[macro_export]
macro_rules! parse_request {
    ($request_name:ty) => {
        impl RpcRequest for $request_name {
            fn parse(value: Option<Value>) -> Result<Self, RpcParseError> {
                parse_params::<Self>(value)
            }
        }
    };
}
/// A typed RPC request that can be parsed from the raw JSON `params` value.
pub trait RpcRequest: Sized {
    fn parse(value: Option<Value>) -> Result<Self, RpcParseError>;
}
/// Deserialize the JSON `params` value into `T`, reporting a missing or
/// malformed payload as an [`RpcParseError`].
pub fn parse_params<T: DeserializeOwned>(value: Option<Value>) -> Result<T, RpcParseError> {
    match value {
        Some(value) => serde_json::from_value(value)
            .map_err(|err| RpcParseError(format!("Failed parsing args: {err}"))),
        None => Err(RpcParseError("Require at least one parameter".to_owned())),
    }
}

View File

@ -1,219 +0,0 @@
use std::collections::HashMap;
use nssa::AccountId;
use nssa_core::program::ProgramId;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use super::{
errors::RpcParseError,
parser::{RpcRequest, parse_params},
};
use crate::{HashType, parse_request};
/// Serde helpers for `#[serde(with = "base64_deser")]`: byte buffers travel as
/// standard-alphabet base64 strings on the wire.
mod base64_deser {
    use base64::{Engine as _, engine::general_purpose};
    use serde::{self, Deserialize, Deserializer, Serializer, ser::SerializeSeq as _};
    /// Same encoding for `Vec<Vec<u8>>`: a JSON array of base64 strings.
    pub mod vec {
        use super::*;
        // Encode each inner buffer independently and emit them as a sequence.
        pub fn serialize<S>(bytes_vec: &[Vec<u8>], serializer: S) -> Result<S::Ok, S::Error>
        where
            S: Serializer,
        {
            let mut seq = serializer.serialize_seq(Some(bytes_vec.len()))?;
            for bytes in bytes_vec {
                let s = general_purpose::STANDARD.encode(bytes);
                seq.serialize_element(&s)?;
            }
            seq.end()
        }
        // Decode a sequence of base64 strings; any bad element fails the whole value.
        pub fn deserialize<'de, D>(deserializer: D) -> Result<Vec<Vec<u8>>, D::Error>
        where
            D: Deserializer<'de>,
        {
            let base64_strings: Vec<String> = Deserialize::deserialize(deserializer)?;
            base64_strings
                .into_iter()
                .map(|s| {
                    general_purpose::STANDARD
                        .decode(&s)
                        .map_err(serde::de::Error::custom)
                })
                .collect()
        }
    }
    // Encode a single byte buffer as one base64 string.
    pub fn serialize<S>(bytes: &[u8], serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let base64_string = general_purpose::STANDARD.encode(bytes);
        serializer.serialize_str(&base64_string)
    }
    // Decode one base64 string back into bytes.
    pub fn deserialize<'de, D>(deserializer: D) -> Result<Vec<u8>, D::Error>
    where
        D: Deserializer<'de>,
    {
        let base64_string: String = Deserialize::deserialize(deserializer)?;
        general_purpose::STANDARD
            .decode(&base64_string)
            .map_err(serde::de::Error::custom)
    }
}
/// Parameterless health-check/greeting request.
#[derive(Serialize, Deserialize, Debug)]
pub struct HelloRequest;
/// Register a new account identified by a raw 32-byte id.
#[derive(Serialize, Deserialize, Debug)]
pub struct RegisterAccountRequest {
    pub account_id: [u8; 32],
}
/// Submit a serialized transaction; bytes travel base64-encoded on the wire.
#[derive(Serialize, Deserialize, Debug)]
pub struct SendTxRequest {
    #[serde(with = "base64_deser")]
    pub transaction: Vec<u8>,
}
/// Fetch one block by its id.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetBlockDataRequest {
    pub block_id: u64,
}
/// Get a range of blocks from `start_block_id` to `end_block_id` (inclusive).
#[derive(Serialize, Deserialize, Debug)]
pub struct GetBlockRangeDataRequest {
    pub start_block_id: u64,
    pub end_block_id: u64,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct GetGenesisIdRequest;
#[derive(Serialize, Deserialize, Debug)]
pub struct GetLastBlockRequest;
#[derive(Serialize, Deserialize, Debug)]
pub struct GetInitialTestnetAccountsRequest;
#[derive(Serialize, Deserialize, Debug)]
pub struct GetAccountBalanceRequest {
    pub account_id: AccountId,
}
/// Look up a transaction by its hash.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetTransactionByHashRequest {
    pub hash: HashType,
}
/// Fetch the nonces for a batch of accounts in one call.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetAccountsNoncesRequest {
    pub account_ids: Vec<AccountId>,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct GetAccountRequest {
    pub account_id: AccountId,
}
/// Request a membership proof for the given commitment.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetProofForCommitmentRequest {
    pub commitment: nssa_core::Commitment,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct GetProgramIdsRequest;
// Wire up `RpcRequest::parse` for every request type above.
parse_request!(HelloRequest);
parse_request!(RegisterAccountRequest);
parse_request!(SendTxRequest);
parse_request!(GetBlockDataRequest);
parse_request!(GetBlockRangeDataRequest);
parse_request!(GetGenesisIdRequest);
parse_request!(GetLastBlockRequest);
parse_request!(GetInitialTestnetAccountsRequest);
parse_request!(GetAccountBalanceRequest);
parse_request!(GetTransactionByHashRequest);
parse_request!(GetAccountsNoncesRequest);
parse_request!(GetProofForCommitmentRequest);
parse_request!(GetAccountRequest);
parse_request!(GetProgramIdsRequest);
#[derive(Serialize, Deserialize, Debug)]
pub struct HelloResponse {
    pub greeting: String,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct RegisterAccountResponse {
    pub status: String,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct SendTxResponse {
    pub status: String,
    pub tx_hash: HashType,
}
/// Block payload; bytes travel base64-encoded on the wire.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetBlockDataResponse {
    #[serde(with = "base64_deser")]
    pub block: Vec<u8>,
}
/// Batch of block payloads, each base64-encoded on the wire.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetBlockRangeDataResponse {
    #[serde(with = "base64_deser::vec")]
    pub blocks: Vec<Vec<u8>>,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct GetGenesisIdResponse {
    pub genesis_id: u64,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct GetLastBlockResponse {
    pub last_block: u64,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct GetAccountBalanceResponse {
    pub balance: u128,
}
/// Nonces in the same order as the requested `account_ids`.
/// NOTE(review): ordering assumption inferred from the batch request shape — confirm with server.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetAccountsNoncesResponse {
    pub nonces: Vec<u128>,
}
/// `None` when no transaction with the given hash is known.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetTransactionByHashResponse {
    pub transaction: Option<String>,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct GetAccountResponse {
    pub account: nssa::Account,
}
/// `None` when the commitment is not present in the tree.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetProofForCommitmentResponse {
    pub membership_proof: Option<nssa_core::MembershipProof>,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct GetProgramIdsResponse {
    pub program_ids: HashMap<String, ProgramId>,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct GetInitialTestnetAccountsResponse {
    /// Hex encoded account id.
    pub account_id: String,
    pub balance: u64,
}

View File

@ -12,6 +12,18 @@ pub enum NSSATransaction {
ProgramDeployment(nssa::ProgramDeploymentTransaction), ProgramDeployment(nssa::ProgramDeploymentTransaction),
} }
impl Serialize for NSSATransaction {
fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
crate::borsh_base64::serialize(self, serializer)
}
}
impl<'de> Deserialize<'de> for NSSATransaction {
fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
crate::borsh_base64::deserialize(deserializer)
}
}
impl NSSATransaction { impl NSSATransaction {
#[must_use] #[must_use]
pub fn hash(&self) -> HashType { pub fn hash(&self) -> HashType {
@ -90,7 +102,7 @@ impl From<nssa::ProgramDeploymentTransaction> for NSSATransaction {
} }
#[derive( #[derive(
Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, BorshSerialize, BorshDeserialize, Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, BorshSerialize, BorshDeserialize,
)] )]
pub enum TxKind { pub enum TxKind {
Public, Public,

View File

@ -1,6 +1,5 @@
{ {
"home": "/var/lib/sequencer_runner", "home": "/var/lib/sequencer_service",
"override_rust_log": null,
"genesis_id": 1, "genesis_id": 1,
"is_genesis_random": true, "is_genesis_random": true,
"max_num_tx_in_block": 20, "max_num_tx_in_block": 20,
@ -8,7 +7,6 @@
"mempool_max_size": 10000, "mempool_max_size": 10000,
"block_create_timeout": "10s", "block_create_timeout": "10s",
"retry_pending_blocks_timeout": "7s", "retry_pending_blocks_timeout": "7s",
"port": 3040,
"bedrock_config": { "bedrock_config": {
"backoff": { "backoff": {
"start_delay": "100ms", "start_delay": "100ms",

View File

@ -7,12 +7,12 @@ services:
environment: environment:
- RUST_LOG=error - RUST_LOG=error
sequencer_runner: sequencer_service:
depends_on: depends_on:
- logos-blockchain-node-0 - logos-blockchain-node-0
- indexer_service - indexer_service
volumes: !override volumes: !override
- ./configs/docker-all-in-one/sequencer:/etc/sequencer_runner - ./configs/docker-all-in-one/sequencer:/etc/sequencer_service
indexer_service: indexer_service:
depends_on: depends_on:

View File

@ -6,7 +6,7 @@ include:
- path: - path:
bedrock/docker-compose.yml bedrock/docker-compose.yml
- path: - path:
sequencer_runner/docker-compose.yml sequencer/service/docker-compose.yml
- path: - path:
indexer/service/docker-compose.yml indexer/service/docker-compose.yml
- path: - path:

View File

@ -8,8 +8,10 @@ license = { workspace = true }
workspace = true workspace = true
[dependencies] [dependencies]
common.workspace = true
nssa.workspace = true nssa.workspace = true
nssa_core.workspace = true nssa_core.workspace = true
sequencer_service_rpc = { workspace = true, features = ["client"] }
wallet.workspace = true wallet.workspace = true
tokio = { workspace = true, features = ["macros"] } tokio = { workspace = true, features = ["macros"] }

View File

@ -1,8 +1,10 @@
use common::transaction::NSSATransaction;
use nssa::{ use nssa::{
AccountId, PublicTransaction, AccountId, PublicTransaction,
program::Program, program::Program,
public_transaction::{Message, WitnessSet}, public_transaction::{Message, WitnessSet},
}; };
use sequencer_service_rpc::RpcClient as _;
use wallet::WalletCore; use wallet::WalletCore;
// Before running this example, compile the `hello_world.rs` guest program with: // Before running this example, compile the `hello_world.rs` guest program with:
@ -58,7 +60,7 @@ async fn main() {
// Submit the transaction // Submit the transaction
let _response = wallet_core let _response = wallet_core
.sequencer_client .sequencer_client
.send_tx_public(tx) .send_transaction(NSSATransaction::Public(tx))
.await .await
.unwrap(); .unwrap();
} }

View File

@ -1,8 +1,10 @@
use common::transaction::NSSATransaction;
use nssa::{ use nssa::{
AccountId, PublicTransaction, AccountId, PublicTransaction,
program::Program, program::Program,
public_transaction::{Message, WitnessSet}, public_transaction::{Message, WitnessSet},
}; };
use sequencer_service_rpc::RpcClient as _;
use wallet::WalletCore; use wallet::WalletCore;
// Before running this example, compile the `simple_tail_call.rs` guest program with: // Before running this example, compile the `simple_tail_call.rs` guest program with:
@ -54,7 +56,7 @@ async fn main() {
// Submit the transaction // Submit the transaction
let _response = wallet_core let _response = wallet_core
.sequencer_client .sequencer_client
.send_tx_public(tx) .send_transaction(NSSATransaction::Public(tx))
.await .await
.unwrap(); .unwrap();
} }

View File

@ -1,9 +1,10 @@
use common::transaction::NSSATransaction;
use nssa::{ use nssa::{
AccountId, PublicTransaction, AccountId, PublicTransaction,
program::Program, program::Program,
public_transaction::{Message, WitnessSet}, public_transaction::{Message, WitnessSet},
}; };
use nssa_core::account::Nonce; use sequencer_service_rpc::RpcClient as _;
use wallet::WalletCore; use wallet::WalletCore;
// Before running this example, compile the `hello_world_with_authorization.rs` guest program with: // Before running this example, compile the `hello_world_with_authorization.rs` guest program with:
@ -63,13 +64,7 @@ async fn main() {
.await .await
.expect("Node should be reachable to query account data"); .expect("Node should be reachable to query account data");
let signing_keys = [signing_key]; let signing_keys = [signing_key];
let message = Message::try_new( let message = Message::try_new(program.id(), vec![account_id], nonces, greeting).unwrap();
program.id(),
vec![account_id],
nonces.iter().map(|x| Nonce(*x)).collect(),
greeting,
)
.unwrap();
// Pass the signing key to sign the message. This will be used by the node // Pass the signing key to sign the message. This will be used by the node
// to flag the pre_state as `is_authorized` when executing the program // to flag the pre_state as `is_authorized` when executing the program
let witness_set = WitnessSet::for_message(&message, &signing_keys); let witness_set = WitnessSet::for_message(&message, &signing_keys);
@ -78,7 +73,7 @@ async fn main() {
// Submit the transaction // Submit the transaction
let _response = wallet_core let _response = wallet_core
.sequencer_client .sequencer_client
.send_tx_public(tx) .send_transaction(NSSATransaction::Public(tx))
.await .await
.unwrap(); .unwrap();
} }

View File

@ -3,12 +3,14 @@
reason = "This is an example program, it's fine to print to stdout" reason = "This is an example program, it's fine to print to stdout"
)] )]
use common::transaction::NSSATransaction;
use nssa::{ use nssa::{
AccountId, PublicTransaction, AccountId, PublicTransaction,
program::Program, program::Program,
public_transaction::{Message, WitnessSet}, public_transaction::{Message, WitnessSet},
}; };
use nssa_core::program::PdaSeed; use nssa_core::program::PdaSeed;
use sequencer_service_rpc::RpcClient as _;
use wallet::WalletCore; use wallet::WalletCore;
// Before running this example, compile the `simple_tail_call.rs` guest program with: // Before running this example, compile the `simple_tail_call.rs` guest program with:
@ -56,7 +58,7 @@ async fn main() {
// Submit the transaction // Submit the transaction
let _response = wallet_core let _response = wallet_core
.sequencer_client .sequencer_client
.send_tx_public(tx) .send_transaction(NSSATransaction::Public(tx))
.await .await
.unwrap(); .unwrap();

View File

@ -1,5 +1,7 @@
use clap::{Parser, Subcommand}; use clap::{Parser, Subcommand};
use common::transaction::NSSATransaction;
use nssa::{PublicTransaction, program::Program, public_transaction}; use nssa::{PublicTransaction, program::Program, public_transaction};
use sequencer_service_rpc::RpcClient as _;
use wallet::{PrivacyPreservingAccount, WalletCore}; use wallet::{PrivacyPreservingAccount, WalletCore};
// Before running this example, compile the `hello_world_with_move_function.rs` guest program with: // Before running this example, compile the `hello_world_with_move_function.rs` guest program with:
@ -87,7 +89,7 @@ async fn main() {
// Submit the transaction // Submit the transaction
let _response = wallet_core let _response = wallet_core
.sequencer_client .sequencer_client
.send_tx_public(tx) .send_transaction(NSSATransaction::Public(tx))
.await .await
.unwrap(); .unwrap();
} }
@ -126,7 +128,7 @@ async fn main() {
// Submit the transaction // Submit the transaction
let _response = wallet_core let _response = wallet_core
.sequencer_client .sequencer_client
.send_tx_public(tx) .send_transaction(NSSATransaction::Public(tx))
.await .await
.unwrap(); .unwrap();
} }

View File

@ -41,12 +41,12 @@ pub async fn search(query: String) -> Result<SearchResults, ServerFnError> {
// Try as hash // Try as hash
if let Ok(hash) = HashType::from_str(&query) { if let Ok(hash) = HashType::from_str(&query) {
// Try as block hash // Try as block hash
if let Ok(block) = client.get_block_by_hash(hash).await { if let Ok(Some(block)) = client.get_block_by_hash(hash).await {
blocks.push(block); blocks.push(block);
} }
// Try as transaction hash // Try as transaction hash
if let Ok(tx) = client.get_transaction(hash).await { if let Ok(Some(tx)) = client.get_transaction(hash).await {
transactions.push(tx); transactions.push(tx);
} }
} }
@ -60,7 +60,7 @@ pub async fn search(query: String) -> Result<SearchResults, ServerFnError> {
// Try as block ID // Try as block ID
if let Ok(block_id) = query.parse::<u64>() if let Ok(block_id) = query.parse::<u64>()
&& let Ok(block) = client.get_block_by_id(block_id).await && let Ok(Some(block)) = client.get_block_by_id(block_id).await
{ {
blocks.push(block); blocks.push(block);
} }
@ -81,6 +81,7 @@ pub async fn get_block_by_id(block_id: BlockId) -> Result<Block, ServerFnError>
.get_block_by_id(block_id) .get_block_by_id(block_id)
.await .await
.map_err(|e| ServerFnError::ServerError(format!("RPC error: {e}"))) .map_err(|e| ServerFnError::ServerError(format!("RPC error: {e}")))
.and_then(|opt| opt.ok_or_else(|| ServerFnError::ServerError("Block not found".to_owned())))
} }
/// Get latest block ID /// Get latest block ID
@ -103,6 +104,7 @@ pub async fn get_block_by_hash(block_hash: HashType) -> Result<Block, ServerFnEr
.get_block_by_hash(block_hash) .get_block_by_hash(block_hash)
.await .await
.map_err(|e| ServerFnError::ServerError(format!("RPC error: {e}"))) .map_err(|e| ServerFnError::ServerError(format!("RPC error: {e}")))
.and_then(|opt| opt.ok_or_else(|| ServerFnError::ServerError("Block not found".to_owned())))
} }
/// Get transaction by hash /// Get transaction by hash
@ -114,6 +116,9 @@ pub async fn get_transaction(tx_hash: HashType) -> Result<Transaction, ServerFnE
.get_transaction(tx_hash) .get_transaction(tx_hash)
.await .await
.map_err(|e| ServerFnError::ServerError(format!("RPC error: {e}"))) .map_err(|e| ServerFnError::ServerError(format!("RPC error: {e}")))
.and_then(|opt| {
opt.ok_or_else(|| ServerFnError::ServerError("Transaction not found".to_owned()))
})
} }
/// Get blocks with pagination /// Get blocks with pagination

View File

@ -84,7 +84,7 @@ pub fn TransactionPage() -> impl IntoView {
} = witness_set; } = witness_set;
let program_id_str = program_id.to_string(); let program_id_str = program_id.to_string();
let proof_len = proof.0.len(); let proof_len = proof.map_or(0, |p| p.0.len());
let signatures_count = signatures_and_public_keys.len(); let signatures_count = signatures_and_public_keys.len();
view! { view! {
@ -190,7 +190,7 @@ pub fn TransactionPage() -> impl IntoView {
(None, None) => "unbounded".to_owned(), (None, None) => "unbounded".to_owned(),
}; };
let proof_len = proof.0.len(); let proof_len = proof.map_or(0, |p| p.0.len());
view! { view! {
<div class="transaction-details"> <div class="transaction-details">
<h2>"Privacy-Preserving Transaction Details"</h2> <h2>"Privacy-Preserving Transaction Details"</h2>

View File

@ -28,4 +28,3 @@ async-stream.workspace = true
[dev-dependencies] [dev-dependencies]
tempfile.workspace = true tempfile.workspace = true

View File

@ -46,7 +46,7 @@ impl IndexerStore {
Ok(self.dbio.get_meta_last_block_in_db()?) Ok(self.dbio.get_meta_last_block_in_db()?)
} }
pub fn get_block_at_id(&self, id: u64) -> Result<Block> { pub fn get_block_at_id(&self, id: u64) -> Result<Option<Block>> {
Ok(self.dbio.get_block(id)?) Ok(self.dbio.get_block(id)?)
} }
@ -54,20 +54,25 @@ impl IndexerStore {
Ok(self.dbio.get_block_batch(before, limit)?) Ok(self.dbio.get_block_batch(before, limit)?)
} }
pub fn get_transaction_by_hash(&self, tx_hash: [u8; 32]) -> Result<NSSATransaction> { pub fn get_transaction_by_hash(&self, tx_hash: [u8; 32]) -> Result<Option<NSSATransaction>> {
let block = self.get_block_at_id(self.dbio.get_block_id_by_tx_hash(tx_hash)?)?; let Some(block_id) = self.dbio.get_block_id_by_tx_hash(tx_hash)? else {
let transaction = block return Ok(None);
};
let Some(block) = self.get_block_at_id(block_id)? else {
return Ok(None);
};
Ok(block
.body .body
.transactions .transactions
.iter() .into_iter()
.find(|enc_tx| enc_tx.hash().0 == tx_hash) .find(|enc_tx| enc_tx.hash().0 == tx_hash))
.ok_or_else(|| anyhow::anyhow!("Transaction not found in DB"))?;
Ok(transaction.clone())
} }
pub fn get_block_by_hash(&self, hash: [u8; 32]) -> Result<Block> { pub fn get_block_by_hash(&self, hash: [u8; 32]) -> Result<Option<Block>> {
self.get_block_at_id(self.dbio.get_block_id_by_hash(hash)?) let Some(id) = self.dbio.get_block_id_by_hash(hash)? else {
return Ok(None);
};
self.get_block_at_id(id)
} }
pub fn get_transactions_by_account( pub fn get_transactions_by_account(
@ -171,7 +176,7 @@ mod tests {
) )
.unwrap(); .unwrap();
let block = storage.get_block_at_id(1).unwrap(); let block = storage.get_block_at_id(1).unwrap().unwrap();
let final_id = storage.get_last_block_id().unwrap(); let final_id = storage.get_last_block_id().unwrap();
assert_eq!(block.header.hash, genesis_block().header.hash); assert_eq!(block.header.hash, genesis_block().header.hash);

View File

@ -21,7 +21,6 @@ log.workspace = true
jsonrpsee.workspace = true jsonrpsee.workspace = true
serde_json.workspace = true serde_json.workspace = true
futures.workspace = true futures.workspace = true
async-trait = "0.1.89"
arc-swap = "1.8.1" arc-swap = "1.8.1"
[features] [features]

View File

@ -11,50 +11,50 @@
"channel_id": "0101010101010101010101010101010101010101010101010101010101010101", "channel_id": "0101010101010101010101010101010101010101010101010101010101010101",
"initial_accounts": [ "initial_accounts": [
{ {
"account_id": "6iArKUXxhUJqS7kCaPNhwMWt3ro71PDyBj7jwAyE2VQV", "account_id": "CbgR6tj5kWx5oziiFptM7jMvrQeYY3Mzaao6ciuhSr2r",
"balance": 10000 "balance": 10000
}, },
{ {
"account_id": "7wHg9sbJwc6h3NP1S9bekfAzB8CHifEcxKswCKUt3YQo", "account_id": "2RHZhw9h534Zr3eq2RGhQete2Hh667foECzXPmSkGni2",
"balance": 20000 "balance": 20000
} }
], ],
"initial_commitments": [ "initial_commitments": [
{ {
"npk":[ "npk": [
177, 139,
64, 19,
1, 158,
11, 11,
87, 155,
38,
254,
159,
231, 231,
165, 85,
1, 206,
94, 132,
64, 228,
137, 220,
243, 114,
76, 145,
249, 89,
101, 113,
251, 156,
129, 238,
33, 142,
101, 242,
189, 74,
30, 182,
42, 91,
11, 43,
191, 100,
34, 6,
103, 190,
186, 31,
227, 15,
230 31,
] , 88,
96,
204
],
"account": { "account": {
"program_owner": [ "program_owner": [
0, 0,
@ -73,38 +73,38 @@
}, },
{ {
"npk": [ "npk": [
32, 173,
67,
72,
164,
106,
53,
66,
239,
141,
15,
52,
230,
136,
177,
2,
236,
207,
243,
134, 134,
135, 33,
210, 223,
143, 54,
87, 226,
232, 10,
71,
215, 215,
128, 254,
194, 143,
120, 172,
113, 24,
224, 244,
4, 243,
165 208,
65,
112,
118,
70,
217,
240,
69,
100,
129,
3,
121,
25,
213,
132,
42,
45
], ],
"account": { "account": {
"program_owner": [ "program_owner": [
@ -157,4 +157,4 @@
37, 37,
37 37
] ]
} }

View File

@ -363,12 +363,16 @@ impl From<ProgramDeploymentMessage> for nssa::program_deployment_transaction::Me
// WitnessSet conversions // WitnessSet conversions
// ============================================================================ // ============================================================================
impl TryFrom<nssa::public_transaction::WitnessSet> for WitnessSet { impl From<nssa::public_transaction::WitnessSet> for WitnessSet {
type Error = (); fn from(value: nssa::public_transaction::WitnessSet) -> Self {
Self {
fn try_from(_value: nssa::public_transaction::WitnessSet) -> Result<Self, Self::Error> { signatures_and_public_keys: value
// Public transaction witness sets don't have proofs, so we can't convert them directly .signatures_and_public_keys()
Err(()) .iter()
.map(|(sig, pk)| (sig.clone().into(), pk.clone().into()))
.collect(),
proof: None,
}
} }
} }
@ -380,7 +384,7 @@ impl From<nssa::privacy_preserving_transaction::witness_set::WitnessSet> for Wit
.into_iter() .into_iter()
.map(|(sig, pk)| (sig.into(), pk.into())) .map(|(sig, pk)| (sig.into(), pk.into()))
.collect(), .collect(),
proof: proof.into(), proof: Some(proof.into()),
} }
} }
} }
@ -400,7 +404,9 @@ impl TryFrom<WitnessSet> for nssa::privacy_preserving_transaction::witness_set::
Ok(Self::from_raw_parts( Ok(Self::from_raw_parts(
signatures_and_public_keys, signatures_and_public_keys,
proof.into(), proof
.map(Into::into)
.ok_or_else(|| nssa::error::NssaError::InvalidInput("Missing proof".to_owned()))?,
)) ))
} }
} }
@ -420,14 +426,7 @@ impl From<nssa::PublicTransaction> for PublicTransaction {
Self { Self {
hash, hash,
message: message.into(), message: message.into(),
witness_set: WitnessSet { witness_set: witness_set.into(),
signatures_and_public_keys: witness_set
.signatures_and_public_keys()
.iter()
.map(|(sig, pk)| (sig.clone().into(), pk.clone().into()))
.collect(),
proof: Proof(vec![]), // Public transactions don't have proofs
},
} }
} }
} }

View File

@ -241,7 +241,7 @@ pub struct PrivacyPreservingMessage {
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)] #[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub struct WitnessSet { pub struct WitnessSet {
pub signatures_and_public_keys: Vec<(Signature, PublicKey)>, pub signatures_and_public_keys: Vec<(Signature, PublicKey)>,
pub proof: Proof, pub proof: Option<Proof>,
} }
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)] #[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]

View File

@ -30,16 +30,22 @@ pub trait Rpc {
async fn get_last_finalized_block_id(&self) -> Result<BlockId, ErrorObjectOwned>; async fn get_last_finalized_block_id(&self) -> Result<BlockId, ErrorObjectOwned>;
#[method(name = "getBlockById")] #[method(name = "getBlockById")]
async fn get_block_by_id(&self, block_id: BlockId) -> Result<Block, ErrorObjectOwned>; async fn get_block_by_id(&self, block_id: BlockId) -> Result<Option<Block>, ErrorObjectOwned>;
#[method(name = "getBlockByHash")] #[method(name = "getBlockByHash")]
async fn get_block_by_hash(&self, block_hash: HashType) -> Result<Block, ErrorObjectOwned>; async fn get_block_by_hash(
&self,
block_hash: HashType,
) -> Result<Option<Block>, ErrorObjectOwned>;
#[method(name = "getAccount")] #[method(name = "getAccount")]
async fn get_account(&self, account_id: AccountId) -> Result<Account, ErrorObjectOwned>; async fn get_account(&self, account_id: AccountId) -> Result<Account, ErrorObjectOwned>;
#[method(name = "getTransaction")] #[method(name = "getTransaction")]
async fn get_transaction(&self, tx_hash: HashType) -> Result<Transaction, ErrorObjectOwned>; async fn get_transaction(
&self,
tx_hash: HashType,
) -> Result<Option<Transaction>, ErrorObjectOwned>;
#[method(name = "getBlocks")] #[method(name = "getBlocks")]
async fn get_blocks( async fn get_blocks(

View File

@ -3,7 +3,7 @@ use std::net::SocketAddr;
use anyhow::{Context as _, Result}; use anyhow::{Context as _, Result};
pub use indexer_core::config::*; pub use indexer_core::config::*;
use indexer_service_rpc::RpcServer as _; use indexer_service_rpc::RpcServer as _;
use jsonrpsee::server::Server; use jsonrpsee::server::{Server, ServerHandle};
use log::{error, info}; use log::{error, info};
pub mod service; pub mod service;
@ -13,10 +13,11 @@ pub mod mock_service;
pub struct IndexerHandle { pub struct IndexerHandle {
addr: SocketAddr, addr: SocketAddr,
server_handle: Option<jsonrpsee::server::ServerHandle>, /// Option because of `Drop` which forbids to simply move out of `self` in `stopped()`.
server_handle: Option<ServerHandle>,
} }
impl IndexerHandle { impl IndexerHandle {
const fn new(addr: SocketAddr, server_handle: jsonrpsee::server::ServerHandle) -> Self { const fn new(addr: SocketAddr, server_handle: ServerHandle) -> Self {
Self { Self {
addr, addr,
server_handle: Some(server_handle), server_handle: Some(server_handle),
@ -28,6 +29,7 @@ impl IndexerHandle {
self.addr self.addr
} }
/// Wait for all Indexer tasks to stop.
pub async fn stopped(mut self) { pub async fn stopped(mut self) {
let handle = self let handle = self
.server_handle .server_handle
@ -37,15 +39,11 @@ impl IndexerHandle {
handle.stopped().await; handle.stopped().await;
} }
#[expect(
clippy::redundant_closure_for_method_calls,
reason = "Clippy suggested path jsonrpsee::jsonrpsee_server::ServerHandle is not accessible"
)]
#[must_use] #[must_use]
pub fn is_stopped(&self) -> bool { pub fn is_healthy(&self) -> bool {
self.server_handle self.server_handle
.as_ref() .as_ref()
.is_none_or(|handle| handle.is_stopped()) .is_some_and(|handle| !handle.is_stopped())
} }
} }

View File

@ -15,7 +15,10 @@ use indexer_service_protocol::{
ProgramDeploymentTransaction, ProgramId, PublicMessage, PublicTransaction, Signature, ProgramDeploymentTransaction, ProgramId, PublicMessage, PublicTransaction, Signature,
Transaction, ValidityWindow, WitnessSet, Transaction, ValidityWindow, WitnessSet,
}; };
use jsonrpsee::{core::SubscriptionResult, types::ErrorObjectOwned}; use jsonrpsee::{
core::{SubscriptionResult, async_trait},
types::ErrorObjectOwned,
};
/// A mock implementation of the `IndexerService` RPC for testing purposes. /// A mock implementation of the `IndexerService` RPC for testing purposes.
pub struct MockIndexerService { pub struct MockIndexerService {
@ -92,7 +95,7 @@ impl MockIndexerService {
}, },
witness_set: WitnessSet { witness_set: WitnessSet {
signatures_and_public_keys: vec![], signatures_and_public_keys: vec![],
proof: indexer_service_protocol::Proof(vec![0; 32]), proof: None,
}, },
}), }),
// PrivacyPreserving transactions // PrivacyPreserving transactions
@ -125,7 +128,7 @@ impl MockIndexerService {
}, },
witness_set: WitnessSet { witness_set: WitnessSet {
signatures_and_public_keys: vec![], signatures_and_public_keys: vec![],
proof: indexer_service_protocol::Proof(vec![0; 32]), proof: Some(indexer_service_protocol::Proof(vec![0; 32])),
}, },
}), }),
// ProgramDeployment transactions (rare) // ProgramDeployment transactions (rare)
@ -172,7 +175,7 @@ impl MockIndexerService {
} }
} }
#[async_trait::async_trait] #[async_trait]
impl indexer_service_rpc::RpcServer for MockIndexerService { impl indexer_service_rpc::RpcServer for MockIndexerService {
async fn subscribe_to_finalized_blocks( async fn subscribe_to_finalized_blocks(
&self, &self,
@ -199,26 +202,23 @@ impl indexer_service_rpc::RpcServer for MockIndexerService {
}) })
} }
async fn get_block_by_id(&self, block_id: BlockId) -> Result<Block, ErrorObjectOwned> { async fn get_block_by_id(&self, block_id: BlockId) -> Result<Option<Block>, ErrorObjectOwned> {
self.blocks Ok(self
.blocks
.iter() .iter()
.find(|b| b.header.block_id == block_id) .find(|b| b.header.block_id == block_id)
.cloned() .cloned())
.ok_or_else(|| {
ErrorObjectOwned::owned(
-32001,
format!("Block with ID {block_id} not found"),
None::<()>,
)
})
} }
async fn get_block_by_hash(&self, block_hash: HashType) -> Result<Block, ErrorObjectOwned> { async fn get_block_by_hash(
self.blocks &self,
block_hash: HashType,
) -> Result<Option<Block>, ErrorObjectOwned> {
Ok(self
.blocks
.iter() .iter()
.find(|b| b.header.hash == block_hash) .find(|b| b.header.hash == block_hash)
.cloned() .cloned())
.ok_or_else(|| ErrorObjectOwned::owned(-32001, "Block with hash not found", None::<()>))
} }
async fn get_account(&self, account_id: AccountId) -> Result<Account, ErrorObjectOwned> { async fn get_account(&self, account_id: AccountId) -> Result<Account, ErrorObjectOwned> {
@ -228,11 +228,11 @@ impl indexer_service_rpc::RpcServer for MockIndexerService {
.ok_or_else(|| ErrorObjectOwned::owned(-32001, "Account not found", None::<()>)) .ok_or_else(|| ErrorObjectOwned::owned(-32001, "Account not found", None::<()>))
} }
async fn get_transaction(&self, tx_hash: HashType) -> Result<Transaction, ErrorObjectOwned> { async fn get_transaction(
self.transactions &self,
.get(&tx_hash) tx_hash: HashType,
.map(|(tx, _)| tx.clone()) ) -> Result<Option<Transaction>, ErrorObjectOwned> {
.ok_or_else(|| ErrorObjectOwned::owned(-32001, "Transaction not found", None::<()>)) Ok(self.transactions.get(&tx_hash).map(|(tx, _)| tx.clone()))
} }
async fn get_blocks( async fn get_blocks(

View File

@ -7,7 +7,7 @@ use indexer_core::{IndexerCore, config::IndexerConfig};
use indexer_service_protocol::{Account, AccountId, Block, BlockId, HashType, Transaction}; use indexer_service_protocol::{Account, AccountId, Block, BlockId, HashType, Transaction};
use jsonrpsee::{ use jsonrpsee::{
SubscriptionSink, SubscriptionSink,
core::{Serialize, SubscriptionResult}, core::{Serialize, SubscriptionResult, async_trait},
types::{ErrorCode, ErrorObject, ErrorObjectOwned}, types::{ErrorCode, ErrorObject, ErrorObjectOwned},
}; };
use log::{debug, error, info, warn}; use log::{debug, error, info, warn};
@ -30,7 +30,7 @@ impl IndexerService {
} }
} }
#[async_trait::async_trait] #[async_trait]
impl indexer_service_rpc::RpcServer for IndexerService { impl indexer_service_rpc::RpcServer for IndexerService {
async fn subscribe_to_finalized_blocks( async fn subscribe_to_finalized_blocks(
&self, &self,
@ -52,22 +52,25 @@ impl indexer_service_rpc::RpcServer for IndexerService {
self.indexer.store.get_last_block_id().map_err(db_error) self.indexer.store.get_last_block_id().map_err(db_error)
} }
async fn get_block_by_id(&self, block_id: BlockId) -> Result<Block, ErrorObjectOwned> { async fn get_block_by_id(&self, block_id: BlockId) -> Result<Option<Block>, ErrorObjectOwned> {
Ok(self Ok(self
.indexer .indexer
.store .store
.get_block_at_id(block_id) .get_block_at_id(block_id)
.map_err(db_error)? .map_err(db_error)?
.into()) .map(Into::into))
} }
async fn get_block_by_hash(&self, block_hash: HashType) -> Result<Block, ErrorObjectOwned> { async fn get_block_by_hash(
&self,
block_hash: HashType,
) -> Result<Option<Block>, ErrorObjectOwned> {
Ok(self Ok(self
.indexer .indexer
.store .store
.get_block_by_hash(block_hash.0) .get_block_by_hash(block_hash.0)
.map_err(db_error)? .map_err(db_error)?
.into()) .map(Into::into))
} }
async fn get_account(&self, account_id: AccountId) -> Result<Account, ErrorObjectOwned> { async fn get_account(&self, account_id: AccountId) -> Result<Account, ErrorObjectOwned> {
@ -80,13 +83,16 @@ impl indexer_service_rpc::RpcServer for IndexerService {
.into()) .into())
} }
async fn get_transaction(&self, tx_hash: HashType) -> Result<Transaction, ErrorObjectOwned> { async fn get_transaction(
&self,
tx_hash: HashType,
) -> Result<Option<Transaction>, ErrorObjectOwned> {
Ok(self Ok(self
.indexer .indexer
.store .store
.get_transaction_by_hash(tx_hash.0) .get_transaction_by_hash(tx_hash.0)
.map_err(db_error)? .map_err(db_error)?
.into()) .map(Into::into))
} }
async fn get_blocks( async fn get_blocks(

View File

@ -11,7 +11,7 @@ workspace = true
nssa_core = { workspace = true, features = ["host"] } nssa_core = { workspace = true, features = ["host"] }
nssa.workspace = true nssa.workspace = true
sequencer_core = { workspace = true, features = ["default", "testnet"] } sequencer_core = { workspace = true, features = ["default", "testnet"] }
sequencer_runner.workspace = true sequencer_service.workspace = true
wallet.workspace = true wallet.workspace = true
common.workspace = true common.workspace = true
key_protocol.workspace = true key_protocol.workspace = true
@ -19,6 +19,7 @@ indexer_service.workspace = true
serde_json.workspace = true serde_json.workspace = true
token_core.workspace = true token_core.workspace = true
indexer_service_rpc.workspace = true indexer_service_rpc.workspace = true
sequencer_service_rpc = { workspace = true, features = ["client"] }
wallet-ffi.workspace = true wallet-ffi.workspace = true
url.workspace = true url.workspace = true
@ -26,11 +27,9 @@ url.workspace = true
anyhow.workspace = true anyhow.workspace = true
env_logger.workspace = true env_logger.workspace = true
log.workspace = true log.workspace = true
base64.workspace = true
tokio = { workspace = true, features = ["rt-multi-thread", "macros"] } tokio = { workspace = true, features = ["rt-multi-thread", "macros"] }
hex.workspace = true hex.workspace = true
tempfile.workspace = true tempfile.workspace = true
borsh.workspace = true
bytesize.workspace = true bytesize.workspace = true
futures.workspace = true futures.workspace = true
testcontainers = { version = "0.27.0", features = ["docker-compose"] } testcontainers = { version = "0.27.0", features = ["docker-compose"] }

View File

@ -59,11 +59,11 @@ impl InitialData {
let mut private_charlie_key_chain = KeyChain::new_os_random(); let mut private_charlie_key_chain = KeyChain::new_os_random();
let mut private_charlie_account_id = let mut private_charlie_account_id =
AccountId::from(&private_charlie_key_chain.nullifer_public_key); AccountId::from(&private_charlie_key_chain.nullifier_public_key);
let mut private_david_key_chain = KeyChain::new_os_random(); let mut private_david_key_chain = KeyChain::new_os_random();
let mut private_david_account_id = let mut private_david_account_id =
AccountId::from(&private_david_key_chain.nullifer_public_key); AccountId::from(&private_david_key_chain.nullifier_public_key);
// Ensure consistent ordering // Ensure consistent ordering
if private_charlie_account_id > private_david_account_id { if private_charlie_account_id > private_david_account_id {
@ -120,7 +120,7 @@ impl InitialData {
self.private_accounts self.private_accounts
.iter() .iter()
.map(|(key_chain, account)| CommitmentsInitialData { .map(|(key_chain, account)| CommitmentsInitialData {
npk: key_chain.nullifer_public_key.clone(), npk: key_chain.nullifier_public_key.clone(),
account: account.clone(), account: account.clone(),
}) })
.collect() .collect()
@ -138,7 +138,7 @@ impl InitialData {
}) })
}) })
.chain(self.private_accounts.iter().map(|(key_chain, account)| { .chain(self.private_accounts.iter().map(|(key_chain, account)| {
let account_id = AccountId::from(&key_chain.nullifer_public_key); let account_id = AccountId::from(&key_chain.nullifier_public_key);
InitialAccountData::Private(Box::new(InitialAccountDataPrivate { InitialAccountData::Private(Box::new(InitialAccountDataPrivate {
account_id, account_id,
account: account.clone(), account: account.clone(),
@ -204,7 +204,6 @@ pub fn sequencer_config(
Ok(SequencerConfig { Ok(SequencerConfig {
home, home,
override_rust_log: None,
genesis_id: 1, genesis_id: 1,
is_genesis_random: true, is_genesis_random: true,
max_num_tx_in_block, max_num_tx_in_block,
@ -212,7 +211,6 @@ pub fn sequencer_config(
mempool_max_size, mempool_max_size,
block_create_timeout, block_create_timeout,
retry_pending_blocks_timeout: Duration::from_mins(2), retry_pending_blocks_timeout: Duration::from_mins(2),
port: 0,
initial_accounts: initial_data.sequencer_initial_accounts(), initial_accounts: initial_data.sequencer_initial_accounts(),
initial_commitments: initial_data.sequencer_initial_commitments(), initial_commitments: initial_data.sequencer_initial_commitments(),
signing_key: [37; 32], signing_key: [37; 32],
@ -236,7 +234,6 @@ pub fn wallet_config(
initial_data: &InitialData, initial_data: &InitialData,
) -> Result<WalletConfig> { ) -> Result<WalletConfig> {
Ok(WalletConfig { Ok(WalletConfig {
override_rust_log: None,
sequencer_addr: addr_to_url(UrlProtocol::Http, sequencer_addr) sequencer_addr: addr_to_url(UrlProtocol::Http, sequencer_addr)
.context("Failed to convert sequencer addr to URL")?, .context("Failed to convert sequencer addr to URL")?,
seq_poll_timeout: Duration::from_secs(30), seq_poll_timeout: Duration::from_secs(30),

View File

@ -3,15 +3,15 @@
use std::{net::SocketAddr, path::PathBuf, sync::LazyLock}; use std::{net::SocketAddr, path::PathBuf, sync::LazyLock};
use anyhow::{Context as _, Result, bail}; use anyhow::{Context as _, Result, bail};
use base64::{Engine as _, engine::general_purpose::STANDARD as BASE64}; use common::{HashType, transaction::NSSATransaction};
use common::{HashType, sequencer_client::SequencerClient, transaction::NSSATransaction};
use futures::FutureExt as _; use futures::FutureExt as _;
use indexer_service::IndexerHandle; use indexer_service::IndexerHandle;
use log::{debug, error, warn}; use log::{debug, error, warn};
use nssa::{AccountId, PrivacyPreservingTransaction}; use nssa::{AccountId, PrivacyPreservingTransaction};
use nssa_core::Commitment; use nssa_core::Commitment;
use sequencer_core::indexer_client::{IndexerClient, IndexerClientTrait as _}; use sequencer_core::indexer_client::{IndexerClient, IndexerClientTrait as _};
use sequencer_runner::SequencerHandle; use sequencer_service::SequencerHandle;
use sequencer_service_rpc::{RpcClient as _, SequencerClient, SequencerClientBuilder};
use tempfile::TempDir; use tempfile::TempDir;
use testcontainers::compose::DockerCompose; use testcontainers::compose::DockerCompose;
use wallet::{WalletCore, config::WalletConfigOverrides}; use wallet::{WalletCore, config::WalletConfigOverrides};
@ -38,7 +38,8 @@ pub struct TestContext {
indexer_client: IndexerClient, indexer_client: IndexerClient,
wallet: WalletCore, wallet: WalletCore,
wallet_password: String, wallet_password: String,
sequencer_handle: SequencerHandle, /// Optional to move out value in Drop.
sequencer_handle: Option<SequencerHandle>,
indexer_handle: IndexerHandle, indexer_handle: IndexerHandle,
bedrock_compose: DockerCompose, bedrock_compose: DockerCompose,
_temp_indexer_dir: TempDir, _temp_indexer_dir: TempDir,
@ -90,8 +91,9 @@ impl TestContext {
.context("Failed to convert sequencer addr to URL")?; .context("Failed to convert sequencer addr to URL")?;
let indexer_url = config::addr_to_url(config::UrlProtocol::Ws, indexer_handle.addr()) let indexer_url = config::addr_to_url(config::UrlProtocol::Ws, indexer_handle.addr())
.context("Failed to convert indexer addr to URL")?; .context("Failed to convert indexer addr to URL")?;
let sequencer_client = let sequencer_client = SequencerClientBuilder::default()
SequencerClient::new(sequencer_url).context("Failed to create sequencer client")?; .build(sequencer_url)
.context("Failed to create sequencer client")?;
let indexer_client = IndexerClient::new(&indexer_url) let indexer_client = IndexerClient::new(&indexer_url)
.await .await
.context("Failed to create indexer client")?; .context("Failed to create indexer client")?;
@ -102,7 +104,7 @@ impl TestContext {
wallet, wallet,
wallet_password, wallet_password,
bedrock_compose, bedrock_compose,
sequencer_handle, sequencer_handle: Some(sequencer_handle),
indexer_handle, indexer_handle,
_temp_indexer_dir: temp_indexer_dir, _temp_indexer_dir: temp_indexer_dir,
_temp_sequencer_dir: temp_sequencer_dir, _temp_sequencer_dir: temp_sequencer_dir,
@ -229,7 +231,7 @@ impl TestContext {
) )
.context("Failed to create Sequencer config")?; .context("Failed to create Sequencer config")?;
let sequencer_handle = sequencer_runner::startup_sequencer(config).await?; let sequencer_handle = sequencer_service::run(config, 0).await?;
Ok((sequencer_handle, temp_sequencer_dir)) Ok((sequencer_handle, temp_sequencer_dir))
} }
@ -333,18 +335,20 @@ impl Drop for TestContext {
wallet_password: _, wallet_password: _,
} = self; } = self;
if sequencer_handle.is_finished() { let sequencer_handle = sequencer_handle
let Err(err) = self .take()
.sequencer_handle .expect("Sequencer handle should be present in TestContext drop");
.run_forever() if !sequencer_handle.is_healthy() {
let Err(err) = sequencer_handle
.failed()
.now_or_never() .now_or_never()
.expect("Future is finished and should be ready"); .expect("Sequencer handle should not be running");
error!( error!(
"Sequencer handle has unexpectedly finished before TestContext drop with error: {err:#}" "Sequencer handle has unexpectedly stopped before TestContext drop with error: {err:#}"
); );
} }
if indexer_handle.is_stopped() { if !indexer_handle.is_healthy() {
error!("Indexer handle has unexpectedly stopped before TestContext drop"); error!("Indexer handle has unexpectedly stopped before TestContext drop");
} }
@ -459,15 +463,8 @@ pub async fn fetch_privacy_preserving_tx(
seq_client: &SequencerClient, seq_client: &SequencerClient,
tx_hash: HashType, tx_hash: HashType,
) -> PrivacyPreservingTransaction { ) -> PrivacyPreservingTransaction {
let transaction_encoded = seq_client let tx = seq_client.get_transaction(tx_hash).await.unwrap().unwrap();
.get_transaction_by_hash(tx_hash)
.await
.unwrap()
.transaction
.unwrap();
let tx_bytes = BASE64.decode(transaction_encoded).unwrap();
let tx = borsh::from_slice(&tx_bytes).unwrap();
match tx { match tx {
NSSATransaction::PrivacyPreserving(privacy_preserving_transaction) => { NSSATransaction::PrivacyPreserving(privacy_preserving_transaction) => {
privacy_preserving_transaction privacy_preserving_transaction
@ -480,8 +477,10 @@ pub async fn verify_commitment_is_in_state(
commitment: Commitment, commitment: Commitment,
seq_client: &SequencerClient, seq_client: &SequencerClient,
) -> bool { ) -> bool {
matches!( seq_client
seq_client.get_proof_for_commitment(commitment).await, .get_proof_for_commitment(commitment)
Ok(Some(_)) .await
) .ok()
.flatten()
.is_some()
} }

View File

@ -7,6 +7,7 @@ use anyhow::Result;
use integration_tests::TestContext; use integration_tests::TestContext;
use log::info; use log::info;
use nssa::program::Program; use nssa::program::Program;
use sequencer_service_rpc::RpcClient as _;
use tokio::test; use tokio::test;
use wallet::cli::{ use wallet::cli::{
Command, Command,
@ -21,8 +22,7 @@ async fn get_existing_account() -> Result<()> {
let account = ctx let account = ctx
.sequencer_client() .sequencer_client()
.get_account(ctx.existing_public_accounts()[0]) .get_account(ctx.existing_public_accounts()[0])
.await? .await?;
.account;
assert_eq!( assert_eq!(
account.program_owner, account.program_owner,

View File

@ -9,6 +9,7 @@ use std::time::Duration;
use anyhow::Result; use anyhow::Result;
use integration_tests::{TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, format_public_account_id}; use integration_tests::{TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, format_public_account_id};
use log::info; use log::info;
use sequencer_service_rpc::RpcClient as _;
use tokio::test; use tokio::test;
use wallet::cli::{ use wallet::cli::{
Command, SubcommandReturnValue, Command, SubcommandReturnValue,
@ -194,20 +195,14 @@ async fn amm_public() -> Result<()> {
let user_holding_a_acc = ctx let user_holding_a_acc = ctx
.sequencer_client() .sequencer_client()
.get_account(recipient_account_id_1) .get_account(recipient_account_id_1)
.await? .await?;
.account;
let user_holding_b_acc = ctx let user_holding_b_acc = ctx
.sequencer_client() .sequencer_client()
.get_account(recipient_account_id_2) .get_account(recipient_account_id_2)
.await? .await?;
.account;
let user_holding_lp_acc = ctx let user_holding_lp_acc = ctx.sequencer_client().get_account(user_holding_lp).await?;
.sequencer_client()
.get_account(user_holding_lp)
.await?
.account;
assert_eq!( assert_eq!(
u128::from_le_bytes(user_holding_a_acc.data[33..].try_into().unwrap()), u128::from_le_bytes(user_holding_a_acc.data[33..].try_into().unwrap()),
@ -243,20 +238,14 @@ async fn amm_public() -> Result<()> {
let user_holding_a_acc = ctx let user_holding_a_acc = ctx
.sequencer_client() .sequencer_client()
.get_account(recipient_account_id_1) .get_account(recipient_account_id_1)
.await? .await?;
.account;
let user_holding_b_acc = ctx let user_holding_b_acc = ctx
.sequencer_client() .sequencer_client()
.get_account(recipient_account_id_2) .get_account(recipient_account_id_2)
.await? .await?;
.account;
let user_holding_lp_acc = ctx let user_holding_lp_acc = ctx.sequencer_client().get_account(user_holding_lp).await?;
.sequencer_client()
.get_account(user_holding_lp)
.await?
.account;
assert_eq!( assert_eq!(
u128::from_le_bytes(user_holding_a_acc.data[33..].try_into().unwrap()), u128::from_le_bytes(user_holding_a_acc.data[33..].try_into().unwrap()),
@ -292,20 +281,14 @@ async fn amm_public() -> Result<()> {
let user_holding_a_acc = ctx let user_holding_a_acc = ctx
.sequencer_client() .sequencer_client()
.get_account(recipient_account_id_1) .get_account(recipient_account_id_1)
.await? .await?;
.account;
let user_holding_b_acc = ctx let user_holding_b_acc = ctx
.sequencer_client() .sequencer_client()
.get_account(recipient_account_id_2) .get_account(recipient_account_id_2)
.await? .await?;
.account;
let user_holding_lp_acc = ctx let user_holding_lp_acc = ctx.sequencer_client().get_account(user_holding_lp).await?;
.sequencer_client()
.get_account(user_holding_lp)
.await?
.account;
assert_eq!( assert_eq!(
u128::from_le_bytes(user_holding_a_acc.data[33..].try_into().unwrap()), u128::from_le_bytes(user_holding_a_acc.data[33..].try_into().unwrap()),
@ -342,20 +325,14 @@ async fn amm_public() -> Result<()> {
let user_holding_a_acc = ctx let user_holding_a_acc = ctx
.sequencer_client() .sequencer_client()
.get_account(recipient_account_id_1) .get_account(recipient_account_id_1)
.await? .await?;
.account;
let user_holding_b_acc = ctx let user_holding_b_acc = ctx
.sequencer_client() .sequencer_client()
.get_account(recipient_account_id_2) .get_account(recipient_account_id_2)
.await? .await?;
.account;
let user_holding_lp_acc = ctx let user_holding_lp_acc = ctx.sequencer_client().get_account(user_holding_lp).await?;
.sequencer_client()
.get_account(user_holding_lp)
.await?
.account;
assert_eq!( assert_eq!(
u128::from_le_bytes(user_holding_a_acc.data[33..].try_into().unwrap()), u128::from_le_bytes(user_holding_a_acc.data[33..].try_into().unwrap()),
@ -392,20 +369,14 @@ async fn amm_public() -> Result<()> {
let user_holding_a_acc = ctx let user_holding_a_acc = ctx
.sequencer_client() .sequencer_client()
.get_account(recipient_account_id_1) .get_account(recipient_account_id_1)
.await? .await?;
.account;
let user_holding_b_acc = ctx let user_holding_b_acc = ctx
.sequencer_client() .sequencer_client()
.get_account(recipient_account_id_2) .get_account(recipient_account_id_2)
.await? .await?;
.account;
let user_holding_lp_acc = ctx let user_holding_lp_acc = ctx.sequencer_client().get_account(user_holding_lp).await?;
.sequencer_client()
.get_account(user_holding_lp)
.await?
.account;
assert_eq!( assert_eq!(
u128::from_le_bytes(user_holding_a_acc.data[33..].try_into().unwrap()), u128::from_le_bytes(user_holding_a_acc.data[33..].try_into().unwrap()),

View File

@ -8,6 +8,7 @@ use integration_tests::{
use log::info; use log::info;
use nssa::{AccountId, program::Program}; use nssa::{AccountId, program::Program};
use nssa_core::{NullifierPublicKey, encryption::shared_key_derivation::Secp256k1Point}; use nssa_core::{NullifierPublicKey, encryption::shared_key_derivation::Secp256k1Point};
use sequencer_service_rpc::RpcClient as _;
use tokio::test; use tokio::test;
use wallet::cli::{ use wallet::cli::{
Command, SubcommandReturnValue, Command, SubcommandReturnValue,
@ -135,7 +136,7 @@ async fn deshielded_transfer_to_public_account() -> Result<()> {
let acc_2_balance = ctx.sequencer_client().get_account_balance(to).await?; let acc_2_balance = ctx.sequencer_client().get_account_balance(to).await?;
assert_eq!(from_acc.balance, 9900); assert_eq!(from_acc.balance, 9900);
assert_eq!(acc_2_balance.balance, 20100); assert_eq!(acc_2_balance, 20100);
info!("Successfully deshielded transfer to public account"); info!("Successfully deshielded transfer to public account");
@ -175,7 +176,7 @@ async fn private_transfer_to_owned_account_using_claiming_path() -> Result<()> {
let command = Command::AuthTransfer(AuthTransferSubcommand::Send { let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: format_private_account_id(from), from: format_private_account_id(from),
to: None, to: None,
to_npk: Some(hex::encode(to_keys.nullifer_public_key.0)), to_npk: Some(hex::encode(to_keys.nullifier_public_key.0)),
to_vpk: Some(hex::encode(to_keys.viewing_public_key.0)), to_vpk: Some(hex::encode(to_keys.viewing_public_key.0)),
amount: 100, amount: 100,
}); });
@ -245,7 +246,7 @@ async fn shielded_transfer_to_owned_private_account() -> Result<()> {
let acc_from_balance = ctx.sequencer_client().get_account_balance(from).await?; let acc_from_balance = ctx.sequencer_client().get_account_balance(from).await?;
assert_eq!(acc_from_balance.balance, 9900); assert_eq!(acc_from_balance, 9900);
assert_eq!(acc_to.balance, 20100); assert_eq!(acc_to.balance, 20100);
info!("Successfully shielded transfer to owned private account"); info!("Successfully shielded transfer to owned private account");
@ -290,7 +291,7 @@ async fn shielded_transfer_to_foreign_account() -> Result<()> {
.await .await
); );
assert_eq!(acc_1_balance.balance, 9900); assert_eq!(acc_1_balance, 9900);
info!("Successfully shielded transfer to foreign account"); info!("Successfully shielded transfer to foreign account");
@ -335,7 +336,7 @@ async fn private_transfer_to_owned_account_continuous_run_path() -> Result<()> {
let command = Command::AuthTransfer(AuthTransferSubcommand::Send { let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: format_private_account_id(from), from: format_private_account_id(from),
to: None, to: None,
to_npk: Some(hex::encode(to_keys.nullifer_public_key.0)), to_npk: Some(hex::encode(to_keys.nullifier_public_key.0)),
to_vpk: Some(hex::encode(to_keys.viewing_public_key.0)), to_vpk: Some(hex::encode(to_keys.viewing_public_key.0)),
amount: 100, amount: 100,
}); });

View File

@ -4,6 +4,7 @@ use anyhow::Result;
use integration_tests::{TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, format_public_account_id}; use integration_tests::{TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, format_public_account_id};
use log::info; use log::info;
use nssa::program::Program; use nssa::program::Program;
use sequencer_service_rpc::RpcClient as _;
use tokio::test; use tokio::test;
use wallet::cli::{ use wallet::cli::{
Command, SubcommandReturnValue, Command, SubcommandReturnValue,
@ -41,8 +42,8 @@ async fn successful_transfer_to_existing_account() -> Result<()> {
info!("Balance of sender: {acc_1_balance:#?}"); info!("Balance of sender: {acc_1_balance:#?}");
info!("Balance of receiver: {acc_2_balance:#?}"); info!("Balance of receiver: {acc_2_balance:#?}");
assert_eq!(acc_1_balance.balance, 9900); assert_eq!(acc_1_balance, 9900);
assert_eq!(acc_2_balance.balance, 20100); assert_eq!(acc_2_balance, 20100);
Ok(()) Ok(())
} }
@ -97,8 +98,8 @@ pub async fn successful_transfer_to_new_account() -> Result<()> {
info!("Balance of sender: {acc_1_balance:#?}"); info!("Balance of sender: {acc_1_balance:#?}");
info!("Balance of receiver: {acc_2_balance:#?}"); info!("Balance of receiver: {acc_2_balance:#?}");
assert_eq!(acc_1_balance.balance, 9900); assert_eq!(acc_1_balance, 9900);
assert_eq!(acc_2_balance.balance, 100); assert_eq!(acc_2_balance, 100);
Ok(()) Ok(())
} }
@ -134,8 +135,8 @@ async fn failed_transfer_with_insufficient_balance() -> Result<()> {
info!("Balance of sender: {acc_1_balance:#?}"); info!("Balance of sender: {acc_1_balance:#?}");
info!("Balance of receiver: {acc_2_balance:#?}"); info!("Balance of receiver: {acc_2_balance:#?}");
assert_eq!(acc_1_balance.balance, 10000); assert_eq!(acc_1_balance, 10000);
assert_eq!(acc_2_balance.balance, 20000); assert_eq!(acc_2_balance, 20000);
Ok(()) Ok(())
} }
@ -171,8 +172,8 @@ async fn two_consecutive_successful_transfers() -> Result<()> {
info!("Balance of sender: {acc_1_balance:#?}"); info!("Balance of sender: {acc_1_balance:#?}");
info!("Balance of receiver: {acc_2_balance:#?}"); info!("Balance of receiver: {acc_2_balance:#?}");
assert_eq!(acc_1_balance.balance, 9900); assert_eq!(acc_1_balance, 9900);
assert_eq!(acc_2_balance.balance, 20100); assert_eq!(acc_2_balance, 20100);
info!("First TX Success!"); info!("First TX Success!");
@ -203,8 +204,8 @@ async fn two_consecutive_successful_transfers() -> Result<()> {
info!("Balance of sender: {acc_1_balance:#?}"); info!("Balance of sender: {acc_1_balance:#?}");
info!("Balance of receiver: {acc_2_balance:#?}"); info!("Balance of receiver: {acc_2_balance:#?}");
assert_eq!(acc_1_balance.balance, 9800); assert_eq!(acc_1_balance, 9800);
assert_eq!(acc_2_balance.balance, 20200); assert_eq!(acc_2_balance, 20200);
info!("Second TX Success!"); info!("Second TX Success!");
@ -230,11 +231,7 @@ async fn initialize_public_account() -> Result<()> {
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?; wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
info!("Checking correct execution"); info!("Checking correct execution");
let account = ctx let account = ctx.sequencer_client().get_account(account_id).await?;
.sequencer_client()
.get_account(account_id)
.await?
.account;
assert_eq!( assert_eq!(
account.program_owner, account.program_owner,

View File

@ -8,11 +8,12 @@ use std::time::Duration;
use anyhow::Result; use anyhow::Result;
use bytesize::ByteSize; use bytesize::ByteSize;
use common::{block::HashableBlockData, transaction::NSSATransaction}; use common::transaction::NSSATransaction;
use integration_tests::{ use integration_tests::{
TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, config::SequencerPartialConfig, TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, config::SequencerPartialConfig,
}; };
use nssa::program::Program; use nssa::program::Program;
use sequencer_service_rpc::RpcClient as _;
use tokio::test; use tokio::test;
#[test] #[test]
@ -36,7 +37,10 @@ async fn reject_oversized_transaction() -> Result<()> {
let tx = nssa::ProgramDeploymentTransaction::new(message); let tx = nssa::ProgramDeploymentTransaction::new(message);
// Try to submit the transaction and expect an error // Try to submit the transaction and expect an error
let result = ctx.sequencer_client().send_tx_program(tx).await; let result = ctx
.sequencer_client()
.send_transaction(NSSATransaction::ProgramDeployment(tx))
.await;
assert!( assert!(
result.is_err(), result.is_err(),
@ -74,7 +78,10 @@ async fn accept_transaction_within_limit() -> Result<()> {
let tx = nssa::ProgramDeploymentTransaction::new(message); let tx = nssa::ProgramDeploymentTransaction::new(message);
// This should succeed // This should succeed
let result = ctx.sequencer_client().send_tx_program(tx).await; let result = ctx
.sequencer_client()
.send_transaction(NSSATransaction::ProgramDeployment(tx))
.await;
assert!( assert!(
result.is_ok(), result.is_ok(),
@ -112,33 +119,38 @@ async fn transaction_deferred_to_next_block_when_current_full() -> Result<()> {
let burner_id = Program::new(burner_bytecode.clone())?.id(); let burner_id = Program::new(burner_bytecode.clone())?.id();
let chain_caller_id = Program::new(chain_caller_bytecode.clone())?.id(); let chain_caller_id = Program::new(chain_caller_bytecode.clone())?.id();
let initial_block_height = ctx.sequencer_client().get_last_block().await?.last_block; let initial_block_height = ctx.sequencer_client().get_last_block_id().await?;
// Submit both program deployments // Submit both program deployments
ctx.sequencer_client() ctx.sequencer_client()
.send_tx_program(nssa::ProgramDeploymentTransaction::new( .send_transaction(NSSATransaction::ProgramDeployment(
nssa::program_deployment_transaction::Message::new(burner_bytecode), nssa::ProgramDeploymentTransaction::new(
nssa::program_deployment_transaction::Message::new(burner_bytecode),
),
)) ))
.await?; .await?;
ctx.sequencer_client() ctx.sequencer_client()
.send_tx_program(nssa::ProgramDeploymentTransaction::new( .send_transaction(NSSATransaction::ProgramDeployment(
nssa::program_deployment_transaction::Message::new(chain_caller_bytecode), nssa::ProgramDeploymentTransaction::new(
nssa::program_deployment_transaction::Message::new(chain_caller_bytecode),
),
)) ))
.await?; .await?;
// Wait for first block // Wait for first block
tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await; tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
let block1_response = ctx let block1 = ctx
.sequencer_client() .sequencer_client()
.get_block(initial_block_height + 1) .get_block(initial_block_height + 1)
.await?; .await?
let block1: HashableBlockData = borsh::from_slice(&block1_response.block)?; .unwrap();
// Check which program is in block 1 // Check which program is in block 1
let get_program_ids = |block: &HashableBlockData| -> Vec<nssa::ProgramId> { let get_program_ids = |block: &common::block::Block| -> Vec<nssa::ProgramId> {
block block
.body
.transactions .transactions
.iter() .iter()
.filter_map(|tx| { .filter_map(|tx| {
@ -168,11 +180,11 @@ async fn transaction_deferred_to_next_block_when_current_full() -> Result<()> {
// Wait for second block // Wait for second block
tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await; tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
let block2_response = ctx let block2 = ctx
.sequencer_client() .sequencer_client()
.get_block(initial_block_height + 2) .get_block(initial_block_height + 2)
.await?; .await?
let block2: HashableBlockData = borsh::from_slice(&block2_response.block)?; .unwrap();
let block2_program_ids = get_program_ids(&block2); let block2_program_ids = get_program_ids(&block2);
// The other program should be in block 2 // The other program should be in block 2

View File

@ -22,12 +22,8 @@ async fn indexer_test_run() -> Result<()> {
// RUN OBSERVATION // RUN OBSERVATION
tokio::time::sleep(std::time::Duration::from_millis(L2_TO_L1_TIMEOUT_MILLIS)).await; tokio::time::sleep(std::time::Duration::from_millis(L2_TO_L1_TIMEOUT_MILLIS)).await;
let last_block_seq = ctx let last_block_seq =
.sequencer_client() sequencer_service_rpc::RpcClient::get_last_block_id(ctx.sequencer_client()).await?;
.get_last_block()
.await
.unwrap()
.last_block;
info!("Last block on seq now is {last_block_seq}"); info!("Last block on seq now is {last_block_seq}");
@ -100,20 +96,22 @@ async fn indexer_state_consistency() -> Result<()> {
tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await; tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
info!("Checking correct balance move"); info!("Checking correct balance move");
let acc_1_balance = ctx let acc_1_balance = sequencer_service_rpc::RpcClient::get_account_balance(
.sequencer_client() ctx.sequencer_client(),
.get_account_balance(ctx.existing_public_accounts()[0]) ctx.existing_public_accounts()[0],
.await?; )
let acc_2_balance = ctx .await?;
.sequencer_client() let acc_2_balance = sequencer_service_rpc::RpcClient::get_account_balance(
.get_account_balance(ctx.existing_public_accounts()[1]) ctx.sequencer_client(),
.await?; ctx.existing_public_accounts()[1],
)
.await?;
info!("Balance of sender: {acc_1_balance:#?}"); info!("Balance of sender: {acc_1_balance:#?}");
info!("Balance of receiver: {acc_2_balance:#?}"); info!("Balance of receiver: {acc_2_balance:#?}");
assert_eq!(acc_1_balance.balance, 9900); assert_eq!(acc_1_balance, 9900);
assert_eq!(acc_2_balance.balance, 20100); assert_eq!(acc_2_balance, 20100);
// WAIT // WAIT
info!("Waiting for indexer to parse blocks"); info!("Waiting for indexer to parse blocks");
@ -131,16 +129,16 @@ async fn indexer_state_consistency() -> Result<()> {
.unwrap(); .unwrap();
info!("Checking correct state transition"); info!("Checking correct state transition");
let acc1_seq_state = ctx let acc1_seq_state = sequencer_service_rpc::RpcClient::get_account(
.sequencer_client() ctx.sequencer_client(),
.get_account(ctx.existing_public_accounts()[0]) ctx.existing_public_accounts()[0],
.await? )
.account; .await?;
let acc2_seq_state = ctx let acc2_seq_state = sequencer_service_rpc::RpcClient::get_account(
.sequencer_client() ctx.sequencer_client(),
.get_account(ctx.existing_public_accounts()[1]) ctx.existing_public_accounts()[1],
.await? )
.account; .await?;
assert_eq!(acc1_ind_state, acc1_seq_state.into()); assert_eq!(acc1_ind_state, acc1_seq_state.into());
assert_eq!(acc2_ind_state, acc2_seq_state.into()); assert_eq!(acc2_ind_state, acc2_seq_state.into());

View File

@ -14,6 +14,7 @@ use integration_tests::{
use key_protocol::key_management::key_tree::chain_index::ChainIndex; use key_protocol::key_management::key_tree::chain_index::ChainIndex;
use log::info; use log::info;
use nssa::{AccountId, program::Program}; use nssa::{AccountId, program::Program};
use sequencer_service_rpc::RpcClient as _;
use tokio::test; use tokio::test;
use wallet::cli::{ use wallet::cli::{
Command, SubcommandReturnValue, Command, SubcommandReturnValue,
@ -70,7 +71,7 @@ async fn sync_private_account_with_non_zero_chain_index() -> Result<()> {
let command = Command::AuthTransfer(AuthTransferSubcommand::Send { let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: format_private_account_id(from), from: format_private_account_id(from),
to: None, to: None,
to_npk: Some(hex::encode(to_keys.nullifer_public_key.0)), to_npk: Some(hex::encode(to_keys.nullifier_public_key.0)),
to_vpk: Some(hex::encode(to_keys.viewing_public_key.0)), to_vpk: Some(hex::encode(to_keys.viewing_public_key.0)),
amount: 100, amount: 100,
}); });
@ -305,8 +306,8 @@ async fn restore_keys_from_seed() -> Result<()> {
.get_account_balance(to_account_id4) .get_account_balance(to_account_id4)
.await?; .await?;
assert_eq!(acc3.balance, 91); // 102 - 11 assert_eq!(acc3, 91); // 102 - 11
assert_eq!(acc4.balance, 114); // 103 + 11 assert_eq!(acc4, 114); // 103 + 11
info!("Successfully restored keys and verified transactions"); info!("Successfully restored keys and verified transactions");

View File

@ -13,6 +13,7 @@ use integration_tests::{
format_public_account_id, verify_commitment_is_in_state, format_public_account_id, verify_commitment_is_in_state,
}; };
use log::info; use log::info;
use sequencer_service_rpc::RpcClient as _;
use tokio::test; use tokio::test;
use wallet::cli::{ use wallet::cli::{
Command, SubcommandReturnValue, Command, SubcommandReturnValue,
@ -46,8 +47,7 @@ async fn claim_pinata_to_uninitialized_public_account_fails_fast() -> Result<()>
let pinata_balance_pre = ctx let pinata_balance_pre = ctx
.sequencer_client() .sequencer_client()
.get_account_balance(PINATA_BASE58.parse().unwrap()) .get_account_balance(PINATA_BASE58.parse().unwrap())
.await? .await?;
.balance;
let claim_result = wallet::cli::execute_subcommand( let claim_result = wallet::cli::execute_subcommand(
ctx.wallet_mut(), ctx.wallet_mut(),
@ -70,8 +70,7 @@ async fn claim_pinata_to_uninitialized_public_account_fails_fast() -> Result<()>
let pinata_balance_post = ctx let pinata_balance_post = ctx
.sequencer_client() .sequencer_client()
.get_account_balance(PINATA_BASE58.parse().unwrap()) .get_account_balance(PINATA_BASE58.parse().unwrap())
.await? .await?;
.balance;
assert_eq!(pinata_balance_post, pinata_balance_pre); assert_eq!(pinata_balance_post, pinata_balance_pre);
@ -102,8 +101,7 @@ async fn claim_pinata_to_uninitialized_private_account_fails_fast() -> Result<()
let pinata_balance_pre = ctx let pinata_balance_pre = ctx
.sequencer_client() .sequencer_client()
.get_account_balance(PINATA_BASE58.parse().unwrap()) .get_account_balance(PINATA_BASE58.parse().unwrap())
.await? .await?;
.balance;
let claim_result = wallet::cli::execute_subcommand( let claim_result = wallet::cli::execute_subcommand(
ctx.wallet_mut(), ctx.wallet_mut(),
@ -126,8 +124,7 @@ async fn claim_pinata_to_uninitialized_private_account_fails_fast() -> Result<()
let pinata_balance_post = ctx let pinata_balance_post = ctx
.sequencer_client() .sequencer_client()
.get_account_balance(PINATA_BASE58.parse().unwrap()) .get_account_balance(PINATA_BASE58.parse().unwrap())
.await? .await?;
.balance;
assert_eq!(pinata_balance_post, pinata_balance_pre); assert_eq!(pinata_balance_post, pinata_balance_pre);
@ -146,8 +143,7 @@ async fn claim_pinata_to_existing_public_account() -> Result<()> {
let pinata_balance_pre = ctx let pinata_balance_pre = ctx
.sequencer_client() .sequencer_client()
.get_account_balance(PINATA_BASE58.parse().unwrap()) .get_account_balance(PINATA_BASE58.parse().unwrap())
.await? .await?;
.balance;
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?; wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -158,14 +154,12 @@ async fn claim_pinata_to_existing_public_account() -> Result<()> {
let pinata_balance_post = ctx let pinata_balance_post = ctx
.sequencer_client() .sequencer_client()
.get_account_balance(PINATA_BASE58.parse().unwrap()) .get_account_balance(PINATA_BASE58.parse().unwrap())
.await? .await?;
.balance;
let winner_balance_post = ctx let winner_balance_post = ctx
.sequencer_client() .sequencer_client()
.get_account_balance(ctx.existing_public_accounts()[0]) .get_account_balance(ctx.existing_public_accounts()[0])
.await? .await?;
.balance;
assert_eq!(pinata_balance_post, pinata_balance_pre - pinata_prize); assert_eq!(pinata_balance_post, pinata_balance_pre - pinata_prize);
assert_eq!(winner_balance_post, 10000 + pinata_prize); assert_eq!(winner_balance_post, 10000 + pinata_prize);
@ -187,8 +181,7 @@ async fn claim_pinata_to_existing_private_account() -> Result<()> {
let pinata_balance_pre = ctx let pinata_balance_pre = ctx
.sequencer_client() .sequencer_client()
.get_account_balance(PINATA_BASE58.parse().unwrap()) .get_account_balance(PINATA_BASE58.parse().unwrap())
.await? .await?;
.balance;
let result = wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?; let result = wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
let SubcommandReturnValue::PrivacyPreservingTransfer { tx_hash: _ } = result else { let SubcommandReturnValue::PrivacyPreservingTransfer { tx_hash: _ } = result else {
@ -211,8 +204,7 @@ async fn claim_pinata_to_existing_private_account() -> Result<()> {
let pinata_balance_post = ctx let pinata_balance_post = ctx
.sequencer_client() .sequencer_client()
.get_account_balance(PINATA_BASE58.parse().unwrap()) .get_account_balance(PINATA_BASE58.parse().unwrap())
.await? .await?;
.balance;
assert_eq!(pinata_balance_post, pinata_balance_pre - pinata_prize); assert_eq!(pinata_balance_post, pinata_balance_pre - pinata_prize);
@ -268,8 +260,7 @@ async fn claim_pinata_to_new_private_account() -> Result<()> {
let pinata_balance_pre = ctx let pinata_balance_pre = ctx
.sequencer_client() .sequencer_client()
.get_account_balance(PINATA_BASE58.parse().unwrap()) .get_account_balance(PINATA_BASE58.parse().unwrap())
.await? .await?;
.balance;
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?; wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -285,8 +276,7 @@ async fn claim_pinata_to_new_private_account() -> Result<()> {
let pinata_balance_post = ctx let pinata_balance_post = ctx
.sequencer_client() .sequencer_client()
.get_account_balance(PINATA_BASE58.parse().unwrap()) .get_account_balance(PINATA_BASE58.parse().unwrap())
.await? .await?;
.balance;
assert_eq!(pinata_balance_post, pinata_balance_pre - pinata_prize); assert_eq!(pinata_balance_post, pinata_balance_pre - pinata_prize);

View File

@ -6,11 +6,13 @@
use std::{path::PathBuf, time::Duration}; use std::{path::PathBuf, time::Duration};
use anyhow::Result; use anyhow::Result;
use common::transaction::NSSATransaction;
use integration_tests::{ use integration_tests::{
NSSA_PROGRAM_FOR_TEST_DATA_CHANGER, TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, NSSA_PROGRAM_FOR_TEST_DATA_CHANGER, TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext,
}; };
use log::info; use log::info;
use nssa::{AccountId, program::Program}; use nssa::{AccountId, program::Program};
use sequencer_service_rpc::RpcClient as _;
use tokio::test; use tokio::test;
use wallet::cli::Command; use wallet::cli::Command;
@ -47,18 +49,17 @@ async fn deploy_and_execute_program() -> Result<()> {
)?; )?;
let witness_set = nssa::public_transaction::WitnessSet::for_message(&message, &[]); let witness_set = nssa::public_transaction::WitnessSet::for_message(&message, &[]);
let transaction = nssa::PublicTransaction::new(message, witness_set); let transaction = nssa::PublicTransaction::new(message, witness_set);
let _response = ctx.sequencer_client().send_tx_public(transaction).await?; let _response = ctx
.sequencer_client()
.send_transaction(NSSATransaction::Public(transaction))
.await?;
info!("Waiting for next block creation"); info!("Waiting for next block creation");
// Waiting for long time as it may take some time for such a big transaction to be included in a // Waiting for long time as it may take some time for such a big transaction to be included in a
// block // block
tokio::time::sleep(Duration::from_secs(2 * TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await; tokio::time::sleep(Duration::from_secs(2 * TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
let post_state_account = ctx let post_state_account = ctx.sequencer_client().get_account(account_id).await?;
.sequencer_client()
.get_account(account_id)
.await?
.account;
assert_eq!(post_state_account.program_owner, data_changer.id()); assert_eq!(post_state_account.program_owner, data_changer.id());
assert_eq!(post_state_account.balance, 0); assert_eq!(post_state_account.balance, 0);

View File

@ -14,6 +14,7 @@ use integration_tests::{
use key_protocol::key_management::key_tree::chain_index::ChainIndex; use key_protocol::key_management::key_tree::chain_index::ChainIndex;
use log::info; use log::info;
use nssa::program::Program; use nssa::program::Program;
use sequencer_service_rpc::RpcClient as _;
use token_core::{TokenDefinition, TokenHolding}; use token_core::{TokenDefinition, TokenHolding};
use tokio::test; use tokio::test;
use wallet::cli::{ use wallet::cli::{
@ -92,8 +93,7 @@ async fn create_and_transfer_public_token() -> Result<()> {
let definition_acc = ctx let definition_acc = ctx
.sequencer_client() .sequencer_client()
.get_account(definition_account_id) .get_account(definition_account_id)
.await? .await?;
.account;
let token_definition = TokenDefinition::try_from(&definition_acc.data)?; let token_definition = TokenDefinition::try_from(&definition_acc.data)?;
assert_eq!(definition_acc.program_owner, Program::token().id()); assert_eq!(definition_acc.program_owner, Program::token().id());
@ -110,8 +110,7 @@ async fn create_and_transfer_public_token() -> Result<()> {
let supply_acc = ctx let supply_acc = ctx
.sequencer_client() .sequencer_client()
.get_account(supply_account_id) .get_account(supply_account_id)
.await? .await?;
.account;
// The account must be owned by the token program // The account must be owned by the token program
assert_eq!(supply_acc.program_owner, Program::token().id()); assert_eq!(supply_acc.program_owner, Program::token().id());
@ -143,8 +142,7 @@ async fn create_and_transfer_public_token() -> Result<()> {
let supply_acc = ctx let supply_acc = ctx
.sequencer_client() .sequencer_client()
.get_account(supply_account_id) .get_account(supply_account_id)
.await? .await?;
.account;
assert_eq!(supply_acc.program_owner, Program::token().id()); assert_eq!(supply_acc.program_owner, Program::token().id());
let token_holding = TokenHolding::try_from(&supply_acc.data)?; let token_holding = TokenHolding::try_from(&supply_acc.data)?;
assert_eq!( assert_eq!(
@ -159,8 +157,7 @@ async fn create_and_transfer_public_token() -> Result<()> {
let recipient_acc = ctx let recipient_acc = ctx
.sequencer_client() .sequencer_client()
.get_account(recipient_account_id) .get_account(recipient_account_id)
.await? .await?;
.account;
assert_eq!(recipient_acc.program_owner, Program::token().id()); assert_eq!(recipient_acc.program_owner, Program::token().id());
let token_holding = TokenHolding::try_from(&recipient_acc.data)?; let token_holding = TokenHolding::try_from(&recipient_acc.data)?;
assert_eq!( assert_eq!(
@ -188,8 +185,7 @@ async fn create_and_transfer_public_token() -> Result<()> {
let definition_acc = ctx let definition_acc = ctx
.sequencer_client() .sequencer_client()
.get_account(definition_account_id) .get_account(definition_account_id)
.await? .await?;
.account;
let token_definition = TokenDefinition::try_from(&definition_acc.data)?; let token_definition = TokenDefinition::try_from(&definition_acc.data)?;
assert_eq!( assert_eq!(
@ -205,8 +201,7 @@ async fn create_and_transfer_public_token() -> Result<()> {
let recipient_acc = ctx let recipient_acc = ctx
.sequencer_client() .sequencer_client()
.get_account(recipient_account_id) .get_account(recipient_account_id)
.await? .await?;
.account;
let token_holding = TokenHolding::try_from(&recipient_acc.data)?; let token_holding = TokenHolding::try_from(&recipient_acc.data)?;
assert_eq!( assert_eq!(
@ -236,8 +231,7 @@ async fn create_and_transfer_public_token() -> Result<()> {
let definition_acc = ctx let definition_acc = ctx
.sequencer_client() .sequencer_client()
.get_account(definition_account_id) .get_account(definition_account_id)
.await? .await?;
.account;
let token_definition = TokenDefinition::try_from(&definition_acc.data)?; let token_definition = TokenDefinition::try_from(&definition_acc.data)?;
assert_eq!( assert_eq!(
@ -253,8 +247,7 @@ async fn create_and_transfer_public_token() -> Result<()> {
let recipient_acc = ctx let recipient_acc = ctx
.sequencer_client() .sequencer_client()
.get_account(recipient_account_id) .get_account(recipient_account_id)
.await? .await?;
.account;
let token_holding = TokenHolding::try_from(&recipient_acc.data)?; let token_holding = TokenHolding::try_from(&recipient_acc.data)?;
assert_eq!( assert_eq!(
@ -341,8 +334,7 @@ async fn create_and_transfer_token_with_private_supply() -> Result<()> {
let definition_acc = ctx let definition_acc = ctx
.sequencer_client() .sequencer_client()
.get_account(definition_account_id) .get_account(definition_account_id)
.await? .await?;
.account;
let token_definition = TokenDefinition::try_from(&definition_acc.data)?; let token_definition = TokenDefinition::try_from(&definition_acc.data)?;
assert_eq!(definition_acc.program_owner, Program::token().id()); assert_eq!(definition_acc.program_owner, Program::token().id());
@ -405,8 +397,7 @@ async fn create_and_transfer_token_with_private_supply() -> Result<()> {
let definition_acc = ctx let definition_acc = ctx
.sequencer_client() .sequencer_client()
.get_account(definition_account_id) .get_account(definition_account_id)
.await? .await?;
.account;
let token_definition = TokenDefinition::try_from(&definition_acc.data)?; let token_definition = TokenDefinition::try_from(&definition_acc.data)?;
assert_eq!( assert_eq!(
@ -506,8 +497,7 @@ async fn create_token_with_private_definition() -> Result<()> {
let supply_acc = ctx let supply_acc = ctx
.sequencer_client() .sequencer_client()
.get_account(supply_account_id) .get_account(supply_account_id)
.await? .await?;
.account;
assert_eq!(supply_acc.program_owner, Program::token().id()); assert_eq!(supply_acc.program_owner, Program::token().id());
let token_holding = TokenHolding::try_from(&supply_acc.data)?; let token_holding = TokenHolding::try_from(&supply_acc.data)?;
@ -586,8 +576,7 @@ async fn create_token_with_private_definition() -> Result<()> {
let recipient_acc = ctx let recipient_acc = ctx
.sequencer_client() .sequencer_client()
.get_account(recipient_account_id_public) .get_account(recipient_account_id_public)
.await? .await?;
.account;
let token_holding = TokenHolding::try_from(&recipient_acc.data)?; let token_holding = TokenHolding::try_from(&recipient_acc.data)?;
assert_eq!( assert_eq!(
@ -882,8 +871,7 @@ async fn shielded_token_transfer() -> Result<()> {
let supply_acc = ctx let supply_acc = ctx
.sequencer_client() .sequencer_client()
.get_account(supply_account_id) .get_account(supply_account_id)
.await? .await?;
.account;
let token_holding = TokenHolding::try_from(&supply_acc.data)?; let token_holding = TokenHolding::try_from(&supply_acc.data)?;
assert_eq!( assert_eq!(
token_holding, token_holding,
@ -1026,8 +1014,7 @@ async fn deshielded_token_transfer() -> Result<()> {
let recipient_acc = ctx let recipient_acc = ctx
.sequencer_client() .sequencer_client()
.get_account(recipient_account_id) .get_account(recipient_account_id)
.await? .await?;
.account;
let token_holding = TokenHolding::try_from(&recipient_acc.data)?; let token_holding = TokenHolding::try_from(&recipient_acc.data)?;
assert_eq!( assert_eq!(
token_holding, token_holding,
@ -1123,7 +1110,7 @@ async fn token_claiming_path_with_private_accounts() -> Result<()> {
let subcommand = TokenProgramAgnosticSubcommand::Mint { let subcommand = TokenProgramAgnosticSubcommand::Mint {
definition: format_private_account_id(definition_account_id), definition: format_private_account_id(definition_account_id),
holder: None, holder: None,
holder_npk: Some(hex::encode(holder_keys.nullifer_public_key.0)), holder_npk: Some(hex::encode(holder_keys.nullifier_public_key.0)),
holder_vpk: Some(hex::encode(holder_keys.viewing_public_key.0)), holder_vpk: Some(hex::encode(holder_keys.viewing_public_key.0)),
amount: mint_amount, amount: mint_amount,
}; };

View File

@ -13,6 +13,7 @@ use std::time::{Duration, Instant};
use anyhow::Result; use anyhow::Result;
use bytesize::ByteSize; use bytesize::ByteSize;
use common::transaction::NSSATransaction;
use integration_tests::{ use integration_tests::{
TestContext, TestContext,
config::{InitialData, SequencerPartialConfig}, config::{InitialData, SequencerPartialConfig},
@ -30,6 +31,7 @@ use nssa_core::{
account::{AccountWithMetadata, Nonce, data::Data}, account::{AccountWithMetadata, Nonce, data::Data},
encryption::ViewingPublicKey, encryption::ViewingPublicKey,
}; };
use sequencer_service_rpc::RpcClient as _;
use tokio::test; use tokio::test;
pub(crate) struct TpsTestManager { pub(crate) struct TpsTestManager {
@ -153,10 +155,9 @@ pub async fn tps_test() -> Result<()> {
for (i, tx) in txs.into_iter().enumerate() { for (i, tx) in txs.into_iter().enumerate() {
let tx_hash = ctx let tx_hash = ctx
.sequencer_client() .sequencer_client()
.send_tx_public(tx) .send_transaction(NSSATransaction::Public(tx))
.await .await
.unwrap() .unwrap();
.tx_hash;
info!("Sent tx {i}"); info!("Sent tx {i}");
tx_hashes.push(tx_hash); tx_hashes.push(tx_hash);
} }
@ -170,15 +171,13 @@ pub async fn tps_test() -> Result<()> {
let tx_obj = ctx let tx_obj = ctx
.sequencer_client() .sequencer_client()
.get_transaction_by_hash(*tx_hash) .get_transaction(*tx_hash)
.await .await
.inspect_err(|err| { .inspect_err(|err| {
log::warn!("Failed to get transaction by hash {tx_hash} with error: {err:#?}"); log::warn!("Failed to get transaction by hash {tx_hash} with error: {err:#?}");
}); });
if let Ok(tx_obj) = tx_obj if tx_obj.is_ok_and(|opt| opt.is_some()) {
&& tx_obj.transaction.is_some()
{
info!("Found tx {i} with hash {tx_hash}"); info!("Found tx {i} with hash {tx_hash}");
break; break;
} }

View File

@ -606,7 +606,7 @@ fn test_wallet_ffi_get_private_account_keys() -> Result<()> {
.unwrap() .unwrap()
.0; .0;
let expected_npk = &key_chain.nullifer_public_key; let expected_npk = &key_chain.nullifier_public_key;
let expected_vpk = &key_chain.viewing_public_key; let expected_vpk = &key_chain.viewing_public_key;
assert_eq!(&keys.npk(), expected_npk); assert_eq!(&keys.npk(), expected_npk);

View File

@ -19,10 +19,12 @@ serde.workspace = true
k256.workspace = true k256.workspace = true
sha2.workspace = true sha2.workspace = true
rand.workspace = true rand.workspace = true
base58.workspace = true
hex.workspace = true hex.workspace = true
aes-gcm.workspace = true aes-gcm.workspace = true
bip39.workspace = true bip39.workspace = true
hmac-sha512.workspace = true hmac-sha512.workspace = true
thiserror.workspace = true thiserror.workspace = true
itertools.workspace = true itertools.workspace = true
[dev-dependencies]
base58.workspace = true

View File

@ -39,7 +39,7 @@ impl KeyNode for ChildKeysPrivate {
value: ( value: (
KeyChain { KeyChain {
secret_spending_key: ssk, secret_spending_key: ssk,
nullifer_public_key: npk, nullifier_public_key: npk,
viewing_public_key: vpk, viewing_public_key: vpk,
private_key_holder: PrivateKeyHolder { private_key_holder: PrivateKeyHolder {
nullifier_secret_key: nsk, nullifier_secret_key: nsk,
@ -54,10 +54,7 @@ impl KeyNode for ChildKeysPrivate {
} }
fn nth_child(&self, cci: u32) -> Self { fn nth_child(&self, cci: u32) -> Self {
#[expect( #[expect(clippy::arithmetic_side_effects, reason = "TODO: fix later")]
clippy::arithmetic_side_effects,
reason = "Multiplying finite field scalars gives no unexpected side effects"
)]
let parent_pt = let parent_pt =
Scalar::from_repr(self.value.0.private_key_holder.nullifier_secret_key.into()) Scalar::from_repr(self.value.0.private_key_holder.nullifier_secret_key.into())
.expect("Key generated as scalar, must be valid representation") .expect("Key generated as scalar, must be valid representation")
@ -67,7 +64,8 @@ impl KeyNode for ChildKeysPrivate {
input.extend_from_slice(b"LEE_seed_priv"); input.extend_from_slice(b"LEE_seed_priv");
input.extend_from_slice(&parent_pt.to_bytes()); input.extend_from_slice(&parent_pt.to_bytes());
input.extend_from_slice(&cci.to_le_bytes()); #[expect(clippy::big_endian_bytes, reason = "BIP-032 uses big endian")]
input.extend_from_slice(&cci.to_be_bytes());
let hash_value = hmac_sha512::HMAC::mac(input, self.ccc); let hash_value = hmac_sha512::HMAC::mac(input, self.ccc);
@ -90,7 +88,7 @@ impl KeyNode for ChildKeysPrivate {
value: ( value: (
KeyChain { KeyChain {
secret_spending_key: ssk, secret_spending_key: ssk,
nullifer_public_key: npk, nullifier_public_key: npk,
viewing_public_key: vpk, viewing_public_key: vpk,
private_key_holder: PrivateKeyHolder { private_key_holder: PrivateKeyHolder {
nullifier_secret_key: nsk, nullifier_secret_key: nsk,
@ -113,18 +111,26 @@ impl KeyNode for ChildKeysPrivate {
} }
fn account_id(&self) -> nssa::AccountId { fn account_id(&self) -> nssa::AccountId {
nssa::AccountId::from(&self.value.0.nullifer_public_key) nssa::AccountId::from(&self.value.0.nullifier_public_key)
} }
} }
impl<'keys> From<&'keys ChildKeysPrivate> for &'keys (KeyChain, nssa::Account) { #[expect(
fn from(value: &'keys ChildKeysPrivate) -> Self { clippy::single_char_lifetime_names,
reason = "TODO add meaningful name"
)]
impl<'a> From<&'a ChildKeysPrivate> for &'a (KeyChain, nssa::Account) {
fn from(value: &'a ChildKeysPrivate) -> Self {
&value.value &value.value
} }
} }
impl<'keys> From<&'keys mut ChildKeysPrivate> for &'keys mut (KeyChain, nssa::Account) { #[expect(
fn from(value: &'keys mut ChildKeysPrivate) -> Self { clippy::single_char_lifetime_names,
reason = "TODO add meaningful name"
)]
impl<'a> From<&'a mut ChildKeysPrivate> for &'a mut (KeyChain, nssa::Account) {
fn from(value: &'a mut ChildKeysPrivate) -> Self {
&mut value.value &mut value.value
} }
} }
@ -166,7 +172,7 @@ mod tests {
7, 123, 125, 191, 233, 183, 201, 4, 20, 214, 155, 210, 45, 234, 27, 240, 194, 111, 97, 7, 123, 125, 191, 233, 183, 201, 4, 20, 214, 155, 210, 45, 234, 27, 240, 194, 111, 97,
247, 155, 113, 122, 246, 192, 0, 70, 61, 76, 71, 70, 2, 247, 155, 113, 122, 246, 192, 0, 70, 61, 76, 71, 70, 2,
]); ]);
let expected_vsk: ViewingSecretKey = [ let expected_vsk = [
155, 90, 54, 75, 228, 130, 68, 201, 129, 251, 180, 195, 250, 64, 34, 230, 241, 204, 155, 90, 54, 75, 228, 130, 68, 201, 129, 251, 180, 195, 250, 64, 34, 230, 241, 204,
216, 50, 149, 156, 10, 67, 208, 74, 9, 10, 47, 59, 50, 202, 216, 50, 149, 156, 10, 67, 208, 74, 9, 10, 47, 59, 50, 202,
]; ];
@ -179,7 +185,7 @@ mod tests {
assert!(expected_ssk == keys.value.0.secret_spending_key); assert!(expected_ssk == keys.value.0.secret_spending_key);
assert!(expected_ccc == keys.ccc); assert!(expected_ccc == keys.ccc);
assert!(expected_nsk == keys.value.0.private_key_holder.nullifier_secret_key); assert!(expected_nsk == keys.value.0.private_key_holder.nullifier_secret_key);
assert!(expected_npk == keys.value.0.nullifer_public_key); assert!(expected_npk == keys.value.0.nullifier_public_key);
assert!(expected_vsk == keys.value.0.private_key_holder.viewing_secret_key); assert!(expected_vsk == keys.value.0.private_key_holder.viewing_secret_key);
assert!(expected_vpk_as_bytes == keys.value.0.viewing_public_key.to_bytes()); assert!(expected_vpk_as_bytes == keys.value.0.viewing_public_key.to_bytes());
} }
@ -197,31 +203,31 @@ mod tests {
let child_node = ChildKeysPrivate::nth_child(&root_node, 42_u32); let child_node = ChildKeysPrivate::nth_child(&root_node, 42_u32);
let expected_ccc: [u8; 32] = [ let expected_ccc: [u8; 32] = [
145, 59, 225, 32, 54, 168, 14, 45, 60, 253, 57, 202, 31, 86, 142, 234, 51, 57, 154, 88, 27, 73, 133, 213, 214, 63, 217, 184, 164, 17, 172, 140, 223, 95, 255, 157, 11, 0, 58,
132, 200, 92, 191, 220, 144, 42, 184, 108, 35, 226, 146, 53, 82, 147, 121, 120, 199, 50, 30, 28, 103, 24, 121, 187,
]; ];
let expected_nsk: NullifierSecretKey = [ let expected_nsk: NullifierSecretKey = [
19, 100, 119, 73, 191, 225, 234, 219, 129, 88, 40, 229, 63, 225, 189, 136, 69, 172, 124, 61, 40, 92, 33, 135, 3, 41, 200, 234, 3, 69, 102, 184, 57, 191, 106, 151, 194,
221, 186, 147, 83, 150, 207, 70, 17, 228, 70, 113, 87, 227, 31, 192, 103, 132, 141, 112, 249, 108, 192, 117, 24, 48, 70, 216,
]; ];
let expected_npk = nssa_core::NullifierPublicKey([ let expected_npk = nssa_core::NullifierPublicKey([
133, 235, 223, 151, 12, 69, 26, 222, 60, 125, 235, 125, 167, 212, 201, 168, 101, 242, 116, 231, 246, 189, 145, 240, 37, 59, 219, 223, 216, 246, 116, 171, 223, 55, 197, 200,
111, 239, 1, 228, 12, 252, 146, 53, 75, 17, 187, 255, 122, 181, 134, 192, 221, 40, 218, 167, 239, 5, 11, 95, 147, 247, 162, 226,
]); ]);
let expected_vsk: ViewingSecretKey = [ let expected_vsk: ViewingSecretKey = [
218, 219, 193, 132, 160, 6, 178, 194, 139, 248, 199, 81, 17, 133, 37, 201, 58, 104, 49, 33, 155, 68, 60, 102, 70, 47, 105, 194, 129, 44, 26, 143, 198, 44, 244, 185, 31, 236,
222, 187, 46, 156, 93, 14, 118, 209, 243, 38, 101, 77, 45, 252, 205, 89, 138, 107, 39, 38, 154, 73, 109, 166, 41, 114,
]; ];
let expected_vpk_as_bytes: [u8; 33] = [ let expected_vpk_as_bytes: [u8; 33] = [
3, 164, 65, 167, 88, 167, 179, 51, 159, 27, 241, 174, 77, 174, 142, 106, 128, 96, 69, 2, 78, 213, 113, 117, 105, 162, 248, 175, 68, 128, 232, 106, 204, 208, 159, 11, 78, 48,
74, 117, 231, 42, 193, 235, 153, 206, 116, 102, 7, 101, 192, 45, 244, 127, 112, 46, 0, 93, 184, 1, 77, 132, 160, 75, 152, 88,
]; ];
assert!(expected_ccc == child_node.ccc); assert!(expected_ccc == child_node.ccc);
assert!(expected_nsk == child_node.value.0.private_key_holder.nullifier_secret_key); assert!(expected_nsk == child_node.value.0.private_key_holder.nullifier_secret_key);
assert!(expected_npk == child_node.value.0.nullifer_public_key); assert!(expected_npk == child_node.value.0.nullifier_public_key);
assert!(expected_vsk == child_node.value.0.private_key_holder.viewing_secret_key); assert!(expected_vsk == child_node.value.0.private_key_holder.viewing_secret_key);
assert!(expected_vpk_as_bytes == child_node.value.0.viewing_public_key.to_bytes()); assert!(expected_vpk_as_bytes == child_node.value.0.viewing_public_key.to_bytes());
} }

View File

@ -13,17 +13,25 @@ pub struct ChildKeysPublic {
} }
impl ChildKeysPublic { impl ChildKeysPublic {
#[expect(clippy::big_endian_bytes, reason = "BIP-032 uses big endian")]
fn compute_hash_value(&self, cci: u32) -> [u8; 64] { fn compute_hash_value(&self, cci: u32) -> [u8; 64] {
let mut hash_input = vec![]; let mut hash_input = vec![];
if 2_u32.pow(31) > cci { if ((2_u32).pow(31)).cmp(&cci) == std::cmp::Ordering::Greater {
// Non-harden // Non-harden.
hash_input.extend_from_slice(self.cpk.value()); // BIP-032 compatibility requires 1-byte header from the public_key;
// Not stored in `self.cpk.value()`.
let sk = secp256k1::SecretKey::from_byte_array(*self.csk.value())
.expect("32 bytes, within curve order");
let pk = secp256k1::PublicKey::from_secret_key(&secp256k1::Secp256k1::new(), &sk);
hash_input.extend_from_slice(&secp256k1::PublicKey::serialize(&pk));
} else { } else {
// Harden // Harden.
hash_input.extend_from_slice(&[0_u8]);
hash_input.extend_from_slice(self.csk.value()); hash_input.extend_from_slice(self.csk.value());
} }
hash_input.extend_from_slice(&cci.to_le_bytes());
hash_input.extend_from_slice(&cci.to_be_bytes());
hmac_sha512::HMAC::mac(hash_input, self.ccc) hmac_sha512::HMAC::mac(hash_input, self.ccc)
} }
@ -55,11 +63,13 @@ impl KeyNode for ChildKeysPublic {
) )
.unwrap(); .unwrap();
let csk = nssa::PrivateKey::try_new( let csk = nssa::PrivateKey::try_new({
csk.add_tweak(&Scalar::from_le_bytes(*self.csk.value()).unwrap()) let scalar = Scalar::from_be_bytes(*self.csk.value()).unwrap();
csk.add_tweak(&scalar)
.expect("Expect a valid Scalar") .expect("Expect a valid Scalar")
.secret_bytes(), .secret_bytes()
) })
.unwrap(); .unwrap();
assert!( assert!(
@ -94,8 +104,12 @@ impl KeyNode for ChildKeysPublic {
} }
} }
impl<'keys> From<&'keys ChildKeysPublic> for &'keys nssa::PrivateKey { #[expect(
fn from(value: &'keys ChildKeysPublic) -> Self { clippy::single_char_lifetime_names,
reason = "TODO add meaningful name"
)]
impl<'a> From<&'a ChildKeysPublic> for &'a nssa::PrivateKey {
fn from(value: &'a ChildKeysPublic) -> Self {
&value.csk &value.csk
} }
} }
@ -126,6 +140,7 @@ mod tests {
202, 148, 181, 228, 35, 222, 58, 84, 156, 24, 146, 86, 202, 148, 181, 228, 35, 222, 58, 84, 156, 24, 146, 86,
]) ])
.unwrap(); .unwrap();
let expected_cpk: PublicKey = PublicKey::try_new([ let expected_cpk: PublicKey = PublicKey::try_new([
219, 141, 130, 105, 11, 203, 187, 124, 112, 75, 223, 22, 11, 164, 153, 127, 59, 247, 219, 141, 130, 105, 11, 203, 187, 124, 112, 75, 223, 22, 11, 164, 153, 127, 59, 247,
244, 166, 75, 66, 242, 224, 35, 156, 161, 75, 41, 51, 76, 245, 244, 166, 75, 66, 242, 224, 35, 156, 161, 75, 41, 51, 76, 245,
@ -149,26 +164,20 @@ mod tests {
let cci = (2_u32).pow(31) + 13; let cci = (2_u32).pow(31) + 13;
let child_keys = ChildKeysPublic::nth_child(&root_keys, cci); let child_keys = ChildKeysPublic::nth_child(&root_keys, cci);
print!(
"{} {}",
child_keys.csk.value()[0],
child_keys.csk.value()[1]
);
let expected_ccc = [ let expected_ccc = [
126, 175, 244, 41, 41, 173, 134, 103, 139, 140, 195, 86, 194, 147, 116, 48, 71, 107, 149, 226, 13, 4, 194, 12, 69, 29, 9, 234, 209, 119, 98, 4, 128, 91, 37, 103, 192, 31,
253, 235, 114, 139, 60, 115, 226, 205, 215, 248, 240, 190, 196, 6, 130, 126, 123, 20, 90, 34, 173, 209, 101, 248, 155, 36,
]; ];
let expected_csk: PrivateKey = PrivateKey::try_new([ let expected_csk: PrivateKey = PrivateKey::try_new([
128, 148, 53, 165, 222, 155, 163, 108, 186, 182, 124, 67, 90, 86, 59, 123, 95, 224, 9, 65, 33, 228, 25, 82, 219, 117, 91, 217, 11, 223, 144, 85, 246, 26, 123, 216, 107,
171, 4, 51, 131, 254, 57, 241, 178, 82, 161, 204, 206, 79, 107, 213, 33, 52, 188, 22, 198, 246, 71, 46, 245, 174, 16, 47,
]) ])
.unwrap(); .unwrap();
let expected_cpk: PublicKey = PublicKey::try_new([ let expected_cpk: PublicKey = PublicKey::try_new([
149, 240, 55, 15, 178, 67, 245, 254, 44, 141, 95, 223, 238, 62, 85, 11, 248, 9, 11, 40, 142, 143, 238, 159, 105, 165, 224, 252, 108, 62, 53, 209, 176, 219, 249, 38, 90, 241,
69, 211, 116, 13, 189, 35, 8, 95, 233, 154, 129, 58, 201, 81, 194, 146, 236, 5, 83, 152, 238, 243, 138, 16, 229, 15,
]) ])
.unwrap(); .unwrap();
@ -189,26 +198,20 @@ mod tests {
let cci = 13; let cci = 13;
let child_keys = ChildKeysPublic::nth_child(&root_keys, cci); let child_keys = ChildKeysPublic::nth_child(&root_keys, cci);
print!(
"{} {}",
child_keys.csk.value()[0],
child_keys.csk.value()[1]
);
let expected_ccc = [ let expected_ccc = [
50, 29, 113, 102, 49, 130, 64, 0, 247, 95, 135, 187, 118, 162, 65, 65, 194, 53, 189, 79, 228, 242, 119, 211, 203, 198, 175, 95, 36, 4, 234, 139, 45, 137, 138, 54, 211, 187,
242, 66, 178, 168, 2, 51, 193, 155, 72, 209, 2, 207, 251, 16, 28, 79, 80, 232, 216, 101, 145, 19, 101, 220, 217, 141,
]; ];
let expected_csk: PrivateKey = PrivateKey::try_new([ let expected_csk: PrivateKey = PrivateKey::try_new([
162, 32, 211, 190, 180, 74, 151, 246, 189, 93, 8, 57, 182, 239, 125, 245, 192, 255, 24, 185, 147, 32, 242, 145, 91, 123, 77, 42, 33, 134, 84, 12, 165, 117, 70, 158, 201, 95,
186, 251, 23, 194, 186, 252, 121, 190, 54, 147, 199, 1, 109, 153, 14, 12, 92, 235, 128, 156, 194, 169, 68, 35, 165, 127,
]) ])
.unwrap(); .unwrap();
let expected_cpk: PublicKey = PublicKey::try_new([ let expected_cpk: PublicKey = PublicKey::try_new([
183, 48, 207, 170, 221, 111, 118, 9, 40, 67, 123, 162, 159, 169, 34, 157, 23, 37, 232, 119, 16, 145, 121, 97, 244, 186, 35, 136, 34, 140, 171, 206, 139, 11, 208, 207, 121,
102, 231, 187, 199, 191, 205, 146, 159, 22, 79, 100, 10, 223, 158, 45, 28, 22, 140, 98, 161, 179, 212, 173, 238, 220, 2, 34,
]) ])
.unwrap(); .unwrap();
@ -230,19 +233,19 @@ mod tests {
let child_keys = ChildKeysPublic::nth_child(&root_keys, cci); let child_keys = ChildKeysPublic::nth_child(&root_keys, cci);
let expected_ccc = [ let expected_ccc = [
101, 15, 69, 152, 144, 22, 105, 89, 175, 21, 13, 50, 160, 167, 93, 80, 94, 99, 192, 221, 208, 47, 189, 174, 152, 33, 25, 151, 114, 233, 191, 57, 15, 40, 140, 46, 87, 126,
252, 1, 126, 196, 217, 149, 164, 60, 75, 237, 90, 104, 83, 58, 215, 40, 246, 111, 166, 113, 183, 145, 173, 11, 27, 182,
]; ];
let expected_csk: PrivateKey = PrivateKey::try_new([ let expected_csk: PrivateKey = PrivateKey::try_new([
46, 196, 131, 199, 190, 180, 250, 222, 41, 188, 221, 156, 255, 239, 251, 207, 239, 202, 223, 29, 87, 189, 126, 24, 117, 225, 190, 57, 0, 143, 207, 168, 231, 139, 170, 192, 81,
166, 216, 107, 236, 195, 48, 167, 69, 97, 13, 132, 117, 76, 89, 254, 126, 10, 115, 42, 141, 157, 70, 171, 199, 231, 198, 132,
]) ])
.unwrap(); .unwrap();
let expected_cpk: PublicKey = PublicKey::try_new([ let expected_cpk: PublicKey = PublicKey::try_new([
93, 151, 154, 238, 175, 198, 53, 146, 255, 43, 37, 52, 214, 165, 69, 161, 38, 20, 68, 96, 123, 245, 51, 214, 216, 215, 205, 70, 145, 105, 221, 166, 169, 122, 27, 94, 112,
166, 143, 80, 149, 216, 124, 203, 240, 114, 168, 111, 33, 83, 228, 110, 249, 177, 85, 173, 180, 248, 185, 199, 112, 246, 83, 33,
]) ])
.unwrap(); .unwrap();

View File

@ -1,7 +1,7 @@
use std::{collections::BTreeMap, sync::Arc}; use std::collections::BTreeMap;
use anyhow::Result; use anyhow::Result;
use common::sequencer_client::SequencerClient; use nssa::{Account, AccountId};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use crate::key_management::{ use crate::key_management::{
@ -197,40 +197,6 @@ impl<N: KeyNode> KeyTree<N> {
} }
impl KeyTree<ChildKeysPrivate> { impl KeyTree<ChildKeysPrivate> {
/// Cleanup of all non-initialized accounts in a private tree.
///
/// For given `depth` checks children to a tree such that their `ChainIndex::depth(&self) <
/// depth`.
///
/// If account is default, removes them.
///
/// Chain must be parsed for accounts beforehand.
///
/// Fast, leaves gaps between accounts.
pub fn cleanup_tree_remove_uninit_for_depth(&mut self, depth: u32) {
let mut id_stack = vec![ChainIndex::root()];
while let Some(curr_id) = id_stack.pop() {
if let Some(node) = self.key_map.get(&curr_id)
&& node.value.1 == nssa::Account::default()
&& curr_id != ChainIndex::root()
{
let addr = node.account_id();
self.remove(addr);
}
let mut next_id = curr_id.nth_child(0);
while (next_id.depth()) < depth {
id_stack.push(next_id.clone());
next_id = match next_id.next_in_line() {
Some(id) => id,
None => break,
};
}
}
}
/// Cleanup of non-initialized accounts in a private tree. /// Cleanup of non-initialized accounts in a private tree.
/// ///
/// If account is default, removes them, stops at first non-default account. /// If account is default, removes them, stops at first non-default account.
@ -259,56 +225,17 @@ impl KeyTree<ChildKeysPrivate> {
} }
impl KeyTree<ChildKeysPublic> { impl KeyTree<ChildKeysPublic> {
/// Cleanup of all non-initialized accounts in a public tree.
///
/// For given `depth` checks children to a tree such that their `ChainIndex::depth(&self) <
/// depth`.
///
/// If account is default, removes them.
///
/// Fast, leaves gaps between accounts.
pub async fn cleanup_tree_remove_ininit_for_depth(
&mut self,
depth: u32,
client: Arc<SequencerClient>,
) -> Result<()> {
let mut id_stack = vec![ChainIndex::root()];
while let Some(curr_id) = id_stack.pop() {
if let Some(node) = self.key_map.get(&curr_id) {
let address = node.account_id();
let node_acc = client.get_account(address).await?.account;
if node_acc == nssa::Account::default() && curr_id != ChainIndex::root() {
self.remove(address);
}
}
let mut next_id = curr_id.nth_child(0);
while (next_id.depth()) < depth {
id_stack.push(next_id.clone());
next_id = match next_id.next_in_line() {
Some(id) => id,
None => break,
};
}
}
Ok(())
}
/// Cleanup of non-initialized accounts in a public tree. /// Cleanup of non-initialized accounts in a public tree.
/// ///
/// If account is default, removes them, stops at first non-default account. /// If account is default, removes them, stops at first non-default account.
/// ///
/// Walks through tree in lairs of same depth using `ChainIndex::chain_ids_at_depth()`. /// Walks through tree in layers of same depth using `ChainIndex::chain_ids_at_depth()`.
/// ///
/// Slow, maintains tree consistency. /// Slow, maintains tree consistency.
pub async fn cleanup_tree_remove_uninit_layered( pub async fn cleanup_tree_remove_uninit_layered<F: Future<Output = Result<Account>>>(
&mut self, &mut self,
depth: u32, depth: u32,
client: Arc<SequencerClient>, get_account: impl Fn(AccountId) -> F,
) -> Result<()> { ) -> Result<()> {
let depth = usize::try_from(depth).expect("Depth is expected to fit in usize"); let depth = usize::try_from(depth).expect("Depth is expected to fit in usize");
'outer: for i in (1..depth).rev() { 'outer: for i in (1..depth).rev() {
@ -316,7 +243,7 @@ impl KeyTree<ChildKeysPublic> {
for id in ChainIndex::chain_ids_at_depth(i) { for id in ChainIndex::chain_ids_at_depth(i) {
if let Some(node) = self.key_map.get(&id) { if let Some(node) = self.key_map.get(&id) {
let address = node.account_id(); let address = node.account_id();
let node_acc = client.get_account(address).await?.account; let node_acc = get_account(address).await?;
if node_acc == nssa::Account::default() { if node_acc == nssa::Account::default() {
let addr = node.account_id(); let addr = node.account_id();

View File

@ -16,7 +16,7 @@ pub type PublicAccountSigningKey = [u8; 32];
pub struct KeyChain { pub struct KeyChain {
pub secret_spending_key: SecretSpendingKey, pub secret_spending_key: SecretSpendingKey,
pub private_key_holder: PrivateKeyHolder, pub private_key_holder: PrivateKeyHolder,
pub nullifer_public_key: NullifierPublicKey, pub nullifier_public_key: NullifierPublicKey,
pub viewing_public_key: ViewingPublicKey, pub viewing_public_key: ViewingPublicKey,
} }
@ -30,13 +30,13 @@ impl KeyChain {
let private_key_holder = secret_spending_key.produce_private_key_holder(None); let private_key_holder = secret_spending_key.produce_private_key_holder(None);
let nullifer_public_key = private_key_holder.generate_nullifier_public_key(); let nullifier_public_key = private_key_holder.generate_nullifier_public_key();
let viewing_public_key = private_key_holder.generate_viewing_public_key(); let viewing_public_key = private_key_holder.generate_viewing_public_key();
Self { Self {
secret_spending_key, secret_spending_key,
private_key_holder, private_key_holder,
nullifer_public_key, nullifier_public_key,
viewing_public_key, viewing_public_key,
} }
} }
@ -50,13 +50,13 @@ impl KeyChain {
let private_key_holder = secret_spending_key.produce_private_key_holder(None); let private_key_holder = secret_spending_key.produce_private_key_holder(None);
let nullifer_public_key = private_key_holder.generate_nullifier_public_key(); let nullifier_public_key = private_key_holder.generate_nullifier_public_key();
let viewing_public_key = private_key_holder.generate_viewing_public_key(); let viewing_public_key = private_key_holder.generate_viewing_public_key();
Self { Self {
secret_spending_key, secret_spending_key,
private_key_holder, private_key_holder,
nullifer_public_key, nullifier_public_key,
viewing_public_key, viewing_public_key,
} }
} }
@ -93,7 +93,7 @@ mod tests {
// Check that key holder fields are initialized with expected types // Check that key holder fields are initialized with expected types
assert_ne!( assert_ne!(
account_id_key_holder.nullifer_public_key.as_ref(), account_id_key_holder.nullifier_public_key.as_ref(),
&[0_u8; 32] &[0_u8; 32]
); );
} }
@ -119,7 +119,7 @@ mod tests {
let utxo_secret_key_holder = top_secret_key_holder.produce_private_key_holder(None); let utxo_secret_key_holder = top_secret_key_holder.produce_private_key_holder(None);
let nullifer_public_key = utxo_secret_key_holder.generate_nullifier_public_key(); let nullifier_public_key = utxo_secret_key_holder.generate_nullifier_public_key();
let viewing_public_key = utxo_secret_key_holder.generate_viewing_public_key(); let viewing_public_key = utxo_secret_key_holder.generate_viewing_public_key();
let pub_account_signing_key = nssa::PrivateKey::new_os_random(); let pub_account_signing_key = nssa::PrivateKey::new_os_random();
@ -150,7 +150,7 @@ mod tests {
println!("Account {:?}", account.value().to_base58()); println!("Account {:?}", account.value().to_base58());
println!( println!(
"Nulifier public key {:?}", "Nulifier public key {:?}",
hex::encode(nullifer_public_key.to_byte_array()) hex::encode(nullifier_public_key.to_byte_array())
); );
println!( println!(
"Viewing public key {:?}", "Viewing public key {:?}",
@ -183,7 +183,7 @@ mod tests {
fn non_trivial_chain_index() { fn non_trivial_chain_index() {
let keys = account_with_chain_index_2_for_tests(); let keys = account_with_chain_index_2_for_tests();
let eph_key_holder = EphemeralKeyHolder::new(&keys.nullifer_public_key); let eph_key_holder = EphemeralKeyHolder::new(&keys.nullifier_public_key);
let key_sender = eph_key_holder.calculate_shared_secret_sender(&keys.viewing_public_key); let key_sender = eph_key_holder.calculate_shared_secret_sender(&keys.viewing_public_key);
let key_receiver = keys.calculate_shared_secret_receiver( let key_receiver = keys.calculate_shared_secret_receiver(

View File

@ -10,16 +10,16 @@ use sha2::{Digest as _, digest::FixedOutput as _};
const NSSA_ENTROPY_BYTES: [u8; 32] = [0; 32]; const NSSA_ENTROPY_BYTES: [u8; 32] = [0; 32];
#[derive(Debug)]
/// Seed holder. Non-clonable to ensure that different holders use different seeds. /// Seed holder. Non-clonable to ensure that different holders use different seeds.
/// Produces `TopSecretKeyHolder` objects. /// Produces `TopSecretKeyHolder` objects.
#[derive(Debug)]
pub struct SeedHolder { pub struct SeedHolder {
// ToDo: Needs to be vec as serde derives is not implemented for [u8; 64] // ToDo: Needs to be vec as serde derives is not implemented for [u8; 64]
pub(crate) seed: Vec<u8>, pub(crate) seed: Vec<u8>,
} }
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
/// Secret spending key object. Can produce `PrivateKeyHolder` objects. /// Secret spending key object. Can produce `PrivateKeyHolder` objects.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
pub struct SecretSpendingKey(pub(crate) [u8; 32]); pub struct SecretSpendingKey(pub(crate) [u8; 32]);
pub type ViewingSecretKey = Scalar; pub type ViewingSecretKey = Scalar;
@ -79,6 +79,7 @@ impl SeedHolder {
impl SecretSpendingKey { impl SecretSpendingKey {
#[must_use] #[must_use]
#[expect(clippy::big_endian_bytes, reason = "BIP-032 uses big endian")]
pub fn generate_nullifier_secret_key(&self, index: Option<u32>) -> NullifierSecretKey { pub fn generate_nullifier_secret_key(&self, index: Option<u32>) -> NullifierSecretKey {
const PREFIX: &[u8; 8] = b"LEE/keys"; const PREFIX: &[u8; 8] = b"LEE/keys";
const SUFFIX_1: &[u8; 1] = &[1]; const SUFFIX_1: &[u8; 1] = &[1];
@ -93,13 +94,14 @@ impl SecretSpendingKey {
hasher.update(PREFIX); hasher.update(PREFIX);
hasher.update(self.0); hasher.update(self.0);
hasher.update(SUFFIX_1); hasher.update(SUFFIX_1);
hasher.update(index.to_le_bytes()); hasher.update(index.to_be_bytes());
hasher.update(SUFFIX_2); hasher.update(SUFFIX_2);
<NullifierSecretKey>::from(hasher.finalize_fixed()) <NullifierSecretKey>::from(hasher.finalize_fixed())
} }
#[must_use] #[must_use]
#[expect(clippy::big_endian_bytes, reason = "BIP-032 uses big endian")]
pub fn generate_viewing_secret_key(&self, index: Option<u32>) -> ViewingSecretKey { pub fn generate_viewing_secret_key(&self, index: Option<u32>) -> ViewingSecretKey {
const PREFIX: &[u8; 8] = b"LEE/keys"; const PREFIX: &[u8; 8] = b"LEE/keys";
const SUFFIX_1: &[u8; 1] = &[2]; const SUFFIX_1: &[u8; 1] = &[2];
@ -114,7 +116,7 @@ impl SecretSpendingKey {
hasher.update(PREFIX); hasher.update(PREFIX);
hasher.update(self.0); hasher.update(self.0);
hasher.update(SUFFIX_1); hasher.update(SUFFIX_1);
hasher.update(index.to_le_bytes()); hasher.update(index.to_be_bytes());
hasher.update(SUFFIX_2); hasher.update(SUFFIX_2);
hasher.finalize_fixed().into() hasher.finalize_fixed().into()

View File

@ -46,7 +46,7 @@ impl NSSAUserData {
) -> bool { ) -> bool {
let mut check_res = true; let mut check_res = true;
for (account_id, (key, _)) in accounts_keys_map { for (account_id, (key, _)) in accounts_keys_map {
let expected_account_id = nssa::AccountId::from(&key.nullifer_public_key); let expected_account_id = nssa::AccountId::from(&key.nullifier_public_key);
if expected_account_id != *account_id { if expected_account_id != *account_id {
println!("{expected_account_id}, {account_id}"); println!("{expected_account_id}, {account_id}");
check_res = false; check_res = false;
@ -66,13 +66,13 @@ impl NSSAUserData {
) -> Result<Self> { ) -> Result<Self> {
if !Self::valid_public_key_transaction_pairing_check(&default_accounts_keys) { if !Self::valid_public_key_transaction_pairing_check(&default_accounts_keys) {
anyhow::bail!( anyhow::bail!(
"Key transaction pairing check not satisfied, there is account_ids, which is not derived from keys" "Key transaction pairing check not satisfied, there are public account_ids, which are not derived from keys"
); );
} }
if !Self::valid_private_key_transaction_pairing_check(&default_accounts_key_chains) { if !Self::valid_private_key_transaction_pairing_check(&default_accounts_key_chains) {
anyhow::bail!( anyhow::bail!(
"Key transaction pairing check not satisfied, there is account_ids, which is not derived from keys" "Key transaction pairing check not satisfied, there are private account_ids, which are not derived from keys"
); );
} }

View File

@ -14,6 +14,7 @@ anyhow.workspace = true
thiserror.workspace = true thiserror.workspace = true
risc0-zkvm.workspace = true risc0-zkvm.workspace = true
serde.workspace = true serde.workspace = true
serde_with.workspace = true
sha2.workspace = true sha2.workspace = true
rand.workspace = true rand.workspace = true
borsh.workspace = true borsh.workspace = true
@ -37,4 +38,4 @@ test-case = "3.3.1"
[features] [features]
default = [] default = []
prove = ["risc0-zkvm/prove"] prove = ["risc0-zkvm/prove"]
test-utils = [] test-utils = []

View File

@ -1,3 +1,5 @@
use std::str::FromStr;
use borsh::{BorshDeserialize, BorshSerialize}; use borsh::{BorshDeserialize, BorshSerialize};
pub use private_key::PrivateKey; pub use private_key::PrivateKey;
pub use public_key::PublicKey; pub use public_key::PublicKey;
@ -12,11 +14,27 @@ pub struct Signature {
} }
impl std::fmt::Debug for Signature { impl std::fmt::Debug for Signature {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
std::fmt::Display::fmt(self, f)
}
}
impl std::fmt::Display for Signature {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", hex::encode(self.value)) write!(f, "{}", hex::encode(self.value))
} }
} }
impl FromStr for Signature {
type Err = hex::FromHexError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut bytes = [0_u8; 64];
hex::decode_to_slice(s, &mut bytes)?;
Ok(Self { value: bytes })
}
}
impl Signature { impl Signature {
#[must_use] #[must_use]
pub fn new(key: &PrivateKey, message: &[u8]) -> Self { pub fn new(key: &PrivateKey, message: &[u8]) -> Self {

View File

@ -1,13 +1,37 @@
use std::str::FromStr;
use rand::{Rng as _, rngs::OsRng}; use rand::{Rng as _, rngs::OsRng};
use serde::{Deserialize, Serialize}; use serde_with::{DeserializeFromStr, SerializeDisplay};
use crate::error::NssaError; use crate::error::NssaError;
// TODO: Remove Debug, Clone, Serialize, Deserialize, PartialEq and Eq for security reasons // TODO: Remove Debug, Clone, Serialize, Deserialize, PartialEq and Eq for security reasons
// TODO: Implement Zeroize // TODO: Implement Zeroize
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] #[derive(Clone, SerializeDisplay, DeserializeFromStr, PartialEq, Eq)]
pub struct PrivateKey([u8; 32]); pub struct PrivateKey([u8; 32]);
impl std::fmt::Debug for PrivateKey {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
std::fmt::Display::fmt(self, f)
}
}
impl std::fmt::Display for PrivateKey {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", hex::encode(self.0))
}
}
impl FromStr for PrivateKey {
type Err = NssaError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut bytes = [0_u8; 32];
hex::decode_to_slice(s, &mut bytes).map_err(|_err| NssaError::InvalidPrivateKey)?;
Self::try_new(bytes)
}
}
impl PrivateKey { impl PrivateKey {
#[must_use] #[must_use]
pub fn new_os_random() -> Self { pub fn new_os_random() -> Self {

View File

@ -1,19 +1,38 @@
use std::str::FromStr;
use borsh::{BorshDeserialize, BorshSerialize}; use borsh::{BorshDeserialize, BorshSerialize};
use nssa_core::account::AccountId; use nssa_core::account::AccountId;
use serde::{Deserialize, Serialize}; use serde_with::{DeserializeFromStr, SerializeDisplay};
use sha2::{Digest as _, Sha256}; use sha2::{Digest as _, Sha256};
use crate::{PrivateKey, error::NssaError}; use crate::{PrivateKey, error::NssaError};
#[derive(Clone, PartialEq, Eq, BorshSerialize, Serialize, Deserialize)] #[derive(Clone, PartialEq, Eq, BorshSerialize, SerializeDisplay, DeserializeFromStr)]
pub struct PublicKey([u8; 32]); pub struct PublicKey([u8; 32]);
impl std::fmt::Debug for PublicKey { impl std::fmt::Debug for PublicKey {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
std::fmt::Display::fmt(self, f)
}
}
impl std::fmt::Display for PublicKey {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", hex::encode(self.0)) write!(f, "{}", hex::encode(self.0))
} }
} }
impl FromStr for PublicKey {
type Err = NssaError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut bytes = [0_u8; 32];
hex::decode_to_slice(s, &mut bytes)
.map_err(|_err| NssaError::InvalidPublicKey(secp256k1::Error::InvalidPublicKey))?;
Self::try_new(bytes)
}
}
impl BorshDeserialize for PublicKey { impl BorshDeserialize for PublicKey {
fn deserialize_reader<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> { fn deserialize_reader<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
let mut buf = [0_u8; 32]; let mut buf = [0_u8; 32];

View File

@ -8,10 +8,9 @@ license = { workspace = true }
workspace = true workspace = true
[dependencies] [dependencies]
nssa = { workspace = true, optional = true, features = ["test-utils"], default-features = true }
nssa_core.workspace = true nssa_core.workspace = true
token_core.workspace = true token_core.workspace = true
amm_core.workspace = true amm_core.workspace = true
[features] [dev-dependencies]
nssa = ["dep:nssa"] nssa = { workspace = true, features = ["test-utils"] }

View File

@ -4,7 +4,6 @@ use amm_core::{
PoolDefinition, compute_liquidity_token_pda, compute_liquidity_token_pda_seed, PoolDefinition, compute_liquidity_token_pda, compute_liquidity_token_pda_seed,
compute_pool_pda, compute_vault_pda, compute_vault_pda_seed, compute_pool_pda, compute_vault_pda, compute_vault_pda_seed,
}; };
#[cfg(feature = "nssa")]
use nssa::{ use nssa::{
PrivateKey, PublicKey, PublicTransaction, V02State, program::Program, public_transaction, PrivateKey, PublicKey, PublicTransaction, V02State, program::Program, public_transaction,
}; };
@ -25,16 +24,15 @@ struct BalanceForTests;
struct ChainedCallForTests; struct ChainedCallForTests;
struct IdForTests; struct IdForTests;
struct AccountWithMetadataForTests; struct AccountWithMetadataForTests;
#[cfg(feature = "nssa")]
struct PrivateKeysForTests; struct PrivateKeysForTests;
#[cfg(feature = "nssa")]
struct IdForExeTests; struct IdForExeTests;
#[cfg(feature = "nssa")]
struct BalanceForExeTests; struct BalanceForExeTests;
#[cfg(feature = "nssa")]
struct AccountsForExeTests; struct AccountsForExeTests;
#[cfg(feature = "nssa")]
impl PrivateKeysForTests { impl PrivateKeysForTests {
fn user_token_a_key() -> PrivateKey { fn user_token_a_key() -> PrivateKey {
PrivateKey::try_new([31; 32]).expect("Keys constructor expects valid private key") PrivateKey::try_new([31; 32]).expect("Keys constructor expects valid private key")
@ -1008,7 +1006,6 @@ impl AccountWithMetadataForTests {
} }
} }
#[cfg(feature = "nssa")]
impl BalanceForExeTests { impl BalanceForExeTests {
fn user_token_a_holding_init() -> u128 { fn user_token_a_holding_init() -> u128 {
10_000 10_000
@ -1172,7 +1169,6 @@ impl BalanceForExeTests {
} }
} }
#[cfg(feature = "nssa")]
impl IdForExeTests { impl IdForExeTests {
fn pool_definition_id() -> AccountId { fn pool_definition_id() -> AccountId {
amm_core::compute_pool_pda( amm_core::compute_pool_pda(
@ -1229,7 +1225,6 @@ impl IdForExeTests {
} }
} }
#[cfg(feature = "nssa")]
impl AccountsForExeTests { impl AccountsForExeTests {
fn user_token_a_holding() -> Account { fn user_token_a_holding() -> Account {
Account { Account {
@ -2641,7 +2636,6 @@ fn new_definition_lp_symmetric_amounts() {
assert_eq!(chained_call_lp, expected_lp_call); assert_eq!(chained_call_lp, expected_lp_call);
} }
#[cfg(feature = "nssa")]
fn state_for_amm_tests() -> V02State { fn state_for_amm_tests() -> V02State {
let initial_data = []; let initial_data = [];
let mut state = V02State::new_with_genesis_accounts(&initial_data, &[]); let mut state = V02State::new_with_genesis_accounts(&initial_data, &[]);
@ -2685,7 +2679,6 @@ fn state_for_amm_tests() -> V02State {
state state
} }
#[cfg(feature = "nssa")]
fn state_for_amm_tests_with_new_def() -> V02State { fn state_for_amm_tests_with_new_def() -> V02State {
let initial_data = []; let initial_data = [];
let mut state = V02State::new_with_genesis_accounts(&initial_data, &[]); let mut state = V02State::new_with_genesis_accounts(&initial_data, &[]);
@ -2708,7 +2701,6 @@ fn state_for_amm_tests_with_new_def() -> V02State {
state state
} }
#[cfg(feature = "nssa")]
#[test] #[test]
fn simple_amm_remove() { fn simple_amm_remove() {
let mut state = state_for_amm_tests(); let mut state = state_for_amm_tests();
@ -2768,7 +2760,6 @@ fn simple_amm_remove() {
assert_eq!(user_token_lp_post, expected_user_token_lp); assert_eq!(user_token_lp_post, expected_user_token_lp);
} }
#[cfg(feature = "nssa")]
#[test] #[test]
fn simple_amm_new_definition_inactive_initialized_pool_and_uninit_user_lp() { fn simple_amm_new_definition_inactive_initialized_pool_and_uninit_user_lp() {
let mut state = state_for_amm_tests_with_new_def(); let mut state = state_for_amm_tests_with_new_def();
@ -2849,7 +2840,6 @@ fn simple_amm_new_definition_inactive_initialized_pool_and_uninit_user_lp() {
assert_eq!(user_token_lp_post, expected_user_token_lp); assert_eq!(user_token_lp_post, expected_user_token_lp);
} }
#[cfg(feature = "nssa")]
#[test] #[test]
fn simple_amm_new_definition_inactive_initialized_pool_init_user_lp() { fn simple_amm_new_definition_inactive_initialized_pool_init_user_lp() {
let mut state = state_for_amm_tests_with_new_def(); let mut state = state_for_amm_tests_with_new_def();
@ -2934,7 +2924,6 @@ fn simple_amm_new_definition_inactive_initialized_pool_init_user_lp() {
assert_eq!(user_token_lp_post, expected_user_token_lp); assert_eq!(user_token_lp_post, expected_user_token_lp);
} }
#[cfg(feature = "nssa")]
#[test] #[test]
fn simple_amm_new_definition_uninitialized_pool() { fn simple_amm_new_definition_uninitialized_pool() {
let mut state = state_for_amm_tests_with_new_def(); let mut state = state_for_amm_tests_with_new_def();
@ -3007,7 +2996,6 @@ fn simple_amm_new_definition_uninitialized_pool() {
assert_eq!(user_token_lp_post, expected_user_token_lp); assert_eq!(user_token_lp_post, expected_user_token_lp);
} }
#[cfg(feature = "nssa")]
#[test] #[test]
fn simple_amm_add() { fn simple_amm_add() {
let mut state = state_for_amm_tests(); let mut state = state_for_amm_tests();
@ -3070,7 +3058,6 @@ fn simple_amm_add() {
assert_eq!(user_token_lp_post, expected_user_token_lp); assert_eq!(user_token_lp_post, expected_user_token_lp);
} }
#[cfg(feature = "nssa")]
#[test] #[test]
fn simple_amm_swap_1() { fn simple_amm_swap_1() {
let mut state = state_for_amm_tests(); let mut state = state_for_amm_tests();
@ -3122,7 +3109,6 @@ fn simple_amm_swap_1() {
assert_eq!(user_token_b_post, expected_user_token_b); assert_eq!(user_token_b_post, expected_user_token_b);
} }
#[cfg(feature = "nssa")]
#[test] #[test]
fn simple_amm_swap_2() { fn simple_amm_swap_2() {
let mut state = state_for_amm_tests(); let mut state = state_for_amm_tests();

View File

@ -7,7 +7,7 @@ use common::{
transaction::NSSATransaction, transaction::NSSATransaction,
}; };
use nssa::V02State; use nssa::V02State;
use storage::sequencer::RocksDBIO; use storage::{error::DbError, sequencer::RocksDBIO};
pub struct SequencerStore { pub struct SequencerStore {
dbio: RocksDBIO, dbio: RocksDBIO,
@ -42,8 +42,8 @@ impl SequencerStore {
}) })
} }
pub fn get_block_at_id(&self, id: u64) -> Result<Block> { pub fn get_block_at_id(&self, id: u64) -> Result<Option<Block>, DbError> {
Ok(self.dbio.get_block(id)?) self.dbio.get_block(id)
} }
pub fn delete_block_at_id(&mut self, block_id: u64) -> Result<()> { pub fn delete_block_at_id(&mut self, block_id: u64) -> Result<()> {
@ -56,16 +56,20 @@ impl SequencerStore {
/// Returns the transaction corresponding to the given hash, if it exists in the blockchain. /// Returns the transaction corresponding to the given hash, if it exists in the blockchain.
pub fn get_transaction_by_hash(&self, hash: HashType) -> Option<NSSATransaction> { pub fn get_transaction_by_hash(&self, hash: HashType) -> Option<NSSATransaction> {
let block_id = self.tx_hash_to_block_map.get(&hash); let block_id = *self.tx_hash_to_block_map.get(&hash)?;
let block = block_id.map(|&id| self.get_block_at_id(id)); let block = self
if let Some(Ok(block)) = block { .get_block_at_id(block_id)
for transaction in block.body.transactions { .ok()
if transaction.hash() == hash { .flatten()
return Some(transaction); .expect("Block should be present since the hash is in the map");
} for transaction in block.body.transactions {
if transaction.hash() == hash {
return Some(transaction);
} }
} }
None panic!(
"Transaction hash was in the map but transaction was not found in the block. This should never happen."
);
} }
pub fn latest_block_meta(&self) -> Result<BlockMeta> { pub fn latest_block_meta(&self) -> Result<BlockMeta> {
@ -244,7 +248,7 @@ mod tests {
node_store.update(&block, [1; 32], &dummy_state).unwrap(); node_store.update(&block, [1; 32], &dummy_state).unwrap();
// Verify initial status is Pending // Verify initial status is Pending
let retrieved_block = node_store.get_block_at_id(block_id).unwrap(); let retrieved_block = node_store.get_block_at_id(block_id).unwrap().unwrap();
assert!(matches!( assert!(matches!(
retrieved_block.bedrock_status, retrieved_block.bedrock_status,
common::block::BedrockStatus::Pending common::block::BedrockStatus::Pending
@ -254,7 +258,7 @@ mod tests {
node_store.mark_block_as_finalized(block_id).unwrap(); node_store.mark_block_as_finalized(block_id).unwrap();
// Verify status is now Finalized // Verify status is now Finalized
let finalized_block = node_store.get_block_at_id(block_id).unwrap(); let finalized_block = node_store.get_block_at_id(block_id).unwrap().unwrap();
assert!(matches!( assert!(matches!(
finalized_block.bedrock_status, finalized_block.bedrock_status,
common::block::BedrockStatus::Finalized common::block::BedrockStatus::Finalized

View File

@ -22,8 +22,6 @@ use url::Url;
pub struct SequencerConfig { pub struct SequencerConfig {
/// Home dir of sequencer storage. /// Home dir of sequencer storage.
pub home: PathBuf, pub home: PathBuf,
/// Override rust log (env var logging level).
pub override_rust_log: Option<String>,
/// Genesis id. /// Genesis id.
pub genesis_id: u64, pub genesis_id: u64,
/// If `True`, then adds random sequence of bytes to genesis block. /// If `True`, then adds random sequence of bytes to genesis block.
@ -41,8 +39,6 @@ pub struct SequencerConfig {
/// Interval in which pending blocks are retried. /// Interval in which pending blocks are retried.
#[serde(with = "humantime_serde")] #[serde(with = "humantime_serde")]
pub retry_pending_blocks_timeout: Duration, pub retry_pending_blocks_timeout: Duration,
/// Port to listen.
pub port: u16,
/// List of initial accounts data. /// List of initial accounts data.
pub initial_accounts: Vec<AccountInitialData>, pub initial_accounts: Vec<AccountInitialData>,
/// List of initial commitments. /// List of initial commitments.

View File

@ -15,6 +15,7 @@ use logos_blockchain_key_management_system_service::keys::{ED25519_SECRET_KEY_SI
use mempool::{MemPool, MemPoolHandle}; use mempool::{MemPool, MemPoolHandle};
#[cfg(feature = "mock")] #[cfg(feature = "mock")]
pub use mock::SequencerCoreWithMockClients; pub use mock::SequencerCoreWithMockClients;
pub use storage::error::DbError;
use crate::{ use crate::{
block_settlement_client::{BlockSettlementClient, BlockSettlementClientTrait, MsgId}, block_settlement_client::{BlockSettlementClient, BlockSettlementClientTrait, MsgId},
@ -392,14 +393,12 @@ mod tests {
SequencerConfig { SequencerConfig {
home, home,
override_rust_log: Some("info".to_owned()),
genesis_id: 1, genesis_id: 1,
is_genesis_random: false, is_genesis_random: false,
max_num_tx_in_block: 10, max_num_tx_in_block: 10,
max_block_size: bytesize::ByteSize::mib(1), max_block_size: bytesize::ByteSize::mib(1),
mempool_max_size: 10000, mempool_max_size: 10000,
block_create_timeout: Duration::from_secs(1), block_create_timeout: Duration::from_secs(1),
port: 8080,
initial_accounts, initial_accounts,
initial_commitments: vec![], initial_commitments: vec![],
signing_key: *sequencer_sign_key_for_testing().value(), signing_key: *sequencer_sign_key_for_testing().value(),
@ -480,7 +479,6 @@ mod tests {
assert_eq!(sequencer.chain_height, config.genesis_id); assert_eq!(sequencer.chain_height, config.genesis_id);
assert_eq!(sequencer.sequencer_config.max_num_tx_in_block, 10); assert_eq!(sequencer.sequencer_config.max_num_tx_in_block, 10);
assert_eq!(sequencer.sequencer_config.port, 8080);
let acc1_account_id = config.initial_accounts[0].account_id; let acc1_account_id = config.initial_accounts[0].account_id;
let acc2_account_id = config.initial_accounts[1].account_id; let acc2_account_id = config.initial_accounts[1].account_id;
@ -698,6 +696,7 @@ mod tests {
let block = sequencer let block = sequencer
.store .store
.get_block_at_id(sequencer.chain_height) .get_block_at_id(sequencer.chain_height)
.unwrap()
.unwrap(); .unwrap();
// Only one should be included in the block // Only one should be included in the block
@ -725,6 +724,7 @@ mod tests {
let block = sequencer let block = sequencer
.store .store
.get_block_at_id(sequencer.chain_height) .get_block_at_id(sequencer.chain_height)
.unwrap()
.unwrap(); .unwrap();
assert_eq!(block.body.transactions, vec![tx.clone()]); assert_eq!(block.body.transactions, vec![tx.clone()]);
@ -736,6 +736,7 @@ mod tests {
let block = sequencer let block = sequencer
.store .store
.get_block_at_id(sequencer.chain_height) .get_block_at_id(sequencer.chain_height)
.unwrap()
.unwrap(); .unwrap();
assert!(block.body.transactions.is_empty()); assert!(block.body.transactions.is_empty());
} }
@ -770,6 +771,7 @@ mod tests {
let block = sequencer let block = sequencer
.store .store
.get_block_at_id(sequencer.chain_height) .get_block_at_id(sequencer.chain_height)
.unwrap()
.unwrap(); .unwrap();
assert_eq!(block.body.transactions, vec![tx.clone()]); assert_eq!(block.body.transactions, vec![tx.clone()]);
} }
@ -888,6 +890,7 @@ mod tests {
let new_block = sequencer let new_block = sequencer
.store .store
.get_block_at_id(sequencer.chain_height) .get_block_at_id(sequencer.chain_height)
.unwrap()
.unwrap(); .unwrap();
assert_eq!( assert_eq!(

View File

@ -1,5 +1,5 @@
[package] [package]
name = "sequencer_runner" name = "sequencer_service"
version = "0.1.0" version = "0.1.0"
edition = "2024" edition = "2024"
license = { workspace = true } license = { workspace = true }
@ -9,20 +9,25 @@ workspace = true
[dependencies] [dependencies]
common.workspace = true common.workspace = true
nssa.workspace = true
mempool.workspace = true
sequencer_core = { workspace = true, features = ["testnet"] } sequencer_core = { workspace = true, features = ["testnet"] }
sequencer_rpc.workspace = true sequencer_service_protocol.workspace = true
sequencer_service_rpc = { workspace = true, features = ["server"] }
indexer_service_rpc = { workspace = true, features = ["client"] } indexer_service_rpc = { workspace = true, features = ["client"] }
clap = { workspace = true, features = ["derive", "env"] } clap = { workspace = true, features = ["derive", "env"] }
anyhow.workspace = true anyhow.workspace = true
env_logger.workspace = true env_logger.workspace = true
log.workspace = true log.workspace = true
actix.workspace = true
actix-web.workspace = true
tokio.workspace = true tokio.workspace = true
tokio-util.workspace = true
jsonrpsee.workspace = true
futures.workspace = true futures.workspace = true
bytesize.workspace = true
borsh.workspace = true
[features] [features]
default = [] default = []
# Runs the sequencer in standalone mode without depending on Bedrock and Indexer services. # Runs the sequencer in standalone mode without depending on Bedrock and Indexer services.
standalone = ["sequencer_core/mock", "sequencer_rpc/standalone"] standalone = ["sequencer_core/mock"]

View File

@ -40,7 +40,7 @@ RUN r0vm --version
# Install logos blockchain circuits # Install logos blockchain circuits
RUN curl -sSL https://raw.githubusercontent.com/logos-blockchain/logos-blockchain/main/scripts/setup-logos-blockchain-circuits.sh | bash RUN curl -sSL https://raw.githubusercontent.com/logos-blockchain/logos-blockchain/main/scripts/setup-logos-blockchain-circuits.sh | bash
WORKDIR /sequencer_runner WORKDIR /sequencer_service
# Build argument to enable standalone feature (defaults to false) # Build argument to enable standalone feature (defaults to false)
ARG STANDALONE=false ARG STANDALONE=false
@ -48,17 +48,17 @@ ARG STANDALONE=false
# Planner stage - generates dependency recipe # Planner stage - generates dependency recipe
FROM chef AS planner FROM chef AS planner
COPY . . COPY . .
RUN cargo chef prepare --bin sequencer_runner --recipe-path recipe.json RUN cargo chef prepare --bin sequencer_service --recipe-path recipe.json
# Builder stage - builds dependencies and application # Builder stage - builds dependencies and application
FROM chef AS builder FROM chef AS builder
ARG STANDALONE ARG STANDALONE
COPY --from=planner /sequencer_runner/recipe.json recipe.json COPY --from=planner /sequencer_service/recipe.json recipe.json
# Build dependencies only (this layer will be cached) # Build dependencies only (this layer will be cached)
RUN if [ "$STANDALONE" = "true" ]; then \ RUN if [ "$STANDALONE" = "true" ]; then \
cargo chef cook --bin sequencer_runner --features standalone --release --recipe-path recipe.json; \ cargo chef cook --bin sequencer_service --features standalone --release --recipe-path recipe.json; \
else \ else \
cargo chef cook --bin sequencer_runner --release --recipe-path recipe.json; \ cargo chef cook --bin sequencer_service --release --recipe-path recipe.json; \
fi fi
# Copy source code # Copy source code
@ -66,13 +66,13 @@ COPY . .
# Build the actual application # Build the actual application
RUN if [ "$STANDALONE" = "true" ]; then \ RUN if [ "$STANDALONE" = "true" ]; then \
cargo build --release --features standalone --bin sequencer_runner; \ cargo build --release --features standalone --bin sequencer_service; \
else \ else \
cargo build --release --bin sequencer_runner; \ cargo build --release --bin sequencer_service; \
fi fi
# Strip debug symbols to reduce binary size # Strip debug symbols to reduce binary size
RUN strip /sequencer_runner/target/release/sequencer_runner RUN strip /sequencer_service/target/release/sequencer_service
# Runtime stage - minimal image # Runtime stage - minimal image
FROM debian:trixie-slim FROM debian:trixie-slim
@ -84,11 +84,11 @@ RUN apt-get update \
# Create non-root user for security # Create non-root user for security
RUN useradd -m -u 1000 -s /bin/bash sequencer_user && \ RUN useradd -m -u 1000 -s /bin/bash sequencer_user && \
mkdir -p /sequencer_runner /etc/sequencer_runner && \ mkdir -p /sequencer_service /etc/sequencer_service && \
chown -R sequencer_user:sequencer_user /sequencer_runner /etc/sequencer_runner chown -R sequencer_user:sequencer_user /sequencer_service /etc/sequencer_service
# Copy binary from builder # Copy binary from builder
COPY --from=builder --chown=sequencer_user:sequencer_user /sequencer_runner/target/release/sequencer_runner /usr/local/bin/sequencer_runner COPY --from=builder --chown=sequencer_user:sequencer_user /sequencer_service/target/release/sequencer_service /usr/local/bin/sequencer_service
# Copy r0vm binary from builder # Copy r0vm binary from builder
COPY --from=builder --chown=sequencer_user:sequencer_user /usr/local/bin/r0vm /usr/local/bin/r0vm COPY --from=builder --chown=sequencer_user:sequencer_user /usr/local/bin/r0vm /usr/local/bin/r0vm
@ -97,7 +97,7 @@ COPY --from=builder --chown=sequencer_user:sequencer_user /usr/local/bin/r0vm /u
COPY --from=builder --chown=sequencer_user:sequencer_user /root/.logos-blockchain-circuits /home/sequencer_user/.logos-blockchain-circuits COPY --from=builder --chown=sequencer_user:sequencer_user /root/.logos-blockchain-circuits /home/sequencer_user/.logos-blockchain-circuits
# Copy entrypoint script # Copy entrypoint script
COPY sequencer_runner/docker-entrypoint.sh /docker-entrypoint.sh COPY sequencer/service/docker-entrypoint.sh /docker-entrypoint.sh
RUN chmod +x /docker-entrypoint.sh RUN chmod +x /docker-entrypoint.sh
# Expose default port # Expose default port
@ -124,5 +124,5 @@ USER root
ENTRYPOINT ["/docker-entrypoint.sh"] ENTRYPOINT ["/docker-entrypoint.sh"]
WORKDIR /sequencer_runner WORKDIR /sequencer_service
CMD ["sequencer_runner", "/etc/sequencer_runner"] CMD ["sequencer_service", "/etc/sequencer_service/sequencer_config.json"]

View File

@ -1,6 +1,5 @@
{ {
"home": ".", "home": ".",
"override_rust_log": null,
"genesis_id": 1, "genesis_id": 1,
"is_genesis_random": true, "is_genesis_random": true,
"max_num_tx_in_block": 20, "max_num_tx_in_block": 20,
@ -8,7 +7,6 @@
"mempool_max_size": 1000, "mempool_max_size": 1000,
"block_create_timeout": "15s", "block_create_timeout": "15s",
"retry_pending_blocks_timeout": "5s", "retry_pending_blocks_timeout": "5s",
"port": 3040,
"bedrock_config": { "bedrock_config": {
"backoff": { "backoff": {
"start_delay": "100ms", "start_delay": "100ms",
@ -20,50 +18,50 @@
"indexer_rpc_url": "ws://localhost:8779", "indexer_rpc_url": "ws://localhost:8779",
"initial_accounts": [ "initial_accounts": [
{ {
"account_id": "6iArKUXxhUJqS7kCaPNhwMWt3ro71PDyBj7jwAyE2VQV", "account_id": "CbgR6tj5kWx5oziiFptM7jMvrQeYY3Mzaao6ciuhSr2r",
"balance": 10000 "balance": 10000
}, },
{ {
"account_id": "7wHg9sbJwc6h3NP1S9bekfAzB8CHifEcxKswCKUt3YQo", "account_id": "2RHZhw9h534Zr3eq2RGhQete2Hh667foECzXPmSkGni2",
"balance": 20000 "balance": 20000
} }
], ],
"initial_commitments": [ "initial_commitments": [
{ {
"npk":[ "npk": [
177, 139,
64, 19,
1, 158,
11, 11,
87, 155,
38,
254,
159,
231, 231,
165, 85,
1, 206,
94, 132,
64, 228,
137, 220,
243, 114,
76, 145,
249, 89,
101, 113,
251, 156,
129, 238,
33, 142,
101, 242,
189, 74,
30, 182,
42, 91,
11, 43,
191, 100,
34, 6,
103, 190,
186, 31,
227, 15,
230 31,
] , 88,
96,
204
],
"account": { "account": {
"program_owner": [ "program_owner": [
0, 0,
@ -82,38 +80,38 @@
}, },
{ {
"npk": [ "npk": [
32, 173,
67,
72,
164,
106,
53,
66,
239,
141,
15,
52,
230,
136,
177,
2,
236,
207,
243,
134, 134,
135, 33,
210, 223,
143, 54,
87, 226,
232, 10,
71,
215, 215,
128, 254,
194, 143,
120, 172,
113, 24,
224, 244,
4, 243,
165 208,
65,
112,
118,
70,
217,
240,
69,
100,
129,
3,
121,
25,
213,
132,
42,
45
], ],
"account": { "account": {
"program_owner": [ "program_owner": [
@ -166,4 +164,4 @@
37, 37,
37 37
] ]
} }

View File

@ -1,13 +1,11 @@
{ {
"home": "/var/lib/sequencer_runner", "home": "/var/lib/sequencer_service",
"override_rust_log": null,
"genesis_id": 1, "genesis_id": 1,
"is_genesis_random": true, "is_genesis_random": true,
"max_num_tx_in_block": 20, "max_num_tx_in_block": 20,
"max_block_size": "1 MiB", "max_block_size": "1 MiB",
"mempool_max_size": 10000, "mempool_max_size": 10000,
"block_create_timeout": "10s", "block_create_timeout": "10s",
"port": 3040,
"retry_pending_blocks_timeout": "7s", "retry_pending_blocks_timeout": "7s",
"bedrock_config": { "bedrock_config": {
"backoff": { "backoff": {
@ -20,49 +18,49 @@
"indexer_rpc_url": "ws://localhost:8779", "indexer_rpc_url": "ws://localhost:8779",
"initial_accounts": [ "initial_accounts": [
{ {
"account_id": "6iArKUXxhUJqS7kCaPNhwMWt3ro71PDyBj7jwAyE2VQV", "account_id": "CbgR6tj5kWx5oziiFptM7jMvrQeYY3Mzaao6ciuhSr2r",
"balance": 10000 "balance": 10000
}, },
{ {
"account_id": "7wHg9sbJwc6h3NP1S9bekfAzB8CHifEcxKswCKUt3YQo", "account_id": "2RHZhw9h534Zr3eq2RGhQete2Hh667foECzXPmSkGni2",
"balance": 20000 "balance": 20000
} }
], ],
"initial_commitments": [ "initial_commitments": [
{ {
"npk": [ "npk": [
63, 139,
202, 19,
178, 158,
11,
155,
231, 231,
183, 85,
82, 206,
237, 132,
212,
216,
221,
215,
255,
153,
101,
177,
161,
254,
210,
128,
122,
54,
190,
230,
151,
183,
64,
225,
229,
113,
1,
228, 228,
97 220,
114,
145,
89,
113,
156,
238,
142,
242,
74,
182,
91,
43,
100,
6,
190,
31,
15,
31,
88,
96,
204
], ],
"account": { "account": {
"program_owner": [ "program_owner": [
@ -82,38 +80,38 @@
}, },
{ {
"npk": [ "npk": [
192, 173,
251, 134,
166, 33,
243, 223,
167, 54,
236, 226,
84, 10,
249, 71,
35, 215,
136, 254,
130, 143,
172, 172,
219, 24,
225,
161,
139,
229,
89,
243,
125,
194,
213,
209,
30,
23,
174,
100,
244, 244,
124, 243,
74, 208,
140, 65,
47 112,
118,
70,
217,
240,
69,
100,
129,
3,
121,
25,
213,
132,
42,
45
], ],
"account": { "account": {
"program_owner": [ "program_owner": [

View File

@ -0,0 +1,14 @@
services:
sequencer_service:
image: lssa/sequencer_service
build:
context: ../..
dockerfile: sequencer/service/Dockerfile
container_name: sequencer_service
ports:
- "3040:3040"
volumes:
# Mount configuration file
- ./configs/docker/sequencer_config.json:/etc/sequencer_service/sequencer_config.json
# Mount data folder
- ./data:/var/lib/sequencer_service

View File

@ -1,11 +1,11 @@
#!/bin/sh #!/bin/sh
# This is an entrypoint script for the sequencer_runner Docker container, # This is an entrypoint script for the sequencer_service Docker container,
# it's not meant to be executed outside of the container. # it's not meant to be executed outside of the container.
set -e set -e
CONFIG="/etc/sequencer_runner/sequencer_config.json" CONFIG="/etc/sequencer/service/sequencer_config.json"
# Check config file exists # Check config file exists
if [ ! -f "$CONFIG" ]; then if [ ! -f "$CONFIG" ]; then

View File

@ -0,0 +1,13 @@
[package]
name = "sequencer_service_protocol"
version = "0.1.0"
edition = "2024"
license = { workspace = true }
[lints]
workspace = true
[dependencies]
common.workspace = true
nssa.workspace = true
nssa_core.workspace = true

View File

@ -0,0 +1,9 @@
//! Reexports of types used by sequencer rpc specification.
pub use common::{
HashType,
block::{Block, BlockId},
transaction::NSSATransaction,
};
pub use nssa::{Account, AccountId, ProgramId};
pub use nssa_core::{Commitment, MembershipProof, account::Nonce};

View File

@ -0,0 +1,17 @@
[package]
name = "sequencer_service_rpc"
version = "0.1.0"
edition = "2024"
license = { workspace = true }
[lints]
workspace = true
[dependencies]
sequencer_service_protocol.workspace = true
jsonrpsee = { workspace = true, features = ["macros"] }
[features]
client = ["jsonrpsee/client"]
server = ["jsonrpsee/server"]

View File

@ -0,0 +1,92 @@
use std::collections::BTreeMap;
use jsonrpsee::proc_macros::rpc;
#[cfg(feature = "server")]
use jsonrpsee::types::ErrorObjectOwned;
#[cfg(feature = "client")]
pub use jsonrpsee::{core::ClientError, http_client::HttpClientBuilder as SequencerClientBuilder};
use sequencer_service_protocol::{
Account, AccountId, Block, BlockId, Commitment, HashType, MembershipProof, NSSATransaction,
Nonce, ProgramId,
};
#[cfg(all(not(feature = "server"), not(feature = "client")))]
compile_error!("At least one of `server` or `client` features must be enabled.");
/// Type alias for RPC client. Only available when `client` feature is enabled.
///
/// It's cheap to clone this client, so it can be cloned and shared across the application.
///
/// # Example
///
/// ```no_run
/// use common::transaction::NSSATransaction;
/// use sequencer_service_rpc::{RpcClient as _, SequencerClientBuilder};
///
/// let url = "http://localhost:3040".parse()?;
/// let client = SequencerClientBuilder::default().build(url)?;
///
/// let tx: NSSATransaction = unimplemented!("Construct your transaction here");
/// let tx_hash = client.send_transaction(tx).await?;
/// ```
#[cfg(feature = "client")]
pub type SequencerClient = jsonrpsee::http_client::HttpClient;
#[cfg_attr(all(feature = "server", not(feature = "client")), rpc(server))]
#[cfg_attr(all(feature = "client", not(feature = "server")), rpc(client))]
#[cfg_attr(all(feature = "server", feature = "client"), rpc(server, client))]
pub trait Rpc {
#[method(name = "sendTransaction")]
async fn send_transaction(&self, tx: NSSATransaction) -> Result<HashType, ErrorObjectOwned>;
// TODO: expand healthcheck response into some kind of report
#[method(name = "checkHealth")]
async fn check_health(&self) -> Result<(), ErrorObjectOwned>;
// TODO: These functions should be removed after wallet starts using indexer
// for this type of queries.
//
// =============================================================================================
#[method(name = "getBlock")]
async fn get_block(&self, block_id: BlockId) -> Result<Option<Block>, ErrorObjectOwned>;
#[method(name = "getBlockRange")]
async fn get_block_range(
&self,
start_block_id: BlockId,
end_block_id: BlockId,
) -> Result<Vec<Block>, ErrorObjectOwned>;
#[method(name = "getLastBlockId")]
async fn get_last_block_id(&self) -> Result<BlockId, ErrorObjectOwned>;
#[method(name = "getAccountBalance")]
async fn get_account_balance(&self, account_id: AccountId) -> Result<u128, ErrorObjectOwned>;
#[method(name = "getTransaction")]
async fn get_transaction(
&self,
tx_hash: HashType,
) -> Result<Option<NSSATransaction>, ErrorObjectOwned>;
#[method(name = "getAccountsNonces")]
async fn get_accounts_nonces(
&self,
account_ids: Vec<AccountId>,
) -> Result<Vec<Nonce>, ErrorObjectOwned>;
#[method(name = "getProofForCommitment")]
async fn get_proof_for_commitment(
&self,
commitment: Commitment,
) -> Result<Option<MembershipProof>, ErrorObjectOwned>;
#[method(name = "getAccount")]
async fn get_account(&self, account_id: AccountId) -> Result<Account, ErrorObjectOwned>;
#[method(name = "getProgramIds")]
async fn get_program_ids(&self) -> Result<BTreeMap<String, ProgramId>, ErrorObjectOwned>;
// =============================================================================================
}

View File

@ -1,59 +1,75 @@
use std::{net::SocketAddr, path::PathBuf, sync::Arc, time::Duration}; use std::{net::SocketAddr, sync::Arc, time::Duration};
use actix_web::dev::ServerHandle; use anyhow::{Context as _, Result, anyhow};
use anyhow::{Context as _, Result}; use bytesize::ByteSize;
use clap::Parser; use common::transaction::NSSATransaction;
use common::rpc_primitives::RpcConfig; use futures::never::Never;
use futures::{FutureExt as _, never::Never}; use jsonrpsee::server::ServerHandle;
#[cfg(not(feature = "standalone"))] #[cfg(not(feature = "standalone"))]
use log::warn; use log::warn;
use log::{error, info}; use log::{error, info};
use mempool::MemPoolHandle;
#[cfg(feature = "standalone")] #[cfg(feature = "standalone")]
use sequencer_core::SequencerCoreWithMockClients as SequencerCore; use sequencer_core::SequencerCoreWithMockClients as SequencerCore;
use sequencer_core::config::SequencerConfig; pub use sequencer_core::config::*;
#[cfg(not(feature = "standalone"))] #[cfg(not(feature = "standalone"))]
use sequencer_core::{SequencerCore, block_settlement_client::BlockSettlementClientTrait as _}; use sequencer_core::{SequencerCore, block_settlement_client::BlockSettlementClientTrait as _};
use sequencer_rpc::new_http_server; use sequencer_service_rpc::RpcServer as _;
use tokio::{sync::Mutex, task::JoinHandle}; use tokio::{sync::Mutex, task::JoinHandle};
pub const RUST_LOG: &str = "RUST_LOG"; pub mod service;
#[derive(Parser, Debug)] const REQUEST_BODY_MAX_SIZE: ByteSize = ByteSize::mib(10);
#[clap(version)]
struct Args {
/// Path to configs.
home_dir: PathBuf,
}
/// Handle to manage the sequencer and its tasks. /// Handle to manage the sequencer and its tasks.
/// ///
/// Implements `Drop` to ensure all tasks are aborted and the HTTP server is stopped when dropped. /// Implements `Drop` to ensure all tasks are aborted and the RPC server is stopped when dropped.
pub struct SequencerHandle { pub struct SequencerHandle {
addr: SocketAddr, addr: SocketAddr,
http_server_handle: ServerHandle, /// Option because of `Drop` which forbids to simply move out of `self` in `stopped()`.
server_handle: Option<ServerHandle>,
main_loop_handle: JoinHandle<Result<Never>>, main_loop_handle: JoinHandle<Result<Never>>,
retry_pending_blocks_loop_handle: JoinHandle<Result<Never>>, retry_pending_blocks_loop_handle: JoinHandle<Result<Never>>,
listen_for_bedrock_blocks_loop_handle: JoinHandle<Result<Never>>, listen_for_bedrock_blocks_loop_handle: JoinHandle<Result<Never>>,
} }
impl SequencerHandle { impl SequencerHandle {
/// Runs the sequencer indefinitely, monitoring its tasks. const fn new(
/// addr: SocketAddr,
/// If no error occurs, this function will never return. server_handle: ServerHandle,
main_loop_handle: JoinHandle<Result<Never>>,
retry_pending_blocks_loop_handle: JoinHandle<Result<Never>>,
listen_for_bedrock_blocks_loop_handle: JoinHandle<Result<Never>>,
) -> Self {
Self {
addr,
server_handle: Some(server_handle),
main_loop_handle,
retry_pending_blocks_loop_handle,
listen_for_bedrock_blocks_loop_handle,
}
}
/// Wait for any of the sequencer tasks to fail and return the error.
#[expect( #[expect(
clippy::integer_division_remainder_used, clippy::integer_division_remainder_used,
reason = "Generated by select! macro, can't be easily rewritten to avoid this lint" reason = "Generated by select! macro, can't be easily rewritten to avoid this lint"
)] )]
pub async fn run_forever(&mut self) -> Result<Never> { pub async fn failed(mut self) -> Result<Never> {
let Self { let Self {
addr: _, addr: _,
http_server_handle: _, server_handle,
main_loop_handle, main_loop_handle,
retry_pending_blocks_loop_handle, retry_pending_blocks_loop_handle,
listen_for_bedrock_blocks_loop_handle, listen_for_bedrock_blocks_loop_handle,
} = self; } = &mut self;
let server_handle = server_handle.take().expect("Server handle is set");
tokio::select! { tokio::select! {
() = server_handle.stopped() => {
Err(anyhow!("RPC Server stopped"))
}
res = main_loop_handle => { res = main_loop_handle => {
res res
.context("Main loop task panicked")? .context("Main loop task panicked")?
@ -72,11 +88,25 @@ impl SequencerHandle {
} }
} }
/// Check if all Sequencer tasks are still running.
///
/// Return `false` if any of the tasks has failed and `true` otherwise.
/// Error of the failed task can be retrieved by awaiting on [`Self::failed()`].
#[must_use] #[must_use]
pub fn is_finished(&self) -> bool { pub fn is_healthy(&self) -> bool {
self.main_loop_handle.is_finished() let Self {
|| self.retry_pending_blocks_loop_handle.is_finished() addr: _,
|| self.listen_for_bedrock_blocks_loop_handle.is_finished() server_handle,
main_loop_handle,
retry_pending_blocks_loop_handle,
listen_for_bedrock_blocks_loop_handle,
} = self;
let stopped = server_handle.as_ref().is_none_or(ServerHandle::is_stopped)
|| main_loop_handle.is_finished()
|| retry_pending_blocks_loop_handle.is_finished()
|| listen_for_bedrock_blocks_loop_handle.is_finished();
!stopped
} }
#[must_use] #[must_use]
@ -89,7 +119,7 @@ impl Drop for SequencerHandle {
fn drop(&mut self) { fn drop(&mut self) {
let Self { let Self {
addr: _, addr: _,
http_server_handle, server_handle,
main_loop_handle, main_loop_handle,
retry_pending_blocks_loop_handle, retry_pending_blocks_loop_handle,
listen_for_bedrock_blocks_loop_handle, listen_for_bedrock_blocks_loop_handle,
@ -99,31 +129,35 @@ impl Drop for SequencerHandle {
retry_pending_blocks_loop_handle.abort(); retry_pending_blocks_loop_handle.abort();
listen_for_bedrock_blocks_loop_handle.abort(); listen_for_bedrock_blocks_loop_handle.abort();
// Can't wait here as Drop can't be async, but anyway stop signal should be sent let Some(handle) = server_handle else {
http_server_handle.stop(true).now_or_never(); return;
};
if let Err(err) = handle.stop() {
error!("An error occurred while stopping Sequencer RPC server: {err}");
}
} }
} }
pub async fn startup_sequencer(app_config: SequencerConfig) -> Result<SequencerHandle> { pub async fn run(config: SequencerConfig, port: u16) -> Result<SequencerHandle> {
let block_timeout = app_config.block_create_timeout; let block_timeout = config.block_create_timeout;
let retry_pending_blocks_timeout = app_config.retry_pending_blocks_timeout; let retry_pending_blocks_timeout = config.retry_pending_blocks_timeout;
let port = app_config.port; let max_block_size = config.max_block_size;
let (sequencer_core, mempool_handle) = SequencerCore::start_from_config(app_config).await; let (sequencer_core, mempool_handle) = SequencerCore::start_from_config(config).await;
info!("Sequencer core set up"); info!("Sequencer core set up");
let seq_core_wrapped = Arc::new(Mutex::new(sequencer_core)); let seq_core_wrapped = Arc::new(Mutex::new(sequencer_core));
let (http_server, addr) = new_http_server( let (server_handle, addr) = run_server(
RpcConfig::with_port(port),
Arc::clone(&seq_core_wrapped), Arc::clone(&seq_core_wrapped),
mempool_handle, mempool_handle,
port,
max_block_size.as_u64(),
) )
.await?; .await?;
info!("HTTP server started"); info!("RPC server started");
let http_server_handle = http_server.handle();
tokio::spawn(http_server);
#[cfg(not(feature = "standalone"))] #[cfg(not(feature = "standalone"))]
{ {
@ -146,13 +180,42 @@ pub async fn startup_sequencer(app_config: SequencerConfig) -> Result<SequencerH
let listen_for_bedrock_blocks_loop_handle = let listen_for_bedrock_blocks_loop_handle =
tokio::spawn(listen_for_bedrock_blocks_loop(seq_core_wrapped)); tokio::spawn(listen_for_bedrock_blocks_loop(seq_core_wrapped));
Ok(SequencerHandle { Ok(SequencerHandle::new(
addr, addr,
http_server_handle, server_handle,
main_loop_handle, main_loop_handle,
retry_pending_blocks_loop_handle, retry_pending_blocks_loop_handle,
listen_for_bedrock_blocks_loop_handle, listen_for_bedrock_blocks_loop_handle,
}) ))
}
async fn run_server(
sequencer: Arc<Mutex<SequencerCore>>,
mempool_handle: MemPoolHandle<NSSATransaction>,
port: u16,
max_block_size: u64,
) -> Result<(ServerHandle, SocketAddr)> {
let server = jsonrpsee::server::ServerBuilder::with_config(
jsonrpsee::server::ServerConfigBuilder::new()
.max_request_body_size(
u32::try_from(REQUEST_BODY_MAX_SIZE.as_u64())
.expect("REQUEST_BODY_MAX_SIZE should be less than u32::MAX"),
)
.build(),
)
.build(SocketAddr::from(([0, 0, 0, 0], port)))
.await
.context("Failed to build RPC server")?;
let addr = server
.local_addr()
.context("Failed to get local address of RPC server")?;
info!("Starting Sequencer Service RPC server on {addr}");
let service = service::SequencerService::new(sequencer, mempool_handle, max_block_size);
let handle = server.start(service.into_rpc());
Ok((handle, addr))
} }
async fn main_loop(seq_core: Arc<Mutex<SequencerCore>>, block_timeout: Duration) -> Result<Never> { async fn main_loop(seq_core: Arc<Mutex<SequencerCore>>, block_timeout: Duration) -> Result<Never> {
@ -210,7 +273,7 @@ async fn retry_pending_blocks(seq_core: &Arc<Mutex<SequencerCore>>) -> Result<()
.create_inscribe_tx(block) .create_inscribe_tx(block)
.context("Failed to create inscribe tx for pending block")?; .context("Failed to create inscribe tx for pending block")?;
debug!(">>>> Create inscribe: {:?}", now.elapsed()); debug!("Create inscribe: {:?}", now.elapsed());
let now = Instant::now(); let now = Instant::now();
if let Err(e) = block_settlement_client if let Err(e) = block_settlement_client
@ -222,7 +285,7 @@ async fn retry_pending_blocks(seq_core: &Arc<Mutex<SequencerCore>>) -> Result<()
block.header.block_id block.header.block_id
); );
} }
debug!(">>>> Post: {:?}", now.elapsed()); debug!("Post: {:?}", now.elapsed());
} }
Ok(()) Ok(())
} }
@ -287,33 +350,3 @@ async fn retry_pending_blocks_loop(
) -> Result<Never> { ) -> Result<Never> {
std::future::pending::<Result<Never>>().await std::future::pending::<Result<Never>>().await
} }
pub async fn main_runner() -> Result<()> {
env_logger::init();
let args = Args::parse();
let Args { home_dir } = args;
let app_config = SequencerConfig::from_path(&home_dir.join("sequencer_config.json"))?;
if let Some(rust_log) = &app_config.override_rust_log {
info!("RUST_LOG env var set to {rust_log:?}");
// SAFETY: there is no other threads running at this point
unsafe {
std::env::set_var(RUST_LOG, rust_log);
}
}
// ToDo: Add restart on failures
let mut sequencer_handle = startup_sequencer(app_config).await?;
info!("Sequencer running. Monitoring concurrent tasks...");
let Err(err) = sequencer_handle.run_forever().await;
error!("Sequencer failed: {err:#}");
info!("Shutting down sequencer...");
Ok(())
}

View File

@ -0,0 +1,60 @@
use std::path::PathBuf;
use anyhow::Result;
use clap::Parser;
use log::{error, info};
use tokio_util::sync::CancellationToken;
#[derive(Debug, Parser)]
#[clap(version)]
struct Args {
#[clap(name = "config")]
config_path: PathBuf,
#[clap(short, long, default_value = "3040")]
port: u16,
}
#[tokio::main]
#[expect(
clippy::integer_division_remainder_used,
reason = "Generated by select! macro, can't be easily rewritten to avoid this lint"
)]
async fn main() -> Result<()> {
env_logger::init();
let Args { config_path, port } = Args::parse();
let cancellation_token = listen_for_shutdown_signal();
let config = sequencer_service::SequencerConfig::from_path(&config_path)?;
let sequencer_handle = sequencer_service::run(config, port).await?;
tokio::select! {
() = cancellation_token.cancelled() => {
info!("Shutting down sequencer...");
}
Err(err) = sequencer_handle.failed() => {
error!("Sequencer failed unexpectedly: {err}");
}
}
info!("Sequencer shutdown complete");
Ok(())
}
fn listen_for_shutdown_signal() -> CancellationToken {
let cancellation_token = CancellationToken::new();
let cancellation_token_clone = cancellation_token.clone();
tokio::spawn(async move {
if let Err(err) = tokio::signal::ctrl_c().await {
error!("Failed to listen for Ctrl-C signal: {err}");
return;
}
info!("Received Ctrl-C signal");
cancellation_token_clone.cancel();
});
cancellation_token
}

View File

@ -0,0 +1,183 @@
use std::{collections::BTreeMap, sync::Arc};
use common::transaction::NSSATransaction;
use jsonrpsee::{
core::async_trait,
types::{ErrorCode, ErrorObjectOwned},
};
use log::warn;
use mempool::MemPoolHandle;
use nssa::{self, program::Program};
use sequencer_core::{
DbError, SequencerCore, block_settlement_client::BlockSettlementClientTrait,
indexer_client::IndexerClientTrait,
};
use sequencer_service_protocol::{
Account, AccountId, Block, BlockId, Commitment, HashType, MembershipProof, Nonce, ProgramId,
};
use tokio::sync::Mutex;
const NOT_FOUND_ERROR_CODE: i32 = -31999;
pub struct SequencerService<BC: BlockSettlementClientTrait, IC: IndexerClientTrait> {
sequencer: Arc<Mutex<SequencerCore<BC, IC>>>,
mempool_handle: MemPoolHandle<NSSATransaction>,
max_block_size: u64,
}
impl<BC: BlockSettlementClientTrait, IC: IndexerClientTrait> SequencerService<BC, IC> {
pub const fn new(
sequencer: Arc<Mutex<SequencerCore<BC, IC>>>,
mempool_handle: MemPoolHandle<NSSATransaction>,
max_block_size: u64,
) -> Self {
Self {
sequencer,
mempool_handle,
max_block_size,
}
}
}
#[async_trait]
impl<BC: BlockSettlementClientTrait + Send + 'static, IC: IndexerClientTrait + Send + 'static>
sequencer_service_rpc::RpcServer for SequencerService<BC, IC>
{
async fn send_transaction(&self, tx: NSSATransaction) -> Result<HashType, ErrorObjectOwned> {
// Reserve ~200 bytes for block header overhead
const BLOCK_HEADER_OVERHEAD: u64 = 200;
let tx_hash = tx.hash();
let encoded_tx =
borsh::to_vec(&tx).expect("Transaction borsh serialization should not fail");
let tx_size = u64::try_from(encoded_tx.len()).expect("Transaction size should fit in u64");
let max_tx_size = self.max_block_size.saturating_sub(BLOCK_HEADER_OVERHEAD);
if tx_size > max_tx_size {
return Err(ErrorObjectOwned::owned(
ErrorCode::InvalidParams.code(),
format!("Transaction too large: size {tx_size}, max {max_tx_size}"),
None::<()>,
));
}
let authenticated_tx = tx
.transaction_stateless_check()
.inspect_err(|err| warn!("Error at pre_check {err:#?}"))
.map_err(|err| {
ErrorObjectOwned::owned(
ErrorCode::InvalidParams.code(),
format!("{err:?}"),
None::<()>,
)
})?;
self.mempool_handle
.push(authenticated_tx)
.await
.expect("Mempool is closed, this is a bug");
Ok(tx_hash)
}
async fn check_health(&self) -> Result<(), ErrorObjectOwned> {
Ok(())
}
async fn get_block(&self, block_id: BlockId) -> Result<Option<Block>, ErrorObjectOwned> {
let sequencer = self.sequencer.lock().await;
sequencer
.block_store()
.get_block_at_id(block_id)
.map_err(|err| internal_error(&err))
}
async fn get_block_range(
&self,
start_block_id: BlockId,
end_block_id: BlockId,
) -> Result<Vec<Block>, ErrorObjectOwned> {
let sequencer = self.sequencer.lock().await;
(start_block_id..=end_block_id)
.map(|block_id| {
let block = sequencer
.block_store()
.get_block_at_id(block_id)
.map_err(|err| internal_error(&err))?;
block.ok_or_else(|| {
ErrorObjectOwned::owned(
NOT_FOUND_ERROR_CODE,
format!("Block with id {block_id} not found"),
None::<()>,
)
})
})
.collect::<Result<Vec<_>, _>>()
}
async fn get_last_block_id(&self) -> Result<BlockId, ErrorObjectOwned> {
let sequencer = self.sequencer.lock().await;
Ok(sequencer.chain_height())
}
async fn get_account_balance(&self, account_id: AccountId) -> Result<u128, ErrorObjectOwned> {
let sequencer = self.sequencer.lock().await;
let account = sequencer.state().get_account_by_id(account_id);
Ok(account.balance)
}
async fn get_transaction(
&self,
tx_hash: HashType,
) -> Result<Option<NSSATransaction>, ErrorObjectOwned> {
let sequencer = self.sequencer.lock().await;
Ok(sequencer.block_store().get_transaction_by_hash(tx_hash))
}
async fn get_accounts_nonces(
&self,
account_ids: Vec<AccountId>,
) -> Result<Vec<Nonce>, ErrorObjectOwned> {
let sequencer = self.sequencer.lock().await;
let nonces = account_ids
.into_iter()
.map(|account_id| sequencer.state().get_account_by_id(account_id).nonce)
.collect();
Ok(nonces)
}
async fn get_proof_for_commitment(
&self,
commitment: Commitment,
) -> Result<Option<MembershipProof>, ErrorObjectOwned> {
let sequencer = self.sequencer.lock().await;
Ok(sequencer.state().get_proof_for_commitment(&commitment))
}
async fn get_account(&self, account_id: AccountId) -> Result<Account, ErrorObjectOwned> {
let sequencer = self.sequencer.lock().await;
Ok(sequencer.state().get_account_by_id(account_id))
}
async fn get_program_ids(&self) -> Result<BTreeMap<String, ProgramId>, ErrorObjectOwned> {
let mut program_ids = BTreeMap::new();
program_ids.insert(
"authenticated_transfer".to_owned(),
Program::authenticated_transfer_program().id(),
);
program_ids.insert("token".to_owned(), Program::token().id());
program_ids.insert("pinata".to_owned(), Program::pinata().id());
program_ids.insert("amm".to_owned(), Program::amm().id());
program_ids.insert(
"privacy_preserving_circuit".to_owned(),
nssa::PRIVACY_PRESERVING_CIRCUIT_ID,
);
Ok(program_ids)
}
}
fn internal_error(err: &DbError) -> ErrorObjectOwned {
ErrorObjectOwned::owned(ErrorCode::InternalError.code(), err.to_string(), None::<()>)
}

View File

@ -1,39 +0,0 @@
[package]
name = "sequencer_rpc"
version = "0.1.0"
edition = "2024"
license = { workspace = true }
[lints]
workspace = true
[dependencies]
nssa.workspace = true
common.workspace = true
mempool.workspace = true
sequencer_core = { workspace = true }
bedrock_client.workspace = true
anyhow.workspace = true
serde_json.workspace = true
log.workspace = true
serde.workspace = true
actix-cors.workspace = true
futures.workspace = true
base58.workspace = true
hex.workspace = true
tempfile.workspace = true
base64.workspace = true
itertools.workspace = true
actix-web.workspace = true
tokio.workspace = true
borsh.workspace = true
bytesize.workspace = true
[dev-dependencies]
sequencer_core = { workspace = true, features = ["mock"] }
[features]
default = []
# Includes types to run the sequencer in standalone mode
standalone = ["sequencer_core/mock"]

View File

@ -1,55 +0,0 @@
use std::sync::Arc;
use common::{
rpc_primitives::errors::{RpcError, RpcErrorKind},
transaction::NSSATransaction,
};
use mempool::MemPoolHandle;
pub use net_utils::*;
#[cfg(feature = "standalone")]
use sequencer_core::mock::{MockBlockSettlementClient, MockIndexerClient};
use sequencer_core::{
SequencerCore,
block_settlement_client::{BlockSettlementClient, BlockSettlementClientTrait},
indexer_client::{IndexerClient, IndexerClientTrait},
};
use serde::Serialize;
use serde_json::Value;
use tokio::sync::Mutex;
use self::types::err_rpc::RpcErr;
pub mod net_utils;
pub mod process;
pub mod types;
#[cfg(feature = "standalone")]
pub type JsonHandlerWithMockClients = JsonHandler<MockBlockSettlementClient, MockIndexerClient>;
// ToDo: Add necessary fields
pub struct JsonHandler<
BC: BlockSettlementClientTrait = BlockSettlementClient,
IC: IndexerClientTrait = IndexerClient,
> {
sequencer_state: Arc<Mutex<SequencerCore<BC, IC>>>,
mempool_handle: MemPoolHandle<NSSATransaction>,
max_block_size: usize,
}
fn respond<T: Serialize>(val: T) -> Result<Value, RpcErr> {
Ok(serde_json::to_value(val)?)
}
#[must_use]
pub fn rpc_error_responce_inverter(err: RpcError) -> RpcError {
let content = err.error_struct.map(|error| match error {
RpcErrorKind::HandlerError(val) | RpcErrorKind::InternalError(val) => val,
RpcErrorKind::RequestValidationError(vall) => serde_json::to_value(vall).unwrap(),
});
RpcError {
error_struct: None,
code: err.code,
message: err.message,
data: content,
}
}

View File

@ -1,104 +0,0 @@
use std::{io, net::SocketAddr, sync::Arc};
use actix_cors::Cors;
use actix_web::{App, Error as HttpError, HttpResponse, HttpServer, http, middleware, web};
use common::{
rpc_primitives::{RpcConfig, message::Message},
transaction::NSSATransaction,
};
use futures::{Future, FutureExt as _};
use log::info;
use mempool::MemPoolHandle;
#[cfg(not(feature = "standalone"))]
use sequencer_core::SequencerCore;
#[cfg(feature = "standalone")]
use sequencer_core::SequencerCoreWithMockClients as SequencerCore;
use tokio::sync::Mutex;
#[cfg(not(feature = "standalone"))]
use super::JsonHandler;
use crate::process::Process;
pub const SHUTDOWN_TIMEOUT_SECS: u64 = 10;
pub const NETWORK: &str = "network";
#[cfg(feature = "standalone")]
type JsonHandler = super::JsonHandlerWithMockClients;
pub(crate) fn rpc_handler<P: Process>(
message: web::Json<Message>,
handler: web::Data<P>,
) -> impl Future<Output = Result<HttpResponse, HttpError>> {
let response = async move {
let message = handler.process(message.0).await?;
Ok(HttpResponse::Ok().json(&message))
};
response.boxed()
}
fn get_cors(cors_allowed_origins: &[String]) -> Cors {
let mut cors = Cors::permissive();
if cors_allowed_origins != ["*".to_owned()] {
for origin in cors_allowed_origins {
cors = cors.allowed_origin(origin);
}
}
cors.allowed_methods(vec!["GET", "POST"])
.allowed_headers(vec![http::header::AUTHORIZATION, http::header::ACCEPT])
.allowed_header(http::header::CONTENT_TYPE)
.max_age(3600)
}
pub async fn new_http_server(
config: RpcConfig,
seuquencer_core: Arc<Mutex<SequencerCore>>,
mempool_handle: MemPoolHandle<NSSATransaction>,
) -> io::Result<(actix_web::dev::Server, SocketAddr)> {
let RpcConfig {
addr,
cors_allowed_origins,
limits_config,
} = config;
info!(target:NETWORK, "Starting HTTP server at {addr}");
let max_block_size = seuquencer_core
.lock()
.await
.sequencer_config()
.max_block_size
.as_u64()
.try_into()
.expect("`max_block_size` is expected to fit into usize");
let handler = web::Data::new(JsonHandler {
sequencer_state: Arc::clone(&seuquencer_core),
mempool_handle,
max_block_size,
});
// HTTP server
let http_server = HttpServer::new(move || {
let json_limit = limits_config
.json_payload_max_size
.as_u64()
.try_into()
.expect("`json_payload_max_size` is expected to fit into usize");
App::new()
.wrap(get_cors(&cors_allowed_origins))
.app_data(handler.clone())
.app_data(web::JsonConfig::default().limit(json_limit))
.wrap(middleware::Logger::default())
.service(web::resource("/").route(web::post().to(rpc_handler::<JsonHandler>)))
})
.bind(addr)?
.shutdown_timeout(SHUTDOWN_TIMEOUT_SECS)
.disable_signals();
let [final_addr] = http_server
.addrs()
.try_into()
.expect("Exactly one address bound is expected for sequencer HTTP server");
info!(target:NETWORK, "HTTP server started at {final_addr}");
Ok((http_server.run(), final_addr))
}

View File

@ -1,49 +0,0 @@
use common::{
rpc_primitives::errors::{RpcError, RpcParseError},
transaction::TransactionMalformationError,
};
macro_rules! standard_rpc_err_kind {
($type_name:path) => {
impl RpcErrKind for $type_name {
fn into_rpc_err(self) -> RpcError {
self.into()
}
}
};
}
pub struct RpcErr(pub RpcError);
pub type RpcErrInternal = anyhow::Error;
pub trait RpcErrKind: 'static {
fn into_rpc_err(self) -> RpcError;
}
impl<T: RpcErrKind> From<T> for RpcErr {
fn from(e: T) -> Self {
Self(e.into_rpc_err())
}
}
standard_rpc_err_kind!(RpcError);
standard_rpc_err_kind!(RpcParseError);
impl RpcErrKind for serde_json::Error {
fn into_rpc_err(self) -> RpcError {
RpcError::serialization_error(&self.to_string())
}
}
impl RpcErrKind for RpcErrInternal {
fn into_rpc_err(self) -> RpcError {
RpcError::new_internal_error(None, &format!("{self:#?}"))
}
}
impl RpcErrKind for TransactionMalformationError {
fn into_rpc_err(self) -> RpcError {
RpcError::invalid_params(Some(serde_json::to_value(self).unwrap()))
}
}

View File

@ -1 +0,0 @@
pub mod err_rpc;

View File

@ -1,14 +0,0 @@
services:
sequencer_runner:
image: lssa/sequencer_runner
build:
context: ..
dockerfile: sequencer_runner/Dockerfile
container_name: sequencer_runner
ports:
- "3040:3040"
volumes:
# Mount configuration folder
- ./configs/docker:/etc/sequencer_runner
# Mount data folder
- ./data:/var/lib/sequencer_runner

View File

@ -1,16 +0,0 @@
use anyhow::Result;
use sequencer_runner::main_runner;
pub const NUM_THREADS: usize = 4;
// TODO: Why it requires config as a directory and not as a file?
fn main() -> Result<()> {
actix::System::with_tokio_rt(|| {
tokio::runtime::Builder::new_multi_thread()
.worker_threads(NUM_THREADS)
.enable_all()
.build()
.unwrap()
})
.block_on(main_runner())
}

View File

@ -514,7 +514,7 @@ impl RocksDBIO {
Ok(()) Ok(())
} }
pub fn get_block(&self, block_id: u64) -> DbResult<Block> { pub fn get_block(&self, block_id: u64) -> DbResult<Option<Block>> {
let cf_block = self.block_column(); let cf_block = self.block_column();
let res = self let res = self
.db .db
@ -530,16 +530,14 @@ impl RocksDBIO {
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res { if let Some(data) = res {
Ok(borsh::from_slice::<Block>(&data).map_err(|serr| { Ok(Some(borsh::from_slice::<Block>(&data).map_err(|serr| {
DbError::borsh_cast_message( DbError::borsh_cast_message(
serr, serr,
Some("Failed to deserialize block data".to_owned()), Some("Failed to deserialize block data".to_owned()),
) )
})?) })?))
} else { } else {
Err(DbError::db_interaction_error( Ok(None)
"Block on this id not found".to_owned(),
))
} }
} }
@ -618,7 +616,7 @@ impl RocksDBIO {
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None)) .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))
} }
pub fn get_breakpoint(&self, br_id: u64) -> DbResult<V02State> { fn get_breakpoint(&self, br_id: u64) -> DbResult<V02State> {
let cf_br = self.breakpoint_column(); let cf_br = self.breakpoint_column();
let res = self let res = self
.db .db
@ -641,6 +639,8 @@ impl RocksDBIO {
) )
})?) })?)
} else { } else {
// Note: this is not a `DbError::NotFound` case, because we expect that all searched
// breakpoints will be present in db as this is an internal method.
Err(DbError::db_interaction_error( Err(DbError::db_interaction_error(
"Breakpoint on this id not found".to_owned(), "Breakpoint on this id not found".to_owned(),
)) ))
@ -665,7 +665,9 @@ impl RocksDBIO {
}; };
for id in start..=block_id { for id in start..=block_id {
let block = self.get_block(id)?; let block = self.get_block(id)?.ok_or_else(|| {
DbError::db_interaction_error(format!("Block with id {id} not found"))
})?;
for transaction in block.body.transactions { for transaction in block.body.transactions {
transaction transaction
@ -686,9 +688,9 @@ impl RocksDBIO {
Ok(breakpoint) Ok(breakpoint)
} else { } else {
Err(DbError::db_interaction_error( Err(DbError::db_interaction_error(format!(
"Block on this id not found".to_owned(), "Block with id {block_id} not found"
)) )))
} }
} }
@ -720,7 +722,7 @@ impl RocksDBIO {
// Mappings // Mappings
pub fn get_block_id_by_hash(&self, hash: [u8; 32]) -> DbResult<u64> { pub fn get_block_id_by_hash(&self, hash: [u8; 32]) -> DbResult<Option<u64>> {
let cf_hti = self.hash_to_id_column(); let cf_hti = self.hash_to_id_column();
let res = self let res = self
.db .db
@ -736,17 +738,15 @@ impl RocksDBIO {
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res { if let Some(data) = res {
Ok(borsh::from_slice::<u64>(&data).map_err(|serr| { Ok(Some(borsh::from_slice::<u64>(&data).map_err(|serr| {
DbError::borsh_cast_message(serr, Some("Failed to deserialize block id".to_owned())) DbError::borsh_cast_message(serr, Some("Failed to deserialize block id".to_owned()))
})?) })?))
} else { } else {
Err(DbError::db_interaction_error( Ok(None)
"Block on this hash not found".to_owned(),
))
} }
} }
pub fn get_block_id_by_tx_hash(&self, tx_hash: [u8; 32]) -> DbResult<u64> { pub fn get_block_id_by_tx_hash(&self, tx_hash: [u8; 32]) -> DbResult<Option<u64>> {
let cf_tti = self.tx_hash_to_id_column(); let cf_tti = self.tx_hash_to_id_column();
let res = self let res = self
.db .db
@ -762,13 +762,11 @@ impl RocksDBIO {
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res { if let Some(data) = res {
Ok(borsh::from_slice::<u64>(&data).map_err(|serr| { Ok(Some(borsh::from_slice::<u64>(&data).map_err(|serr| {
DbError::borsh_cast_message(serr, Some("Failed to deserialize block id".to_owned())) DbError::borsh_cast_message(serr, Some("Failed to deserialize block id".to_owned()))
})?) })?))
} else { } else {
Err(DbError::db_interaction_error( Ok(None)
"Block for this tx hash not found".to_owned(),
))
} }
} }
@ -921,8 +919,14 @@ impl RocksDBIO {
let mut tx_batch = vec![]; let mut tx_batch = vec![];
for tx_hash in self.get_acc_transaction_hashes(acc_id, offset, limit)? { for tx_hash in self.get_acc_transaction_hashes(acc_id, offset, limit)? {
let block_id = self.get_block_id_by_tx_hash(tx_hash)?; let block_id = self.get_block_id_by_tx_hash(tx_hash)?.ok_or_else(|| {
let block = self.get_block(block_id)?; DbError::db_interaction_error(format!(
"Block id not found for tx hash {tx_hash:#?}"
))
})?;
let block = self.get_block(block_id)?.ok_or_else(|| {
DbError::db_interaction_error(format!("Block with id {block_id} not found"))
})?;
let transaction = block let transaction = block
.body .body
@ -1019,7 +1023,7 @@ mod tests {
let first_id = dbio.get_meta_first_block_in_db().unwrap(); let first_id = dbio.get_meta_first_block_in_db().unwrap();
let is_first_set = dbio.get_meta_is_first_block_set().unwrap(); let is_first_set = dbio.get_meta_is_first_block_set().unwrap();
let last_br_id = dbio.get_meta_last_breakpoint_id().unwrap(); let last_br_id = dbio.get_meta_last_breakpoint_id().unwrap();
let last_block = dbio.get_block(1).unwrap(); let last_block = dbio.get_block(1).unwrap().unwrap();
let breakpoint = dbio.get_breakpoint(0).unwrap(); let breakpoint = dbio.get_breakpoint(0).unwrap();
let final_state = dbio.final_state().unwrap(); let final_state = dbio.final_state().unwrap();
@ -1056,7 +1060,7 @@ mod tests {
let first_id = dbio.get_meta_first_block_in_db().unwrap(); let first_id = dbio.get_meta_first_block_in_db().unwrap();
let is_first_set = dbio.get_meta_is_first_block_set().unwrap(); let is_first_set = dbio.get_meta_is_first_block_set().unwrap();
let last_br_id = dbio.get_meta_last_breakpoint_id().unwrap(); let last_br_id = dbio.get_meta_last_breakpoint_id().unwrap();
let last_block = dbio.get_block(last_id).unwrap(); let last_block = dbio.get_block(last_id).unwrap().unwrap();
let breakpoint = dbio.get_breakpoint(0).unwrap(); let breakpoint = dbio.get_breakpoint(0).unwrap();
let final_state = dbio.final_state().unwrap(); let final_state = dbio.final_state().unwrap();
@ -1087,7 +1091,7 @@ mod tests {
for i in 1..BREAKPOINT_INTERVAL { for i in 1..BREAKPOINT_INTERVAL {
let last_id = dbio.get_meta_last_block_in_db().unwrap(); let last_id = dbio.get_meta_last_block_in_db().unwrap();
let last_block = dbio.get_block(last_id).unwrap(); let last_block = dbio.get_block(last_id).unwrap().unwrap();
let prev_hash = last_block.header.hash; let prev_hash = last_block.header.hash;
let transfer_tx = transfer(1, u128::from(i - 1), true); let transfer_tx = transfer(1, u128::from(i - 1), true);
@ -1103,7 +1107,7 @@ mod tests {
let first_id = dbio.get_meta_first_block_in_db().unwrap(); let first_id = dbio.get_meta_first_block_in_db().unwrap();
let is_first_set = dbio.get_meta_is_first_block_set().unwrap(); let is_first_set = dbio.get_meta_is_first_block_set().unwrap();
let last_br_id = dbio.get_meta_last_breakpoint_id().unwrap(); let last_br_id = dbio.get_meta_last_breakpoint_id().unwrap();
let last_block = dbio.get_block(last_id).unwrap(); let last_block = dbio.get_block(last_id).unwrap().unwrap();
let prev_breakpoint = dbio.get_breakpoint(0).unwrap(); let prev_breakpoint = dbio.get_breakpoint(0).unwrap();
let breakpoint = dbio.get_breakpoint(1).unwrap(); let breakpoint = dbio.get_breakpoint(1).unwrap();
let final_state = dbio.final_state().unwrap(); let final_state = dbio.final_state().unwrap();
@ -1142,7 +1146,7 @@ mod tests {
RocksDBIO::open_or_create(temdir_path, &genesis_block(), &initial_state()).unwrap(); RocksDBIO::open_or_create(temdir_path, &genesis_block(), &initial_state()).unwrap();
let last_id = dbio.get_meta_last_block_in_db().unwrap(); let last_id = dbio.get_meta_last_block_in_db().unwrap();
let last_block = dbio.get_block(last_id).unwrap(); let last_block = dbio.get_block(last_id).unwrap().unwrap();
let prev_hash = last_block.header.hash; let prev_hash = last_block.header.hash;
let transfer_tx = transfer(1, 0, true); let transfer_tx = transfer(1, 0, true);
@ -1153,7 +1157,7 @@ mod tests {
dbio.put_block(&block, [1; 32]).unwrap(); dbio.put_block(&block, [1; 32]).unwrap();
let last_id = dbio.get_meta_last_block_in_db().unwrap(); let last_id = dbio.get_meta_last_block_in_db().unwrap();
let last_block = dbio.get_block(last_id).unwrap(); let last_block = dbio.get_block(last_id).unwrap().unwrap();
let prev_hash = last_block.header.hash; let prev_hash = last_block.header.hash;
let transfer_tx = transfer(1, 1, true); let transfer_tx = transfer(1, 1, true);
@ -1164,7 +1168,7 @@ mod tests {
dbio.put_block(&block, [2; 32]).unwrap(); dbio.put_block(&block, [2; 32]).unwrap();
let last_id = dbio.get_meta_last_block_in_db().unwrap(); let last_id = dbio.get_meta_last_block_in_db().unwrap();
let last_block = dbio.get_block(last_id).unwrap(); let last_block = dbio.get_block(last_id).unwrap().unwrap();
let prev_hash = last_block.header.hash; let prev_hash = last_block.header.hash;
let transfer_tx = transfer(1, 2, true); let transfer_tx = transfer(1, 2, true);
@ -1175,7 +1179,7 @@ mod tests {
dbio.put_block(&block, [3; 32]).unwrap(); dbio.put_block(&block, [3; 32]).unwrap();
let last_id = dbio.get_meta_last_block_in_db().unwrap(); let last_id = dbio.get_meta_last_block_in_db().unwrap();
let last_block = dbio.get_block(last_id).unwrap(); let last_block = dbio.get_block(last_id).unwrap().unwrap();
let prev_hash = last_block.header.hash; let prev_hash = last_block.header.hash;
let transfer_tx = transfer(1, 3, true); let transfer_tx = transfer(1, 3, true);
@ -1185,10 +1189,16 @@ mod tests {
let block = common::test_utils::produce_dummy_block(5, Some(prev_hash), vec![transfer_tx]); let block = common::test_utils::produce_dummy_block(5, Some(prev_hash), vec![transfer_tx]);
dbio.put_block(&block, [4; 32]).unwrap(); dbio.put_block(&block, [4; 32]).unwrap();
let control_block_id1 = dbio.get_block_id_by_hash(control_hash1.0).unwrap(); let control_block_id1 = dbio.get_block_id_by_hash(control_hash1.0).unwrap().unwrap();
let control_block_id2 = dbio.get_block_id_by_hash(control_hash2.0).unwrap(); let control_block_id2 = dbio.get_block_id_by_hash(control_hash2.0).unwrap().unwrap();
let control_block_id3 = dbio.get_block_id_by_tx_hash(control_tx_hash1.0).unwrap(); let control_block_id3 = dbio
let control_block_id4 = dbio.get_block_id_by_tx_hash(control_tx_hash2.0).unwrap(); .get_block_id_by_tx_hash(control_tx_hash1.0)
.unwrap()
.unwrap();
let control_block_id4 = dbio
.get_block_id_by_tx_hash(control_tx_hash2.0)
.unwrap()
.unwrap();
assert_eq!(control_block_id1, 2); assert_eq!(control_block_id1, 2);
assert_eq!(control_block_id2, 3); assert_eq!(control_block_id2, 3);
@ -1207,7 +1217,7 @@ mod tests {
RocksDBIO::open_or_create(temdir_path, &genesis_block(), &initial_state()).unwrap(); RocksDBIO::open_or_create(temdir_path, &genesis_block(), &initial_state()).unwrap();
let last_id = dbio.get_meta_last_block_in_db().unwrap(); let last_id = dbio.get_meta_last_block_in_db().unwrap();
let last_block = dbio.get_block(last_id).unwrap(); let last_block = dbio.get_block(last_id).unwrap().unwrap();
let prev_hash = last_block.header.hash; let prev_hash = last_block.header.hash;
let transfer_tx = transfer(1, 0, true); let transfer_tx = transfer(1, 0, true);
@ -1217,7 +1227,7 @@ mod tests {
dbio.put_block(&block, [1; 32]).unwrap(); dbio.put_block(&block, [1; 32]).unwrap();
let last_id = dbio.get_meta_last_block_in_db().unwrap(); let last_id = dbio.get_meta_last_block_in_db().unwrap();
let last_block = dbio.get_block(last_id).unwrap(); let last_block = dbio.get_block(last_id).unwrap().unwrap();
let prev_hash = last_block.header.hash; let prev_hash = last_block.header.hash;
let transfer_tx = transfer(1, 1, true); let transfer_tx = transfer(1, 1, true);
@ -1227,7 +1237,7 @@ mod tests {
dbio.put_block(&block, [2; 32]).unwrap(); dbio.put_block(&block, [2; 32]).unwrap();
let last_id = dbio.get_meta_last_block_in_db().unwrap(); let last_id = dbio.get_meta_last_block_in_db().unwrap();
let last_block = dbio.get_block(last_id).unwrap(); let last_block = dbio.get_block(last_id).unwrap().unwrap();
let prev_hash = last_block.header.hash; let prev_hash = last_block.header.hash;
let transfer_tx = transfer(1, 2, true); let transfer_tx = transfer(1, 2, true);
@ -1237,7 +1247,7 @@ mod tests {
dbio.put_block(&block, [3; 32]).unwrap(); dbio.put_block(&block, [3; 32]).unwrap();
let last_id = dbio.get_meta_last_block_in_db().unwrap(); let last_id = dbio.get_meta_last_block_in_db().unwrap();
let last_block = dbio.get_block(last_id).unwrap(); let last_block = dbio.get_block(last_id).unwrap().unwrap();
let prev_hash = last_block.header.hash; let prev_hash = last_block.header.hash;
let transfer_tx = transfer(1, 3, true); let transfer_tx = transfer(1, 3, true);
@ -1285,7 +1295,7 @@ mod tests {
RocksDBIO::open_or_create(temdir_path, &genesis_block(), &initial_state()).unwrap(); RocksDBIO::open_or_create(temdir_path, &genesis_block(), &initial_state()).unwrap();
let last_id = dbio.get_meta_last_block_in_db().unwrap(); let last_id = dbio.get_meta_last_block_in_db().unwrap();
let last_block = dbio.get_block(last_id).unwrap(); let last_block = dbio.get_block(last_id).unwrap().unwrap();
let prev_hash = last_block.header.hash; let prev_hash = last_block.header.hash;
let transfer_tx = transfer(1, 0, true); let transfer_tx = transfer(1, 0, true);
@ -1297,7 +1307,7 @@ mod tests {
dbio.put_block(&block, [1; 32]).unwrap(); dbio.put_block(&block, [1; 32]).unwrap();
let last_id = dbio.get_meta_last_block_in_db().unwrap(); let last_id = dbio.get_meta_last_block_in_db().unwrap();
let last_block = dbio.get_block(last_id).unwrap(); let last_block = dbio.get_block(last_id).unwrap().unwrap();
let prev_hash = last_block.header.hash; let prev_hash = last_block.header.hash;
let transfer_tx = transfer(1, 1, true); let transfer_tx = transfer(1, 1, true);
@ -1309,7 +1319,7 @@ mod tests {
dbio.put_block(&block, [2; 32]).unwrap(); dbio.put_block(&block, [2; 32]).unwrap();
let last_id = dbio.get_meta_last_block_in_db().unwrap(); let last_id = dbio.get_meta_last_block_in_db().unwrap();
let last_block = dbio.get_block(last_id).unwrap(); let last_block = dbio.get_block(last_id).unwrap().unwrap();
let prev_hash = last_block.header.hash; let prev_hash = last_block.header.hash;
let transfer_tx = transfer(1, 2, true); let transfer_tx = transfer(1, 2, true);
@ -1321,7 +1331,7 @@ mod tests {
dbio.put_block(&block, [3; 32]).unwrap(); dbio.put_block(&block, [3; 32]).unwrap();
let last_id = dbio.get_meta_last_block_in_db().unwrap(); let last_id = dbio.get_meta_last_block_in_db().unwrap();
let last_block = dbio.get_block(last_id).unwrap(); let last_block = dbio.get_block(last_id).unwrap().unwrap();
let prev_hash = last_block.header.hash; let prev_hash = last_block.header.hash;
let transfer_tx = transfer(1, 3, true); let transfer_tx = transfer(1, 3, true);

View File

@ -442,7 +442,7 @@ impl RocksDBIO {
Ok(()) Ok(())
} }
pub fn get_block(&self, block_id: u64) -> DbResult<Block> { pub fn get_block(&self, block_id: u64) -> DbResult<Option<Block>> {
let cf_block = self.block_column(); let cf_block = self.block_column();
let res = self let res = self
.db .db
@ -458,16 +458,14 @@ impl RocksDBIO {
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res { if let Some(data) = res {
Ok(borsh::from_slice::<Block>(&data).map_err(|serr| { Ok(Some(borsh::from_slice::<Block>(&data).map_err(|serr| {
DbError::borsh_cast_message( DbError::borsh_cast_message(
serr, serr,
Some("Failed to deserialize block data".to_owned()), Some("Failed to deserialize block data".to_owned()),
) )
})?) })?))
} else { } else {
Err(DbError::db_interaction_error( Ok(None)
"Block on this id not found".to_owned(),
))
} }
} }
@ -495,7 +493,7 @@ impl RocksDBIO {
})?) })?)
} else { } else {
Err(DbError::db_interaction_error( Err(DbError::db_interaction_error(
"Block on this id not found".to_owned(), "NSSA state not found".to_owned(),
)) ))
} }
} }
@ -512,9 +510,9 @@ impl RocksDBIO {
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))? .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?
.is_none() .is_none()
{ {
return Err(DbError::db_interaction_error( return Err(DbError::db_interaction_error(format!(
"Block on this id not found".to_owned(), "Block with id {block_id} not found"
)); )));
} }
self.db self.db
@ -525,7 +523,9 @@ impl RocksDBIO {
} }
pub fn mark_block_as_finalized(&self, block_id: u64) -> DbResult<()> { pub fn mark_block_as_finalized(&self, block_id: u64) -> DbResult<()> {
let mut block = self.get_block(block_id)?; let mut block = self.get_block(block_id)?.ok_or_else(|| {
DbError::db_interaction_error(format!("Block with id {block_id} not found"))
})?;
block.bedrock_status = BedrockStatus::Finalized; block.bedrock_status = BedrockStatus::Finalized;
let cf_block = self.block_column(); let cf_block = self.block_column();

View File

@ -13,8 +13,8 @@ crate-type = ["rlib", "cdylib", "staticlib"]
[dependencies] [dependencies]
wallet.workspace = true wallet.workspace = true
nssa.workspace = true nssa.workspace = true
common.workspace = true
nssa_core.workspace = true nssa_core.workspace = true
sequencer_service_rpc = { workspace = true, features = ["client"] }
tokio.workspace = true tokio.workspace = true
[build-dependencies] [build-dependencies]

View File

@ -123,7 +123,7 @@ pub unsafe extern "C" fn wallet_ffi_get_private_account_keys(
}; };
// NPK is a 32-byte array // NPK is a 32-byte array
let npk_bytes = key_chain.nullifer_public_key.0; let npk_bytes = key_chain.nullifier_public_key.0;
// VPK is a compressed secp256k1 point (33 bytes) // VPK is a compressed secp256k1 point (33 bytes)
let vpk_bytes = key_chain.viewing_public_key.to_bytes(); let vpk_bytes = key_chain.viewing_public_key.to_bytes();

View File

@ -28,7 +28,7 @@
use std::sync::OnceLock; use std::sync::OnceLock;
use common::error::ExecutionFailureKind; use ::wallet::ExecutionFailureKind;
// Re-export public types for cbindgen // Re-export public types for cbindgen
pub use error::WalletFfiError as FfiError; pub use error::WalletFfiError as FfiError;
use tokio::runtime::Handle; use tokio::runtime::Handle;

View File

@ -75,10 +75,9 @@ pub unsafe extern "C" fn wallet_ffi_claim_pinata(
let pinata = Pinata(&wallet); let pinata = Pinata(&wallet);
match block_on(pinata.claim(pinata_id, winner_id, solution)) { match block_on(pinata.claim(pinata_id, winner_id, solution)) {
Ok(response) => { Ok(tx_hash) => {
let tx_hash = CString::new(response.tx_hash.to_string()) let tx_hash = CString::new(tx_hash.to_string())
.map_or(ptr::null_mut(), std::ffi::CString::into_raw); .map_or(ptr::null_mut(), std::ffi::CString::into_raw);
unsafe { unsafe {
(*out_result).tx_hash = tx_hash; (*out_result).tx_hash = tx_hash;
(*out_result).success = true; (*out_result).success = true;
@ -181,8 +180,8 @@ pub unsafe extern "C" fn wallet_ffi_claim_pinata_private_owned_already_initializ
pinata pinata
.claim_private_owned_account_already_initialized(pinata_id, winner_id, solution, proof), .claim_private_owned_account_already_initialized(pinata_id, winner_id, solution, proof),
) { ) {
Ok((response, _shared_key)) => { Ok((tx_hash, _shared_key)) => {
let tx_hash = CString::new(response.tx_hash.to_string()) let tx_hash = CString::new(tx_hash.to_string())
.map_or(ptr::null_mut(), std::ffi::CString::into_raw); .map_or(ptr::null_mut(), std::ffi::CString::into_raw);
unsafe { unsafe {
@ -266,8 +265,8 @@ pub unsafe extern "C" fn wallet_ffi_claim_pinata_private_owned_not_initialized(
let pinata = Pinata(&wallet); let pinata = Pinata(&wallet);
match block_on(pinata.claim_private_owned_account(pinata_id, winner_id, solution)) { match block_on(pinata.claim_private_owned_account(pinata_id, winner_id, solution)) {
Ok((response, _shared_key)) => { Ok((tx_hash, _shared_key)) => {
let tx_hash = CString::new(response.tx_hash.to_string()) let tx_hash = CString::new(tx_hash.to_string())
.map_or(ptr::null_mut(), std::ffi::CString::into_raw); .map_or(ptr::null_mut(), std::ffi::CString::into_raw);
unsafe { unsafe {

View File

@ -1,5 +1,7 @@
//! Block synchronization functions. //! Block synchronization functions.
use sequencer_service_rpc::RpcClient as _;
use crate::{ use crate::{
block_on, block_on,
error::{print_error, WalletFfiError}, error::{print_error, WalletFfiError},
@ -134,10 +136,10 @@ pub unsafe extern "C" fn wallet_ffi_get_current_block_height(
} }
}; };
match block_on(wallet.sequencer_client.get_last_block()) { match block_on(wallet.sequencer_client.get_last_block_id()) {
Ok(response) => { Ok(last_block_id) => {
unsafe { unsafe {
*out_block_height = response.last_block; *out_block_height = last_block_id;
} }
WalletFfiError::Success WalletFfiError::Success
} }

Some files were not shown because too many files have changed in this diff Show More