addressed comments

This commit is contained in:
jonesmarvin8 2026-05-14 21:19:25 -04:00
parent 524a06099c
commit 52026c4065
232 changed files with 15475 additions and 9878 deletions

View File

@ -14,6 +14,8 @@ ignore = [
{ id = "RUSTSEC-2025-0141", reason = "`bincode` is unmaintained but continuing to use it." },
{ id = "RUSTSEC-2023-0089", reason = "atomic-polyfill is pulled transitively via risc0-zkvm; waiting on upstream fix (see https://github.com/risc0/risc0/issues/3453)" },
{ id = "RUSTSEC-2026-0097", reason = "`rand` v0.8.5 is present transitively from logos crates, modification may break integration" },
{ id = "RUSTSEC-2026-0118", reason = "`hickory-proto` v0.25.0-alpha.5 is present transitively from logos crates, modification may break integration" },
{ id = "RUSTSEC-2026-0119", reason = "`hickory-proto` v0.25.0-alpha.5 is present transitively from logos crates, modification may break integration" },
]
yanked = "deny"
unused-ignored-advisory = "deny"

View File

@ -134,7 +134,7 @@ jobs:
integration-tests:
runs-on: ubuntu-latest
timeout-minutes: 60
timeout-minutes: 90 # TODO: Apply CI cache to speed this up
steps:
- uses: actions/checkout@v5
with:
@ -158,39 +158,11 @@ jobs:
env:
RISC0_DEV_MODE: "1"
RUST_LOG: "info"
run: cargo nextest run -p integration_tests -- --skip tps_test --skip indexer
integration-tests-indexer:
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v5
with:
ref: ${{ github.event.pull_request.head.sha || github.head_ref }}
- uses: ./.github/actions/install-system-deps
- uses: ./.github/actions/install-risc0
- uses: ./.github/actions/install-logos-blockchain-circuits
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
- name: Install active toolchain
run: rustup install
- name: Install nextest
run: cargo install --locked cargo-nextest
- name: Run tests
env:
RISC0_DEV_MODE: "1"
RUST_LOG: "info"
run: cargo nextest run -p integration_tests indexer -- --skip tps_test
run: cargo nextest run -p integration_tests -- --skip tps_test
valid-proof-test:
runs-on: ubuntu-latest
timeout-minutes: 60
timeout-minutes: 90
steps:
- uses: actions/checkout@v5
with:
@ -225,7 +197,7 @@ jobs:
- uses: ./.github/actions/install-risc0
- name: Install just
run: cargo install just
run: cargo install --locked just
- name: Build artifacts
run: just build-artifacts

1733
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -20,6 +20,9 @@ members = [
"programs/token",
"programs/associated_token_account/core",
"programs/associated_token_account",
"programs/authenticated_transfer/core",
"programs/faucet/core",
"programs/vault/core",
"sequencer/core",
"sequencer/service",
"sequencer/service/protocol",
@ -36,10 +39,9 @@ members = [
"examples/program_deployment",
"examples/program_deployment/methods",
"examples/program_deployment/methods/guest",
"bedrock_client",
"testnet_initial_state",
"indexer/ffi",
"keycard_wallet",
"indexer_ffi",
]
[workspace.dependencies]
@ -57,9 +59,9 @@ indexer_core = { path = "indexer/core" }
indexer_service = { path = "indexer/service" }
indexer_service_protocol = { path = "indexer/service/protocol" }
indexer_service_rpc = { path = "indexer/service/rpc" }
indexer_ffi = { path = "indexer_ffi" }
wallet = { path = "wallet" }
wallet-ffi = { path = "wallet-ffi", default-features = false }
indexer_ffi = { path = "indexer/ffi" }
clock_core = { path = "programs/clock/core" }
token_core = { path = "programs/token/core" }
token_program = { path = "programs/token" }
@ -67,8 +69,10 @@ amm_core = { path = "programs/amm/core" }
amm_program = { path = "programs/amm" }
ata_core = { path = "programs/associated_token_account/core" }
ata_program = { path = "programs/associated_token_account" }
authenticated_transfer_core = { path = "programs/authenticated_transfer/core" }
faucet_core = { path = "programs/faucet/core" }
vault_core = { path = "programs/vault/core" }
test_program_methods = { path = "test_program_methods" }
bedrock_client = { path = "bedrock_client" }
testnet_initial_state = { path = "testnet_initial_state" }
keycard_wallet = { path = "keycard_wallet" }
@ -82,6 +86,7 @@ tokio-util = "0.7.18"
risc0-zkvm = { version = "3.0.5", features = ['std'] }
risc0-build = "3.0.5"
anyhow = "1.0.98"
derive_more = "2.1.1"
num_cpus = "1.13.1"
openssl = { version = "0.10", features = ["vendored"] }
openssl-probe = { version = "0.1.2" }
@ -123,13 +128,13 @@ url = { version = "2.5.4", features = ["serde"] }
tokio-retry = "0.3.0"
schemars = "1.2"
async-stream = "0.3.6"
pyo3 = { version = "0.24", features = ["auto-initialize"] }
logos-blockchain-common-http-client = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "1da154c74b911318fb853d37261f8a05ffe513b4" }
logos-blockchain-key-management-system-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "1da154c74b911318fb853d37261f8a05ffe513b4" }
logos-blockchain-core = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "1da154c74b911318fb853d37261f8a05ffe513b4" }
logos-blockchain-chain-broadcast-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "1da154c74b911318fb853d37261f8a05ffe513b4" }
logos-blockchain-chain-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "1da154c74b911318fb853d37261f8a05ffe513b4" }
logos-blockchain-common-http-client = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "ee281a447d95a951752461ee0a6e88eb4a0f17cf" }
logos-blockchain-key-management-system-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "ee281a447d95a951752461ee0a6e88eb4a0f17cf" }
logos-blockchain-core = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "ee281a447d95a951752461ee0a6e88eb4a0f17cf" }
logos-blockchain-chain-broadcast-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "ee281a447d95a951752461ee0a6e88eb4a0f17cf" }
logos-blockchain-chain-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "ee281a447d95a951752461ee0a6e88eb4a0f17cf" }
logos-blockchain-zone-sdk = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "ee281a447d95a951752461ee0a6e88eb4a0f17cf" }
rocksdb = { version = "0.24.0", default-features = false, features = [
"snappy",
@ -149,6 +154,7 @@ actix-web = { version = "4.13.0", default-features = false, features = [
] }
clap = { version = "4.5.42", features = ["derive", "env"] }
reqwest = { version = "0.12", features = ["json", "rustls-tls", "stream"] }
pyo3 = { version = "0.24", features = ["auto-initialize"] }
# Profile for leptos WASM release builds
[profile.wasm-release]

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -39,42 +39,42 @@ cryptarchia:
threshold: 1
timestamp: 0
gossipsub_protocol: /integration/logos-blockchain/cryptarchia/proto/1.0.0
genesis_state:
mantle_tx:
ops:
genesis_block:
header:
version: Bedrock
parent_block: '0000000000000000000000000000000000000000000000000000000000000000'
slot: 0
block_root: b5f8787ac23674822414c70eea15d842da38f2e806ede1a73cf7b5cf0277da07
proof_of_leadership:
proof: '0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
entropy_contribution: '0000000000000000000000000000000000000000000000000000000000000000'
leader_key: '0000000000000000000000000000000000000000000000000000000000000000'
voucher_cm: '0000000000000000000000000000000000000000000000000000000000000000'
signature: '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
transactions:
- mantle_tx:
ops:
- opcode: 0
payload:
inputs: [ ]
inputs: []
outputs:
- value: 1
pk: d204000000000000000000000000000000000000000000000000000000000000
- value: 100
pk: 2e03b2eff5a45478e7e79668d2a146cf2c5c7925bce927f2b1c67f2ab4fc0d26
- value: 1
pk: d204000000000000000000000000000000000000000000000000000000000000
- value: 100
pk: '2e03b2eff5a45478e7e79668d2a146cf2c5c7925bce927f2b1c67f2ab4fc0d26'
- value: 1
pk: ed266e6e887b9b97059dc1aa1b7b2e19b934291753c6336a163fe4ebaa28e717
- opcode: 17
payload:
channel_id: "0000000000000000000000000000000000000000000000000000000000000000"
inscription: [ 103, 101, 110, 101, 115, 105, 115 ] # "genesis" in bytes
parent: "0000000000000000000000000000000000000000000000000000000000000000"
signer: "0000000000000000000000000000000000000000000000000000000000000000"
execution_gas_price: 0
storage_gas_price: 0
ops_proofs:
- !ZkSig
pi_a: [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
]
pi_b: [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
]
pi_c: [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
]
- NoProof
channel_id: '0000000000000000000000000000000000000000000000000000000000000000'
inscription: '67656e65736973'
parent: '0000000000000000000000000000000000000000000000000000000000000000'
signer: '0000000000000000000000000000000000000000000000000000000000000000'
execution_gas_price: 0
storage_gas_price: 0
ops_proofs:
- !Ed25519Sig '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
- !Ed25519Sig '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
time:
slot_duration: '1.0'
chain_start_time: PLACEHOLDER_CHAIN_START_TIME

View File

@ -1,7 +1,7 @@
services:
logos-blockchain-node-0:
image: ghcr.io/logos-blockchain/logos-blockchain@sha256:c5243681b353278cabb562a176f0a5cfbefc2056f18cebc47fe0e3720c29fb12
image: ghcr.io/logos-blockchain/logos-blockchain@sha256:9f1829dea335c56f6ff68ae37ea872ed5313b96b69e8ffe143c02b7217de85fc
ports:
- "${PORT:-8080}:18080/tcp"
volumes:

View File

@ -10,6 +10,7 @@ workspace = true
[dependencies]
nssa.workspace = true
nssa_core.workspace = true
authenticated_transfer_core.workspace = true
clock_core.workspace = true
anyhow.workspace = true

View File

@ -114,11 +114,6 @@ impl HashableBlockData {
bedrock_parent_id,
}
}
#[must_use]
pub fn block_hash(&self) -> BlockHash {
OwnHasher::hash(&borsh::to_vec(&self).unwrap())
}
}
impl From<Block> for HashableBlockData {

View File

@ -47,12 +47,11 @@ pub fn produce_dummy_empty_transaction() -> NSSATransaction {
let program_id = nssa::program::Program::authenticated_transfer_program().id();
let account_ids = vec![];
let nonces = vec![];
let instruction_data: u128 = 0;
let message = nssa::public_transaction::Message::try_new(
program_id,
account_ids,
nonces,
instruction_data,
authenticated_transfer_core::Instruction::Initialize,
)
.unwrap();
let private_key = nssa::PrivateKey::try_new([1; 32]).unwrap();
@ -78,7 +77,9 @@ pub fn create_transaction_native_token_transfer(
program_id,
account_ids,
nonces,
balance_to_move,
authenticated_transfer_core::Instruction::Transfer {
amount: balance_to_move,
},
)
.unwrap();
let witness_set = nssa::public_transaction::WitnessSet::for_message(&message, &[signing_key]);

View File

@ -67,13 +67,26 @@ impl NSSATransaction {
}
/// Validates the transaction against the current state and returns the resulting diff
/// without applying it. Rejects transactions that modify clock system accounts.
/// without applying it. Rejects transactions that modify clock system accounts and
/// rejects unsafe modifications of the system faucet account. Also rejects direct
/// invocation of the faucet program for user-submitted transactions.
///
/// This check is required for all user transactions. Only sequencer transaction may bypass this
/// check.
pub fn validate_on_state(
&self,
state: &V03State,
block_id: BlockId,
timestamp: Timestamp,
) -> Result<ValidatedStateDiff, nssa::error::NssaError> {
if let Self::Public(tx) = self
&& tx.message().program_id == nssa::program::Program::faucet().id()
{
return Err(nssa::error::NssaError::InvalidInput(
"Transaction invokes restricted faucet program".into(),
));
}
let diff = match self {
Self::Public(tx) => {
ValidatedStateDiff::from_public_transaction(tx, state, block_id, timestamp)

View File

@ -1,160 +1,8 @@
{
"home": "./indexer/service",
"consensus_info_polling_interval": "1s",
"bedrock_client_config": {
"addr": "http://logos-blockchain-node-0:18080",
"backoff": {
"start_delay": "100ms",
"max_retries": 5
}
"bedrock_config": {
"addr": "http://logos-blockchain-node-0:18080"
},
"channel_id": "0101010101010101010101010101010101010101010101010101010101010101",
"initial_accounts": [
{
"account_id": "6iArKUXxhUJqS7kCaPNhwMWt3ro71PDyBj7jwAyE2VQV",
"balance": 10000
},
{
"account_id": "7wHg9sbJwc6h3NP1S9bekfAzB8CHifEcxKswCKUt3YQo",
"balance": 20000
}
],
"initial_commitments": [
{
"npk":[
177,
64,
1,
11,
87,
38,
254,
159,
231,
165,
1,
94,
64,
137,
243,
76,
249,
101,
251,
129,
33,
101,
189,
30,
42,
11,
191,
34,
103,
186,
227,
230
] ,
"account": {
"program_owner": [
0,
0,
0,
0,
0,
0,
0,
0
],
"balance": 10000,
"data": [],
"nonce": 0
}
},
{
"npk": [
32,
67,
72,
164,
106,
53,
66,
239,
141,
15,
52,
230,
136,
177,
2,
236,
207,
243,
134,
135,
210,
143,
87,
232,
215,
128,
194,
120,
113,
224,
4,
165
],
"account": {
"program_owner": [
0,
0,
0,
0,
0,
0,
0,
0
],
"balance": 20000,
"data": [],
"nonce": 0
}
}
],
"signing_key": [
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37
]
"channel_id": "0101010101010101010101010101010101010101010101010101010101010101"
}

View File

@ -1,7 +1,5 @@
{
"home": "/var/lib/sequencer_service",
"genesis_id": 1,
"is_genesis_random": true,
"max_num_tx_in_block": 20,
"max_block_size": "1 MiB",
"mempool_max_size": 10000,
@ -16,117 +14,29 @@
"node_url": "http://logos-blockchain-node-0:18080"
},
"indexer_rpc_url": "ws://indexer_service:8779",
"initial_accounts": [
"genesis": [
{
"account_id": "6iArKUXxhUJqS7kCaPNhwMWt3ro71PDyBj7jwAyE2VQV",
"balance": 10000
},
{
"account_id": "7wHg9sbJwc6h3NP1S9bekfAzB8CHifEcxKswCKUt3YQo",
"balance": 20000
}
],
"initial_commitments": [
{
"npk":[
177,
64,
1,
11,
87,
38,
254,
159,
231,
165,
1,
94,
64,
137,
243,
76,
249,
101,
251,
129,
33,
101,
189,
30,
42,
11,
191,
34,
103,
186,
227,
230
] ,
"account": {
"program_owner": [
0,
0,
0,
0,
0,
0,
0,
0
],
"balance": 10000,
"data": [],
"nonce": 0
"supply_account": {
"account_id": "6iArKUXxhUJqS7kCaPNhwMWt3ro71PDyBj7jwAyE2VQV",
"balance": 10000
}
},
{
"npk": [
32,
67,
72,
164,
106,
53,
66,
239,
141,
15,
52,
230,
136,
177,
2,
236,
207,
243,
134,
135,
210,
143,
87,
232,
215,
128,
194,
120,
113,
224,
4,
165
],
"account": {
"program_owner": [
0,
0,
0,
0,
0,
0,
0,
0
],
"balance": 20000,
"data": [],
"nonce": 0
"supply_account": {
"account_id": "7wHg9sbJwc6h3NP1S9bekfAzB8CHifEcxKswCKUt3YQo",
"balance": 20000
}
},
{
"supply_account": {
"account_id": "61EsoYN6gvTLkveh1YSTMG3yJkncpHy5EGmxhSK4ew29",
"balance": 10000
}
},
{
"supply_account": {
"account_id": "3m6HQmCgmAvsxZtxAHPqqEqoBG4335fCG8TzxigyW7rE",
"balance": 20000
}
}
],

View File

@ -6,12 +6,17 @@ This tutorial walks you through using Keycard with Wallet CLI. Keycard is option
### Required hardware
- Keycard (Blank) - a Keycard, directly, from Keycard.tech cannot (currently) be updated to support LEE.
- Smartcard reader
- Applets (`math.cap` and `LEE_keycard.cap`). Eventually, both of these applets will be available in separate repos.
- `math.cap` is an applet to speed up computations on Keycard; developed by Bitgamma (Keycard-tech team).
- `LEE_keycard.cap` is an applet that contains LEE keycard protocol; developed by Bitgamma (Keycard-tech team)
### Firmware installation
Installation:
1. Install math applet on your keycard; this process only needs to be done once. In the root of repo:
```
sudo apt-get install -y default-jdk
wget https://github.com/martinpaljak/GlobalPlatformPro/releases/download/v25.10.20/gp.jar -P keycard_wallet/keycard_applets
cd keycard_wallet/keycard_applets
java -jar gp.jar --key c212e073ff8b4bbfaff4de8ab655221f --load math.cap
```
@ -19,6 +24,7 @@ Installation:
- Keycard Desktop is used to install the LEE key protocol to a blank keycard.
- Select (Re)Install Applet and upload the key binary (`keycard_wallet/keycard_applets/LEE_keycard.cap`).
![keycard-desktop.png](keycard-desktop.png)
- **Important:** keycard can only connect with one application at a time; if Keycard-Desktop is using keycard then Wallet CLI cannot access the same keycard, and vice-versa.
## Wallet with Keycard
Keycard functionality is available to Wallet CLI by setting up the following Python virtual environment:
@ -40,15 +46,32 @@ pip install -e keycard_wallet/python/keycard-py
source venv/bin/activate
```
## PIN entry
Each Keycard command prompts for a PIN interactively. To avoid re-entering it across multiple commands, export it as an environment variable:
```bash
export KEYCARD_PIN=123456
```
Unset it when done:
```bash
unset KEYCARD_PIN
```
## Keycard Commands
### Keycard
| Command | Key-path options | Description |
|-----------------------------------|------------------|--------------------------------------------------------------------------|
| `wallet keycard available` | — | Checks whether a Keycard reader and card are accessible |
| `wallet keycard load` | — | Loads a mnemonic phrase onto the Keycard |
| `wallet keycard get-private-keys` | `--key-path` | Retrieves private account keys (nsk, vsk) for the given BIP32 path |
| Command | Description |
|-----------------------------------|--------------------------------------------------------------------------|
| `wallet keycard available` | Checks whether a Keycard reader and card are accessible |
| `wallet keycard init` | Initializes a blank Keycard with a PIN and a generated PUK |
| `wallet keycard connect` | Establishes and saves a pairing with the Keycard |
| `wallet keycard disconnect` | Unpairs the Keycard and clears the saved pairing |
| `wallet keycard load` | Loads a mnemonic phrase onto the Keycard |
| `wallet keycard get-private-keys` | Retrieves private account keys (nsk, vsk) for a given BIP32 path |
1. Check keycard availability
```bash
@ -58,16 +81,40 @@ wallet keycard available
✅ Keycard is available.
```
2. Load a mnemonic phrase
2. Initialize a blank Keycard
```bash
wallet keycard load --mnemonic "fashion degree mountain wool question damp current pond grow dolphin chronic then"
wallet keycard init
# Output:
Keycard PIN:
Keycard PUK: 847302916485
Record this PUK and store it somewhere safe. It cannot be recovered.
✅ Keycard initialized successfully.
```
3. Connect (pair and save pairing for subsequent commands)
```bash
wallet keycard connect
# Output:
Keycard PIN:
✅ Keycard paired and ready.
```
4. Load a mnemonic phrase
```bash
# Supply mnemonic via environment variable to avoid interactive prompt
export KEYCARD_MNEMONIC="fashion degree mountain wool question damp current pond grow dolphin chronic then"
wallet keycard load
unset KEYCARD_MNEMONIC
# Output:
Keycard PIN:
✅ Keycard is now connected to wallet.
✅ Mnemonic phrase loaded successfully.
```
3. Get private keys for a path
5. Get private keys for a path
```bash
wallet keycard get-private-keys --key-path "m/44'/60'/0'/0/0"
@ -77,17 +124,31 @@ nsk: 55e505bf925e536c843a12ebc08c41ca5f4761eeeb7fa33725f0b44e6f1ac2e4
vsk: 30f798893977a7b7263d1f77abf58e11e014428c92030d6a02fe363cceb41ffa
```
6. Disconnect (unpair and clear saved pairing)
```bash
wallet keycard disconnect
# Output:
Keycard PIN:
✅ Keycard unpaired and pairing cleared.
```
### Pinata (testnet)
| Command | Key-path options | Description |
|-----------------------|-------------------------------|--------------------------------------------------------------------------|
| `wallet pinata claim` | `--key-path` | Claims a testnet pinata reward to a public or private recipient account |
| Command | Description |
|-----------------------|--------------------------------------------------------------------------|
| `wallet pinata claim` | Claims a testnet pinata reward to a public or private recipient account |
Note: The recipient account must be initialized with `wallet auth-transfer init` before claiming.
`--to` accepts any of:
- A BIP32 key path — uses Keycard (e.g. `m/44'/60'/0'/0/0`)
- An account ID with privacy prefix (e.g. `Public/9bKm...`)
- An account label (e.g. `my-account`)
1. Claim to a Keycard public account
```bash
wallet pinata claim --key-path "m/44'/60'/0'/0/0"
wallet pinata claim --to "m/44'/60'/0'/0/0"
# Output:
Keycard PIN:
@ -98,7 +159,7 @@ Transaction hash is fd320c01f5469e62d2486afa1d9d5be39afcca0cd01d1575905b7acd95cf
2. Claim to a local wallet account by label
```bash
wallet pinata claim --to-label my-account
wallet pinata claim --to my-account
# Output:
Transaction hash is 2c8a4f1e903d5b76e80214c5b82e1d46a105e28930ad71bcce48f2d07b49a16f
@ -106,16 +167,21 @@ Transaction hash is 2c8a4f1e903d5b76e80214c5b82e1d46a105e28930ad71bcce48f2d07b49
### Authenticated-transfer program
| Command | Key-path options | Description |
|-----------------------------|--------------------------------------|------------------------------------------------------------------------------------|
| `wallet auth-transfer init` | `--key-path` | Registers a public or private account with the auth-transfer program |
| `wallet auth-transfer send` | `--from-key-path`, `--to-key-path` | Sends native tokens; either or both endpoints can be Keycard public accounts |
| Command | Description |
|-----------------------------|-------------------------------------------------------------------------------|
| `wallet auth-transfer init` | Registers an account with the auth-transfer program |
| `wallet auth-transfer send` | Sends native tokens between accounts |
For `send`, `--from-key-path` and `--to-key-path` can be used together (both Keycard) or individually (one Keycard, one local/label). Shielded sends to foreign private accounts use `--to-npk`/`--to-vpk` instead of `--to-key-path`.
`--account` (for `init`) and `--from`/`--to` (for `send`) each accept any of:
- A BIP32 key path — uses Keycard (e.g. `m/44'/60'/0'/0/0`)
- An account ID with privacy prefix (e.g. `Public/9bKm...`)
- An account label (e.g. `my-account`)
For `send`, foreign recipient accounts (not in the local wallet and not a Keycard path) do not need to sign — pass their account ID directly via `--to`. Shielded sends to foreign private accounts use `--to-npk`/`--to-vpk`.
1. Initialize a Keycard public account
```bash
wallet auth-transfer init --key-path "m/44'/60'/0'/0/0"
wallet auth-transfer init --account "m/44'/60'/0'/0/0"
# Output:
Keycard PIN:
@ -125,8 +191,8 @@ Transaction hash is 49c16940493e1618c393645c1211b5c793d405838221c29ac6562a8a4b11
2. Send native tokens between two Keycard accounts
```bash
wallet auth-transfer send \
--from-key-path "m/44'/60'/0'/0/0" \
--to-key-path "m/44'/60'/0'/0/1" \
--from "m/44'/60'/0'/0/0" \
--to "m/44'/60'/0'/0/1" \
--amount 40
# Output:
@ -134,15 +200,26 @@ Keycard PIN:
Transaction hash is 1a9764ab20763dcc1ffb51c6e9badd5a6316a773759032ca48e0eee59caaf488
```
3. Send native tokens from Keycard to a local wallet account
3. Send native tokens from a Keycard account to a foreign account
```bash
# Note: non-keycard account ID below — replace with actual account ID or use --to-label
wallet auth-transfer send \
--from-key-path "m/44'/60'/0'/0/0" \
--to "Public/9bKmZ4n7PqVRxEtY3dWsQjA2cHrFT5LpDoGXM8wJuNv6" \
--from "m/44'/60'/0'/0/0" \
--to "Public/9bKmZ4n7PqVRxEtY3dWsQjA2cHrFT5LpDoGXM8wJuNv6" \
--amount 20
# Output:
Keycard PIN:
Transaction hash is 3e7b2a91cf804d56fe19084b3c8b25d07e8f243829bc50addf6e2c78b4b09d34
```
4. Send native tokens from a Keycard account to a local wallet account by label
```bash
wallet auth-transfer send \
--from "m/44'/60'/0'/0/0" \
--to my-account \
--amount 20
# Output:
Keycard PIN:
Transaction hash is 7d4c1b8e2f903a56fd19084b3c8b25d07e8f243829bc50addf6e2c78b4b09e45
```

View File

@ -50,8 +50,8 @@ async fn main() {
// Load signing keys to provide authorization
let signing_key = wallet_core
.storage()
.user_data
.get_pub_account_signing_key(account_id)
.key_chain()
.pub_account_signing_key(account_id)
.expect("Input account should be a self owned public account");
// Define the desired greeting in ASCII

View File

@ -86,7 +86,7 @@ pub async fn get_block_by_id(block_id: BlockId) -> Result<Block, ServerFnError>
/// Get latest block ID
#[server]
pub async fn get_latest_block_id() -> Result<BlockId, ServerFnError> {
pub async fn get_latest_block_id() -> Result<Option<BlockId>, ServerFnError> {
use indexer_service_rpc::RpcClient as _;
let client = expect_context::<IndexerRpcClient>();
client

View File

@ -2,7 +2,9 @@
description = "Logos Execution Zone";
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
logos-liblogos.url = "github:logos-co/logos-liblogos";
nixpkgs.follows = "logos-liblogos/nixpkgs";
rust-overlay = {
url = "github:oxalica/rust-overlay";
@ -139,7 +141,7 @@
cargoExtraArgs = "-p indexer_ffi";
postInstall = ''
mkdir -p $out/include
cp indexer_ffi/indexer_ffi.h $out/include/
cp indexer/ffi/indexer_ffi.h $out/include/
''
+ pkgs.lib.optionalString pkgs.stdenv.isDarwin ''
install_name_tool -id @rpath/libindexer_ffi.dylib $out/lib/libindexer_ffi.dylib

View File

@ -9,7 +9,7 @@ workspace = true
[dependencies]
common.workspace = true
bedrock_client.workspace = true
logos-blockchain-zone-sdk.workspace = true
nssa.workspace = true
nssa_core.workspace = true
storage.workspace = true
@ -19,13 +19,14 @@ anyhow.workspace = true
log.workspace = true
serde.workspace = true
humantime-serde.workspace = true
tokio.workspace = true
borsh.workspace = true
futures.workspace = true
url.workspace = true
logos-blockchain-core.workspace = true
serde_json.workspace = true
async-stream.workspace = true
tokio.workspace = true
[dev-dependencies]
tempfile.workspace = true
authenticated_transfer_core.workspace = true

View File

@ -1,11 +1,13 @@
use std::{path::Path, sync::Arc};
use anyhow::Result;
use bedrock_client::HeaderId;
use anyhow::{Context as _, Result};
use common::{
block::{BedrockStatus, Block},
transaction::{NSSATransaction, clock_invocation},
};
use log::info;
use logos_blockchain_core::{header::HeaderId, mantle::ops::channel::MsgId};
use logos_blockchain_zone_sdk::Slot;
use nssa::{Account, AccountId, V03State};
use nssa_core::BlockId;
use storage::indexer::RocksDBIO;
@ -20,14 +22,10 @@ pub struct IndexerStore {
impl IndexerStore {
/// Starting database at the start of new chain.
/// Creates files if necessary.
///
/// ATTENTION: Will overwrite genesis block.
pub fn open_db_with_genesis(
location: &Path,
genesis_block: &Block,
initial_state: &V03State,
) -> Result<Self> {
let dbio = RocksDBIO::open_or_create(location, genesis_block, initial_state)?;
pub fn open_db(location: &Path) -> Result<Self> {
let initial_state = testnet_initial_state::initial_state();
let dbio = RocksDBIO::open_or_create(location, &initial_state)?;
let current_state = dbio.final_state()?;
Ok(Self {
@ -43,8 +41,8 @@ impl IndexerStore {
.map(HeaderId::from))
}
pub fn get_last_block_id(&self) -> Result<u64> {
Ok(self.dbio.get_meta_last_block_in_db()?)
pub fn get_last_block_id(&self) -> Result<Option<u64>> {
self.dbio.get_meta_last_block_id_in_db().map_err(Into::into)
}
pub fn get_block_at_id(&self, id: u64) -> Result<Option<Block>> {
@ -85,24 +83,36 @@ impl IndexerStore {
Ok(self.dbio.get_acc_transactions(acc_id, offset, limit)?)
}
#[must_use]
pub fn genesis_id(&self) -> u64 {
pub fn genesis_id(&self) -> Result<Option<u64>> {
self.dbio
.get_meta_first_block_in_db()
.expect("Must be set at the DB startup")
.get_meta_first_block_id_in_db()
.map_err(Into::into)
}
#[must_use]
pub fn last_block(&self) -> u64 {
self.dbio
.get_meta_last_block_in_db()
.expect("Must be set at the DB startup")
pub fn last_block(&self) -> Result<Option<u64>> {
self.dbio.get_meta_last_block_id_in_db().map_err(Into::into)
}
pub fn get_state_at_block(&self, block_id: u64) -> Result<V03State> {
Ok(self.dbio.calculate_state_for_id(block_id)?)
}
pub fn get_zone_cursor(&self) -> Result<Option<(MsgId, Slot)>> {
let Some(bytes) = self.dbio.get_zone_sdk_indexer_cursor_bytes()? else {
return Ok(None);
};
let cursor: (MsgId, Slot) = serde_json::from_slice(&bytes)
.context("Failed to deserialize stored zone-sdk indexer cursor")?;
Ok(Some(cursor))
}
pub fn set_zone_cursor(&self, cursor: &(MsgId, Slot)) -> Result<()> {
let bytes =
serde_json::to_vec(cursor).context("Failed to serialize zone-sdk indexer cursor")?;
self.dbio.put_zone_sdk_indexer_cursor_bytes(&bytes)?;
Ok(())
}
/// Recalculation of final state directly from DB.
///
/// Used for indexer healthcheck.
@ -118,7 +128,14 @@ impl IndexerStore {
.get_account_by_id(*account_id))
}
pub fn account_state_at_block(&self, account_id: &AccountId, block_id: u64) -> Result<Account> {
Ok(self
.get_state_at_block(block_id)?
.get_account_by_id(*account_id))
}
pub async fn put_block(&self, mut block: Block, l1_header: HeaderId) -> Result<()> {
info!("Applying block {}", block.header.block_id);
{
let mut state_guard = self.current_state.write().await;
@ -133,15 +150,33 @@ impl IndexerStore {
"Last transaction in block must be the clock invocation for the block timestamp"
);
let is_genesis = block.header.block_id == 1;
for transaction in user_txs {
transaction
.clone()
.transaction_stateless_check()?
.execute_check_on_state(
&mut state_guard,
block.header.block_id,
block.header.timestamp,
)?;
if is_genesis {
let genesis_tx = match transaction {
NSSATransaction::Public(public_tx) => public_tx,
NSSATransaction::PrivacyPreserving(_)
| NSSATransaction::ProgramDeployment(_) => {
anyhow::bail!("Genesis block should contain only public transactions")
}
};
state_guard
.transition_from_public_transaction(
genesis_tx,
block.header.block_id,
block.header.timestamp,
)
.context("Failed to execute genesis public transaction")?;
} else {
transaction
.clone()
.transaction_stateless_check()?
.execute_check_on_state(
&mut state_guard,
block.header.block_id,
block.header.timestamp,
)?;
}
}
// Apply the clock invocation directly (it is expected to modify clock accounts).
@ -160,104 +195,131 @@ impl IndexerStore {
// to represent correct block finality
block.bedrock_status = BedrockStatus::Finalized;
info!("Putting block {} into DB", block.header.block_id);
Ok(self.dbio.put_block(&block, l1_header.into())?)
}
}
#[cfg(test)]
mod tests {
use nssa::{AccountId, PublicKey};
use common::{HashType, block::HashableBlockData};
use tempfile::tempdir;
use testnet_initial_state::initial_pub_accounts_private_keys;
use super::*;
fn genesis_block() -> Block {
common::test_utils::produce_dummy_block(1, None, vec![])
}
fn acc1_sign_key() -> nssa::PrivateKey {
nssa::PrivateKey::try_new([1; 32]).unwrap()
}
fn acc2_sign_key() -> nssa::PrivateKey {
nssa::PrivateKey::try_new([2; 32]).unwrap()
}
fn acc1() -> AccountId {
AccountId::from(&PublicKey::new_from_private_key(&acc1_sign_key()))
}
fn acc2() -> AccountId {
AccountId::from(&PublicKey::new_from_private_key(&acc2_sign_key()))
}
#[test]
fn correct_startup() {
let home = tempdir().unwrap();
let storage = IndexerStore::open_db_with_genesis(
home.as_ref(),
&genesis_block(),
&nssa::V03State::new_with_genesis_accounts(
&[(acc1(), 10000), (acc2(), 20000)],
vec![],
0,
),
)
.unwrap();
let storage = IndexerStore::open_db(home.as_ref()).unwrap();
let block = storage.get_block_at_id(1).unwrap().unwrap();
let final_id = storage.get_last_block_id().unwrap();
assert_eq!(block.header.hash, genesis_block().header.hash);
assert_eq!(final_id, 1);
assert_eq!(final_id, None);
}
#[tokio::test]
async fn state_transition() {
let home = tempdir().unwrap();
let storage = IndexerStore::open_db_with_genesis(
home.as_ref(),
&genesis_block(),
&nssa::V03State::new_with_genesis_accounts(
&[(acc1(), 10000), (acc2(), 20000)],
vec![],
0,
),
)
.unwrap();
let storage = IndexerStore::open_db(home.as_ref()).unwrap();
let mut prev_hash = genesis_block().header.hash;
let initial_accounts = initial_pub_accounts_private_keys();
let from = initial_accounts[0].account_id;
let to = initial_accounts[1].account_id;
let sign_key = initial_accounts[0].pub_sign_key.clone();
let from = acc1();
let to = acc2();
let sign_key = acc1_sign_key();
// Submit genesis block
let clock_tx = NSSATransaction::Public(clock_invocation(0));
let genesis_block_data = HashableBlockData {
block_id: 1,
prev_block_hash: HashType::default(),
timestamp: 0,
transactions: vec![clock_tx],
};
let genesis_block = genesis_block_data.into_pending_block(
&common::test_utils::sequencer_sign_key_for_testing(),
[0; 32],
);
let mut prev_hash = Some(genesis_block.header.hash);
storage
.put_block(genesis_block, HeaderId::from([0_u8; 32]))
.await
.unwrap();
for i in 2..10 {
for i in 0..10 {
let tx = common::test_utils::create_transaction_native_token_transfer(
from,
i - 2,
to,
10,
&sign_key,
from, i, to, 10, &sign_key,
);
let block_id = u64::try_from(i).unwrap();
let block_id = u64::try_from(i + 1).unwrap();
let next_block =
common::test_utils::produce_dummy_block(block_id, Some(prev_hash), vec![tx]);
prev_hash = next_block.header.hash;
let next_block = common::test_utils::produce_dummy_block(block_id, prev_hash, vec![tx]);
prev_hash = Some(next_block.header.hash);
storage
.put_block(next_block, HeaderId::from([u8::try_from(i).unwrap(); 32]))
.put_block(
next_block,
HeaderId::from([u8::try_from(i + 1).unwrap(); 32]),
)
.await
.unwrap();
}
let acc1_val = storage.account_current_state(&acc1()).await.unwrap();
let acc2_val = storage.account_current_state(&acc2()).await.unwrap();
let acc1_val = storage.account_current_state(&from).await.unwrap();
let acc2_val = storage.account_current_state(&to).await.unwrap();
assert_eq!(acc1_val.balance, 9920);
assert_eq!(acc2_val.balance, 20080);
assert_eq!(acc1_val.balance, 9900);
assert_eq!(acc2_val.balance, 20100);
}
#[tokio::test]
async fn account_state_at_block() {
    // Opens a fresh (empty) indexer DB, feeds it ten blocks that each carry one
    // 10-token transfer from `from` to `to`, then checks that
    // `account_state_at_block` returns the historical balances at several heights.
    // Initial balances are assumed to be 10000 / 20000 for the first two accounts
    // in `initial_pub_accounts_private_keys` — TODO confirm against that crate.
    let home = tempdir().unwrap();
    let storage = IndexerStore::open_db(home.as_ref()).unwrap();
    let mut prev_hash = None;
    let initial_accounts = initial_pub_accounts_private_keys();
    let from = initial_accounts[0].account_id;
    let to = initial_accounts[1].account_id;
    let sign_key = initial_accounts[0].pub_sign_key.clone();
    // Blocks 1..=10: block `i + 1` contains a single transfer of 10 with nonce `i`.
    for i in 0..10 {
        let tx = common::test_utils::create_transaction_native_token_transfer(
            from, i, to, 10, &sign_key,
        );
        let block_id = u64::try_from(i + 1).unwrap();
        let next_block = common::test_utils::produce_dummy_block(block_id, prev_hash, vec![tx]);
        prev_hash = Some(next_block.header.hash);
        storage
            .put_block(
                next_block,
                // Distinct per-block L1 header id so stored blocks don't collide.
                HeaderId::from([u8::try_from(i + 1).unwrap(); 32]),
            )
            .await
            .unwrap();
    }
    // After block 1: one transfer of 10 applied (block 1 itself carries a tx).
    let acc1_at_1 = storage.account_state_at_block(&from, 1).unwrap();
    let acc2_at_1 = storage.account_state_at_block(&to, 1).unwrap();
    assert_eq!(acc1_at_1.balance, 9990);
    assert_eq!(acc2_at_1.balance, 20010);
    // After block 5: 5 transfers of 10 applied (one each in blocks 1..=5).
    let acc1_at_5 = storage.account_state_at_block(&from, 5).unwrap();
    let acc2_at_5 = storage.account_state_at_block(&to, 5).unwrap();
    assert_eq!(acc1_at_5.balance, 9950);
    assert_eq!(acc2_at_5.balance, 20050);
    // After block 9: 9 transfers of 10 applied (one each in blocks 1..=9);
    // block 10 adds the final transfer, so this is one step before the tip.
    let acc1_at_9 = storage.account_state_at_block(&from, 9).unwrap();
    let acc2_at_9 = storage.account_state_at_block(&to, 9).unwrap();
    assert_eq!(acc1_at_9.balance, 9910);
    assert_eq!(acc2_at_9.balance, 20090);
}
}

View File

@ -6,18 +6,14 @@ use std::{
};
use anyhow::{Context as _, Result};
pub use bedrock_client::BackoffConfig;
use common::config::BasicAuth;
use humantime_serde;
pub use logos_blockchain_core::mantle::ops::channel::ChannelId;
use serde::{Deserialize, Serialize};
use testnet_initial_state::{PrivateAccountPublicInitialData, PublicAccountPublicInitialData};
use url::Url;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ClientConfig {
/// For individual RPC requests we use Fibonacci backoff retry strategy.
pub backoff: BackoffConfig,
pub addr: Url,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub auth: Option<BasicAuth>,
@ -25,18 +21,12 @@ pub struct ClientConfig {
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct IndexerConfig {
/// Home dir of sequencer storage.
/// Home dir of indexer storage.
pub home: PathBuf,
/// Sequencers signing key.
pub signing_key: [u8; 32],
#[serde(with = "humantime_serde")]
pub consensus_info_polling_interval: Duration,
pub bedrock_client_config: ClientConfig,
pub bedrock_config: ClientConfig,
pub channel_id: ChannelId,
#[serde(skip_serializing_if = "Option::is_none")]
pub initial_public_accounts: Option<Vec<PublicAccountPublicInitialData>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub initial_private_accounts: Option<Vec<PrivateAccountPublicInitialData>>,
}
impl IndexerConfig {

View File

@ -1,18 +1,14 @@
use std::collections::VecDeque;
use std::sync::Arc;
use anyhow::Result;
use bedrock_client::{BedrockClient, HeaderId};
use common::{
HashType, PINATA_BASE58,
block::{Block, HashableBlockData},
use common::block::Block;
// ToDo: Remove after testnet
use futures::StreamExt as _;
use log::{error, info, warn};
use logos_blockchain_core::header::HeaderId;
use logos_blockchain_zone_sdk::{
CommonHttpClient, ZoneMessage, adapter::NodeHttpClient, indexer::ZoneIndexer,
};
use log::{debug, error, info};
use logos_blockchain_core::mantle::{
Op, SignedMantleTx,
ops::channel::{ChannelId, inscribe::InscriptionOp},
};
use nssa::V03State;
use testnet_initial_state::initial_state_testnet;
use crate::{block_store::IndexerStore, config::IndexerConfig};
@ -21,365 +17,97 @@ pub mod config;
#[derive(Clone)]
pub struct IndexerCore {
pub bedrock_client: BedrockClient,
pub zone_indexer: Arc<ZoneIndexer<NodeHttpClient>>,
pub config: IndexerConfig,
pub store: IndexerStore,
}
#[derive(Clone)]
/// This struct represents one L1 block data fetched from backfilling.
pub struct BackfillBlockData {
l2_blocks: Vec<Block>,
l1_header: HeaderId,
}
#[derive(Clone)]
/// This struct represents data fetched from backfilling in one iteration.
pub struct BackfillData {
block_data: VecDeque<BackfillBlockData>,
curr_fin_l1_lib_header: HeaderId,
}
impl IndexerCore {
pub fn new(config: IndexerConfig) -> Result<Self> {
let hashable_data = HashableBlockData {
block_id: 1,
transactions: vec![],
prev_block_hash: HashType([0; 32]),
timestamp: 0,
};
// Genesis creation is fine as it is,
// because it will be overwritten by sequencer.
// Therefore:
// ToDo: remove key from indexer config, use some default.
let signing_key = nssa::PrivateKey::try_new(config.signing_key).unwrap();
let channel_genesis_msg_id = [0; 32];
let genesis_block = hashable_data.into_pending_block(&signing_key, channel_genesis_msg_id);
let initial_private_accounts: Option<Vec<(nssa_core::Commitment, nssa_core::Nullifier)>> =
config.initial_private_accounts.as_ref().map(|accounts| {
accounts
.iter()
.map(|init_comm_data| {
let npk = &init_comm_data.npk;
let account_id = nssa::AccountId::from((npk, 0));
let mut acc = init_comm_data.account.clone();
acc.program_owner =
nssa::program::Program::authenticated_transfer_program().id();
(
nssa_core::Commitment::new(&account_id, &acc),
nssa_core::Nullifier::for_account_initialization(&account_id),
)
})
.collect()
});
let init_accs: Option<Vec<(nssa::AccountId, u128)>> = config
.initial_public_accounts
.as_ref()
.map(|initial_accounts| {
initial_accounts
.iter()
.map(|acc_data| (acc_data.account_id, acc_data.balance))
.collect()
});
// If initial commitments or accounts are present in config, need to construct state from
// them
let state = if initial_private_accounts.is_some() || init_accs.is_some() {
let mut state = V03State::new_with_genesis_accounts(
&init_accs.unwrap_or_default(),
initial_private_accounts.unwrap_or_default(),
genesis_block.header.timestamp,
);
// ToDo: Remove after testnet
state.add_pinata_program(PINATA_BASE58.parse().unwrap());
state
} else {
initial_state_testnet()
};
let home = config.home.join("rocksdb");
let basic_auth = config.bedrock_config.auth.clone().map(Into::into);
let node = NodeHttpClient::new(
CommonHttpClient::new(basic_auth),
config.bedrock_config.addr.clone(),
);
let zone_indexer = ZoneIndexer::new(config.channel_id, node);
Ok(Self {
bedrock_client: BedrockClient::new(
config.bedrock_client_config.backoff,
config.bedrock_client_config.addr.clone(),
config.bedrock_client_config.auth.clone(),
)?,
zone_indexer: Arc::new(zone_indexer),
config,
store: IndexerStore::open_db_with_genesis(&home, &genesis_block, &state)?,
store: IndexerStore::open_db(&home)?,
})
}
pub fn subscribe_parse_block_stream(&self) -> impl futures::Stream<Item = Result<Block>> {
pub fn subscribe_parse_block_stream(&self) -> impl futures::Stream<Item = Result<Block>> + '_ {
let poll_interval = self.config.consensus_info_polling_interval;
let initial_cursor = self
.store
.get_zone_cursor()
.expect("Failed to load zone-sdk indexer cursor");
async_stream::stream! {
info!("Searching for initial header");
let mut cursor = initial_cursor;
let last_stored_l1_lib_header = self.store.last_observed_l1_lib_header()?;
let mut prev_last_l1_lib_header = if let Some(last_l1_lib_header) = last_stored_l1_lib_header {
info!("Last l1 lib header found: {last_l1_lib_header}");
last_l1_lib_header
if cursor.is_some() {
info!("Resuming indexer from cursor {cursor:?}");
} else {
info!("Last l1 lib header not found in DB");
info!("Searching for the start of a channel");
let BackfillData {
block_data: start_buff,
curr_fin_l1_lib_header: last_l1_lib_header,
} = self.search_for_channel_start().await?;
for BackfillBlockData {
l2_blocks: l2_block_vec,
l1_header,
} in start_buff {
let mut l2_blocks_parsed_ids: Vec<_> = l2_block_vec.iter().map(|block| block.header.block_id).collect();
l2_blocks_parsed_ids.sort_unstable();
info!("Parsed {} L2 blocks with ids {:?}", l2_block_vec.len(), l2_blocks_parsed_ids);
for l2_block in l2_block_vec {
// TODO: proper fix is to make the sequencer's genesis include a
// trailing `clock_invocation(0)` (and have the indexer's
// `open_db_with_genesis` not pre-apply state transitions) so the
// inscribed genesis can flow through `put_block` like any other
// block. For now we skip re-applying it.
//
// The channel-start (block_id == 1) is the sequencer's genesis
// inscription that we re-discover during initial search. The
// indexer already has its own locally-constructed genesis in
// the store from `open_db_with_genesis`, so re-applying the
// inscribed copy is both redundant and would fail the strict
// block validation in `put_block` (the inscribed genesis lacks
// the trailing clock invocation).
if l2_block.header.block_id != 1 {
self
.store
.put_block(l2_block.clone(), l1_header)
.await
.inspect_err(|err| error!("Failed to put block with err {err:?}"))?;
}
yield Ok(l2_block);
}
}
last_l1_lib_header
};
info!("Searching for initial header finished");
info!("Starting backfilling from {prev_last_l1_lib_header}");
info!("Starting indexer from beginning of channel");
}
loop {
let BackfillData {
block_data: buff,
curr_fin_l1_lib_header,
} = self
.backfill_to_last_l1_lib_header_id(prev_last_l1_lib_header, &self.config.channel_id)
.await
.inspect_err(|err| error!("Failed to backfill to last l1 lib header id with err {err:#?}"))?;
prev_last_l1_lib_header = curr_fin_l1_lib_header;
for BackfillBlockData {
l2_blocks: l2_block_vec,
l1_header: header,
} in buff {
let mut l2_blocks_parsed_ids: Vec<_> = l2_block_vec.iter().map(|block| block.header.block_id).collect();
l2_blocks_parsed_ids.sort_unstable();
info!("Parsed {} L2 blocks with ids {:?}", l2_block_vec.len(), l2_blocks_parsed_ids);
for l2_block in l2_block_vec {
self.store.put_block(l2_block.clone(), header).await?;
yield Ok(l2_block);
let stream = match self.zone_indexer.next_messages(cursor).await {
Ok(s) => s,
Err(err) => {
error!("Failed to start zone-sdk next_messages stream: {err}");
tokio::time::sleep(poll_interval).await;
continue;
}
}
}
}
}
async fn get_lib(&self) -> Result<HeaderId> {
Ok(self.bedrock_client.get_consensus_info().await?.lib)
}
async fn get_next_lib(&self, prev_lib: HeaderId) -> Result<HeaderId> {
loop {
let next_lib = self.get_lib().await?;
if next_lib == prev_lib {
info!(
"Wait {:?} to not spam the node",
self.config.consensus_info_polling_interval
);
tokio::time::sleep(self.config.consensus_info_polling_interval).await;
} else {
break Ok(next_lib);
}
}
}
/// WARNING: depending on channel state,
/// may take indefinite amount of time.
pub async fn search_for_channel_start(&self) -> Result<BackfillData> {
let mut curr_last_l1_lib_header = self.get_lib().await?;
let mut backfill_start = curr_last_l1_lib_header;
// ToDo: How to get root?
let mut backfill_limit = HeaderId::from([0; 32]);
// ToDo: Not scalable, initial buffer should be stored in DB to not run out of memory
// Don't want to complicate DB even more right now.
let mut block_buffer = VecDeque::new();
'outer: loop {
let mut cycle_header = curr_last_l1_lib_header;
loop {
let Some(cycle_block) = self.bedrock_client.get_block_by_id(cycle_header).await?
else {
// First run can reach root easily
// so here we are optimistic about L1
// failing to get parent.
break;
};
let mut stream = std::pin::pin!(stream);
// It would be better to have id, but block does not have it, so slot will do.
info!(
"INITIAL SEARCH: Observed L1 block at slot {}",
cycle_block.header().slot().into_inner()
);
debug!(
"INITIAL SEARCH: This block header is {}",
cycle_block.header().id()
);
debug!(
"INITIAL SEARCH: This block parent is {}",
cycle_block.header().parent()
);
while let Some((msg, slot)) = stream.next().await {
let zone_block = match msg {
ZoneMessage::Block(b) => b,
// Non-block messages don't carry a cursor position; the
// next ZoneBlock advances past them implicitly.
ZoneMessage::Deposit(_) | ZoneMessage::Withdraw(_) => continue,
};
let (l2_block_vec, l1_header) =
parse_block_owned(&cycle_block, &self.config.channel_id);
let block: Block = match borsh::from_slice(&zone_block.data) {
Ok(b) => b,
Err(e) => {
error!("Failed to deserialize L2 block from zone-sdk: {e}");
// Advance past the broken inscription so we don't
// re-process it on restart.
cursor = Some((zone_block.id, slot));
if let Err(err) = self.store.set_zone_cursor(&(zone_block.id, slot)) {
warn!("Failed to persist indexer cursor: {err:#}");
}
continue;
}
};
info!("Parsed {} L2 blocks", l2_block_vec.len());
info!("Indexed L2 block {}", block.header.block_id);
if !l2_block_vec.is_empty() {
block_buffer.push_front(BackfillBlockData {
l2_blocks: l2_block_vec.clone(),
l1_header,
});
}
if let Some(first_l2_block) = l2_block_vec.first()
&& first_l2_block.header.block_id == 1
{
info!("INITIAL_SEARCH: Found channel start");
break 'outer;
}
// Step back to parent
let parent = cycle_block.header().parent();
if parent == backfill_limit {
break;
}
cycle_header = parent;
}
info!("INITIAL_SEARCH: Reached backfill limit, refetching last l1 lib header");
block_buffer.clear();
backfill_limit = backfill_start;
curr_last_l1_lib_header = self.get_next_lib(curr_last_l1_lib_header).await?;
backfill_start = curr_last_l1_lib_header;
}
Ok(BackfillData {
block_data: block_buffer,
curr_fin_l1_lib_header: curr_last_l1_lib_header,
})
}
pub async fn backfill_to_last_l1_lib_header_id(
&self,
last_fin_l1_lib_header: HeaderId,
channel_id: &ChannelId,
) -> Result<BackfillData> {
let curr_fin_l1_lib_header = self.get_next_lib(last_fin_l1_lib_header).await?;
// ToDo: Not scalable, buffer should be stored in DB to not run out of memory
// Don't want to complicate DB even more right now.
let mut block_buffer = VecDeque::new();
let mut cycle_header = curr_fin_l1_lib_header;
loop {
let Some(cycle_block) = self.bedrock_client.get_block_by_id(cycle_header).await? else {
return Err(anyhow::anyhow!("Parent not found"));
};
if cycle_block.header().id() == last_fin_l1_lib_header {
break;
}
// Step back to parent
cycle_header = cycle_block.header().parent();
// It would be better to have id, but block does not have it, so slot will do.
info!(
"Observed L1 block at slot {}",
cycle_block.header().slot().into_inner()
);
let (l2_block_vec, l1_header) = parse_block_owned(&cycle_block, channel_id);
info!("Parsed {} L2 blocks", l2_block_vec.len());
if !l2_block_vec.is_empty() {
block_buffer.push_front(BackfillBlockData {
l2_blocks: l2_block_vec,
l1_header,
});
}
}
Ok(BackfillData {
block_data: block_buffer,
curr_fin_l1_lib_header,
})
}
}
fn parse_block_owned(
l1_block: &bedrock_client::Block<SignedMantleTx>,
decoded_channel_id: &ChannelId,
) -> (Vec<Block>, HeaderId) {
(
#[expect(
clippy::wildcard_enum_match_arm,
reason = "We are only interested in channel inscription ops, so it's fine to ignore the rest"
)]
l1_block
.transactions()
.flat_map(|tx| {
tx.mantle_tx.ops.iter().filter_map(|op| match op {
Op::ChannelInscribe(InscriptionOp {
channel_id,
inscription,
..
}) if channel_id == decoded_channel_id => {
borsh::from_slice::<Block>(inscription)
.inspect_err(|err| {
error!("Failed to deserialize our inscription with err: {err:#?}");
})
.ok()
// TODO: Remove l1_header placeholder once storage layer
// no longer requires it. Zone-sdk handles L1 tracking internally.
let placeholder_l1_header = HeaderId::from([0_u8; 32]);
if let Err(err) = self.store.put_block(block.clone(), placeholder_l1_header).await {
error!("Failed to store block {}: {err:#}", block.header.block_id);
}
_ => None,
})
})
.collect(),
l1_block.header().id(),
)
cursor = Some((zone_block.id, slot));
if let Err(err) = self.store.set_zone_cursor(&(zone_block.id, slot)) {
warn!("Failed to persist indexer cursor: {err:#}");
}
yield Ok(block);
}
// Stream ended (caught up to LIB). Sleep then poll again.
tokio::time::sleep(poll_interval).await;
}
}
}
}

32
indexer/ffi/Cargo.toml Normal file
View File

@ -0,0 +1,32 @@
[package]
edition = "2024"
license = { workspace = true }
name = "indexer_ffi"
version = "0.1.0"
[dependencies]
nssa.workspace = true
indexer_service.workspace = true
indexer_service_rpc = { workspace = true, features = ["client"] }
indexer_service_protocol.workspace = true
url.workspace = true
log = { workspace = true }
tokio = { features = ["rt-multi-thread"], workspace = true }
jsonrpsee.workspace = true
anyhow.workspace = true
[build-dependencies]
cbindgen = "0.29"
[lib]
crate-type = ["rlib", "cdylib", "staticlib"]
name = "indexer_ffi"
[lints]
workspace = true
[package.metadata.cargo-machete]
ignored = [
"cbindgen",
] # cargo-machete does not detect cbindgen's use as a build dependency and would flag it as unused.

12
indexer/ffi/build.rs Normal file
View File

@ -0,0 +1,12 @@
use std::env;
/// Build script: generates a C header (`indexer_ffi.h`) for this crate via cbindgen.
fn main() {
    // Re-run the generator whenever the Rust sources change.
    println!("cargo:rerun-if-changed=src/");
    let manifest_dir = env::var("CARGO_MANIFEST_DIR").unwrap();
    cbindgen::Builder::new()
        .with_crate(manifest_dir)
        .with_language(cbindgen::Language::C)
        .generate()
        .expect("Unable to generate bindings")
        .write_to_file("indexer_ffi.h");
}

View File

@ -0,0 +1,2 @@
language = "C" # For increased compatibility
no_includes = true

752
indexer/ffi/indexer_ffi.h Normal file
View File

@ -0,0 +1,752 @@
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
typedef enum OperationStatus {
Ok = 0,
NullPointer = 1,
InitializationError = 2,
ClientError = 3,
} OperationStatus;
typedef enum FfiTransactionKind {
Public = 0,
Private,
ProgramDeploy,
} FfiTransactionKind;
typedef enum FfiBedrockStatus {
Pending = 0,
Safe,
Finalized,
} FfiBedrockStatus;
typedef struct Option_u64 Option_u64;
typedef struct IndexerServiceFFI {
void *indexer_handle;
void *indexer_client;
} IndexerServiceFFI;
/**
* Simple wrapper around a pointer to a value or an error.
*
* Pointer is not guaranteed. You should check the error field before
* dereferencing the pointer.
*/
typedef struct PointerResult_IndexerServiceFFI__OperationStatus {
struct IndexerServiceFFI *value;
enum OperationStatus error;
} PointerResult_IndexerServiceFFI__OperationStatus;
typedef struct PointerResult_IndexerServiceFFI__OperationStatus InitializedIndexerServiceFFIResult;
typedef enum PointerKind_Tag {
Owned,
Borrowed,
Null,
} PointerKind_Tag;
typedef struct PointerKind {
PointerKind_Tag tag;
union {
struct {
void *owned;
};
struct {
const void *borrowed;
};
};
} PointerKind;
typedef struct Pointer_Runtime {
struct PointerKind kind;
} Pointer_Runtime;
/**
* Wrapper around [`tokio::runtime::Runtime`] that can be safely passed across the FFI boundary.
*/
typedef struct Runtime {
struct Pointer_Runtime inner;
} Runtime;
/**
* Simple wrapper around a pointer to a value or an error.
*
* Pointer is not guaranteed. You should check the error field before
* dereferencing the pointer.
*/
typedef struct PointerResult_Runtime__OperationStatus {
struct Runtime *value;
enum OperationStatus error;
} PointerResult_Runtime__OperationStatus;
/**
* Simple wrapper around a pointer to a value or an error.
*
* Pointer is not guaranteed. You should check the error field before
* dereferencing the pointer.
*/
typedef struct PointerResult_Option_u64_____OperationStatus {
struct Option_u64 *value;
enum OperationStatus error;
} PointerResult_Option_u64_____OperationStatus;
typedef uint64_t FfiBlockId;
/**
* 32-byte array type for `AccountId`, keys, hashes, etc.
*/
typedef struct FfiBytes32 {
uint8_t data[32];
} FfiBytes32;
typedef struct FfiBytes32 FfiHashType;
typedef uint64_t FfiTimestamp;
/**
* 64-byte array type for signatures, etc.
*/
typedef struct FfiBytes64 {
uint8_t data[64];
} FfiBytes64;
typedef struct FfiBytes64 FfiSignature;
typedef struct FfiBlockHeader {
FfiBlockId block_id;
FfiHashType prev_block_hash;
FfiHashType hash;
FfiTimestamp timestamp;
FfiSignature signature;
} FfiBlockHeader;
/**
* Program ID - 8 u32 values (32 bytes total).
*/
typedef struct FfiProgramId {
uint32_t data[8];
} FfiProgramId;
typedef struct FfiBytes32 FfiAccountId;
typedef struct FfiVec_FfiAccountId {
FfiAccountId *entries;
uintptr_t len;
uintptr_t capacity;
} FfiVec_FfiAccountId;
typedef struct FfiVec_FfiAccountId FfiAccountIdList;
/**
* U128 - 16 bytes little endian.
*/
typedef struct FfiU128 {
uint8_t data[16];
} FfiU128;
typedef struct FfiU128 FfiNonce;
typedef struct FfiVec_FfiNonce {
FfiNonce *entries;
uintptr_t len;
uintptr_t capacity;
} FfiVec_FfiNonce;
typedef struct FfiVec_FfiNonce FfiNonceList;
typedef struct FfiVec_u32 {
uint32_t *entries;
uintptr_t len;
uintptr_t capacity;
} FfiVec_u32;
typedef struct FfiVec_u32 FfiInstructionDataList;
typedef struct FfiPublicMessage {
struct FfiProgramId program_id;
FfiAccountIdList account_ids;
FfiNonceList nonces;
FfiInstructionDataList instruction_data;
} FfiPublicMessage;
typedef struct FfiBytes32 FfiPublicKey;
typedef struct FfiSignaturePubKeyEntry {
FfiSignature signature;
FfiPublicKey public_key;
} FfiSignaturePubKeyEntry;
typedef struct FfiVec_FfiSignaturePubKeyEntry {
struct FfiSignaturePubKeyEntry *entries;
uintptr_t len;
uintptr_t capacity;
} FfiVec_FfiSignaturePubKeyEntry;
typedef struct FfiVec_FfiSignaturePubKeyEntry FfiSignaturePubKeyList;
typedef struct FfiPublicTransactionBody {
FfiHashType hash;
struct FfiPublicMessage message;
FfiSignaturePubKeyList witness_set;
} FfiPublicTransactionBody;
/**
* Account data structure - C-compatible version of nssa Account.
*
* Note: `balance` and `nonce` are u128 values represented as little-endian
* byte arrays since C doesn't have native u128 support.
*/
typedef struct FfiAccount {
struct FfiProgramId program_owner;
/**
* Balance as little-endian [u8; 16].
*/
struct FfiU128 balance;
/**
* Pointer to account data bytes.
*/
uint8_t *data;
/**
* Length of account data.
*/
uintptr_t data_len;
/**
* Capacity of account data.
*/
uintptr_t data_cap;
/**
* Nonce as little-endian [u8; 16].
*/
struct FfiU128 nonce;
} FfiAccount;
typedef struct FfiVec_FfiAccount {
struct FfiAccount *entries;
uintptr_t len;
uintptr_t capacity;
} FfiVec_FfiAccount;
typedef struct FfiVec_FfiAccount FfiAccountList;
typedef struct FfiVec_u8 {
uint8_t *entries;
uintptr_t len;
uintptr_t capacity;
} FfiVec_u8;
typedef struct FfiVec_u8 FfiVecU8;
typedef struct FfiEncryptedAccountData {
FfiVecU8 ciphertext;
FfiVecU8 epk;
uint8_t view_tag;
} FfiEncryptedAccountData;
typedef struct FfiVec_FfiEncryptedAccountData {
struct FfiEncryptedAccountData *entries;
uintptr_t len;
uintptr_t capacity;
} FfiVec_FfiEncryptedAccountData;
typedef struct FfiVec_FfiEncryptedAccountData FfiEncryptedAccountDataList;
typedef struct FfiVec_FfiBytes32 {
struct FfiBytes32 *entries;
uintptr_t len;
uintptr_t capacity;
} FfiVec_FfiBytes32;
typedef struct FfiVec_FfiBytes32 FfiVecBytes32;
typedef struct FfiNullifierCommitmentSet {
struct FfiBytes32 nullifier;
struct FfiBytes32 commitment_set_digest;
} FfiNullifierCommitmentSet;
typedef struct FfiVec_FfiNullifierCommitmentSet {
struct FfiNullifierCommitmentSet *entries;
uintptr_t len;
uintptr_t capacity;
} FfiVec_FfiNullifierCommitmentSet;
typedef struct FfiVec_FfiNullifierCommitmentSet FfiNullifierCommitmentSetList;
typedef struct FfiPrivacyPreservingMessage {
FfiAccountIdList public_account_ids;
FfiNonceList nonces;
FfiAccountList public_post_states;
FfiEncryptedAccountDataList encrypted_private_post_states;
FfiVecBytes32 new_commitments;
FfiNullifierCommitmentSetList new_nullifiers;
uint64_t block_validity_window[2];
uint64_t timestamp_validity_window[2];
} FfiPrivacyPreservingMessage;
typedef FfiVecU8 FfiProof;
typedef struct FfiPrivateTransactionBody {
FfiHashType hash;
struct FfiPrivacyPreservingMessage message;
FfiSignaturePubKeyList witness_set;
FfiProof proof;
} FfiPrivateTransactionBody;
typedef FfiVecU8 FfiProgramDeploymentMessage;
typedef struct FfiProgramDeploymentTransactionBody {
FfiHashType hash;
FfiProgramDeploymentMessage message;
} FfiProgramDeploymentTransactionBody;
typedef struct FfiTransactionBody {
struct FfiPublicTransactionBody *public_body;
struct FfiPrivateTransactionBody *private_body;
struct FfiProgramDeploymentTransactionBody *program_deployment_body;
} FfiTransactionBody;
typedef struct FfiTransaction {
struct FfiTransactionBody body;
enum FfiTransactionKind kind;
} FfiTransaction;
typedef struct FfiVec_FfiTransaction {
struct FfiTransaction *entries;
uintptr_t len;
uintptr_t capacity;
} FfiVec_FfiTransaction;
typedef struct FfiVec_FfiTransaction FfiBlockBody;
typedef struct FfiBytes32 FfiMsgId;
typedef struct FfiBlock {
struct FfiBlockHeader header;
FfiBlockBody body;
enum FfiBedrockStatus bedrock_status;
FfiMsgId bedrock_parent_id;
} FfiBlock;
typedef struct FfiOption_FfiBlock {
struct FfiBlock *value;
bool is_some;
} FfiOption_FfiBlock;
typedef struct FfiOption_FfiBlock FfiBlockOpt;
/**
* Simple wrapper around a pointer to a value or an error.
*
* Pointer is not guaranteed. You should check the error field before
* dereferencing the pointer.
*/
typedef struct PointerResult_FfiBlockOpt__OperationStatus {
FfiBlockOpt *value;
enum OperationStatus error;
} PointerResult_FfiBlockOpt__OperationStatus;
/**
* Simple wrapper around a pointer to a value or an error.
*
* Pointer is not guaranteed. You should check the error field before
* dereferencing the pointer.
*/
typedef struct PointerResult_FfiAccount__OperationStatus {
struct FfiAccount *value;
enum OperationStatus error;
} PointerResult_FfiAccount__OperationStatus;
typedef struct FfiOption_FfiTransaction {
struct FfiTransaction *value;
bool is_some;
} FfiOption_FfiTransaction;
/**
* Simple wrapper around a pointer to a value or an error.
*
* Pointer is not guaranteed. You should check the error field before
* dereferencing the pointer.
*/
typedef struct PointerResult_FfiOption_FfiTransaction_____OperationStatus {
struct FfiOption_FfiTransaction *value;
enum OperationStatus error;
} PointerResult_FfiOption_FfiTransaction_____OperationStatus;
typedef struct FfiVec_FfiBlock {
struct FfiBlock *entries;
uintptr_t len;
uintptr_t capacity;
} FfiVec_FfiBlock;
/**
* Simple wrapper around a pointer to a value or an error.
*
* Pointer is not guaranteed. You should check the error field before
* dereferencing the pointer.
*/
typedef struct PointerResult_FfiVec_FfiBlock_____OperationStatus {
struct FfiVec_FfiBlock *value;
enum OperationStatus error;
} PointerResult_FfiVec_FfiBlock_____OperationStatus;
typedef struct FfiOption_u64 {
uint64_t *value;
bool is_some;
} FfiOption_u64;
/**
* Simple wrapper around a pointer to a value or an error.
*
* Pointer is not guaranteed. You should check the error field before
* dereferencing the pointer.
*/
typedef struct PointerResult_FfiVec_FfiTransaction_____OperationStatus {
struct FfiVec_FfiTransaction *value;
enum OperationStatus error;
} PointerResult_FfiVec_FfiTransaction_____OperationStatus;
/**
* Creates and starts an indexer based on the provided
* configuration file path.
*
* # Arguments
*
* - `config_path`: A pointer to a string representing the path to the configuration file.
* - `port`: Number representing the port on which the indexer's RPC server will start.
*
* # Returns
*
* An `InitializedIndexerServiceFFIResult` containing either a pointer to the
* initialized `IndexerServiceFFI` or an error code.
*
* # Safety
* The caller must ensure that:
* - `runtime` is a valid pointer to a `tokio::runtime::Runtime` instance.
* - `config_path` is a valid pointer to a null-terminated C string.
*/
InitializedIndexerServiceFFIResult start_indexer(const struct Runtime *runtime,
const char *config_path,
uint16_t port);
/**
* Creates a new [`tokio::runtime::Runtime`].
*/
struct PointerResult_Runtime__OperationStatus new_runtime(void);
/**
* Stops and frees the resources associated with the given indexer service.
*
* # Arguments
*
* - `indexer`: A pointer to the `IndexerServiceFFI` instance to be stopped.
*
* # Returns
*
* An `OperationStatus` indicating success or failure.
*
* # Safety
*
* The caller must ensure that:
* - `indexer` is a valid pointer to a `IndexerServiceFFI` instance
* - The `IndexerServiceFFI` instance was created by this library
* - The pointer will not be used after this function returns
*/
enum OperationStatus stop_indexer(struct IndexerServiceFFI *indexer);
/**
* # Safety
* It's up to the caller to pass a proper pointer, if somehow from c/c++ side
* this is called with a type which doesn't come from a returned `CString` it
* will cause a segfault.
*/
void free_cstring(char *block);
/**
* Query the last block id from indexer.
*
* # Arguments
*
* - `indexer`: A pointer to the [`IndexerServiceFFI`] instance to be queried.
*
* # Returns
*
* A `PointerResult<Option<u64>, OperationStatus>` indicating success or failure.
*
* # Safety
*
* The caller must ensure that:
* - `runtime` is a valid pointer to a [`Runtime`] instance.
* - `indexer` is a valid pointer to a [`IndexerServiceFFI`] instance.
*/
struct PointerResult_Option_u64_____OperationStatus query_last_block(const struct Runtime *runtime,
const struct IndexerServiceFFI *indexer);
/**
* Query the block by id from indexer.
*
* # Arguments
*
* - `indexer`: A pointer to the [`IndexerServiceFFI`] instance to be queried.
* - `block_id`: `u64` number of block id
*
* # Returns
*
* A `PointerResult<FfiBlockOpt, OperationStatus>` indicating success or failure.
*
* # Safety
*
* The caller must ensure that:
* - `runtime` is a valid pointer to a [`Runtime`] instance.
* - `indexer` is a valid pointer to a [`IndexerServiceFFI`] instance.
*/
struct PointerResult_FfiBlockOpt__OperationStatus query_block(const struct Runtime *runtime,
const struct IndexerServiceFFI *indexer,
FfiBlockId block_id);
/**
* Query the block by id from indexer.
*
* # Arguments
*
* - `indexer`: A pointer to the [`IndexerServiceFFI`] instance to be queried.
* - `hash`: `FfiHashType` - hash of block
*
* # Returns
*
* A `PointerResult<FfiBlockOpt, OperationStatus>` indicating success or failure.
*
* # Safety
*
* The caller must ensure that:
* - `runtime` is a valid pointer to a [`Runtime`] instance.
* - `indexer` is a valid pointer to a [`IndexerServiceFFI`] instance.
*/
struct PointerResult_FfiBlockOpt__OperationStatus query_block_by_hash(const struct Runtime *runtime,
const struct IndexerServiceFFI *indexer,
FfiHashType hash);
/**
* Query the account by id from indexer.
*
* # Arguments
*
* - `indexer`: A pointer to the [`IndexerServiceFFI`] instance to be queried.
* - `account_id`: `FfiAccountId` - id of queried account
*
* # Returns
*
* A `PointerResult<FfiAccount, OperationStatus>` indicating success or failure.
*
* # Safety
*
* The caller must ensure that:
* - `runtime` is a valid pointer to a [`Runtime`] instance.
* - `indexer` is a valid pointer to a [`IndexerServiceFFI`] instance.
*/
struct PointerResult_FfiAccount__OperationStatus query_account(const struct Runtime *runtime,
const struct IndexerServiceFFI *indexer,
FfiAccountId account_id);
/**
* Query the transaction by hash from indexer.
*
* # Arguments
*
* - `indexer`: A pointer to the [`IndexerServiceFFI`] instance to be queried.
* - `hash`: `FfiHashType` - hash of transaction
*
* # Returns
*
* A `PointerResult<FfiOption<FfiTransaction>, OperationStatus>` indicating success or failure.
*
* # Safety
*
* The caller must ensure that:
* - `indexer` is a valid pointer to a [`IndexerServiceFFI`] instance.
* - `runtime` is a valid pointer to a [`Runtime`] instance.
*/
struct PointerResult_FfiOption_FfiTransaction_____OperationStatus query_transaction(const struct Runtime *runtime,
const struct IndexerServiceFFI *indexer,
FfiHashType hash);
/**
* Query the blocks by block range from indexer.
*
* # Arguments
*
* - `indexer`: A pointer to the [`IndexerServiceFFI`] instance to be queried.
* - `before`: `FfiOption<u64>` - end block of query
* - `limit`: `u64` - number of blocks to query before `before`
*
* # Returns
*
* A `PointerResult<FfiVec<FfiBlock>, OperationStatus>` indicating success or failure.
*
* # Safety
*
* The caller must ensure that:
* - `indexer` is a valid pointer to a [`IndexerServiceFFI`] instance.
* - `runtime` is a valid pointer to a [`Runtime`] instance.
*/
struct PointerResult_FfiVec_FfiBlock_____OperationStatus query_block_vec(const struct Runtime *runtime,
const struct IndexerServiceFFI *indexer,
struct FfiOption_u64 before,
uint64_t limit);
/**
* Query the transactions range by account id from indexer.
*
* # Arguments
*
* - `indexer`: A pointer to the [`IndexerServiceFFI`] instance to be queried.
* - `account_id`: `FfiAccountId` - id of queried account
* - `offset`: `u64` - first tx id of query
* - `limit`: `u64` - number of tx ids to query after `offset`
*
* # Returns
*
* A `PointerResult<FfiVec<FfiTransaction>, OperationStatus>` indicating success or failure.
*
* # Safety
*
* The caller must ensure that:
* - `indexer` is a valid pointer to a [`IndexerServiceFFI`] instance.
* - `runtime` is a valid pointer to a [`Runtime`] instance.
*/
struct PointerResult_FfiVec_FfiTransaction_____OperationStatus query_transactions_by_account(const struct Runtime *runtime,
const struct IndexerServiceFFI *indexer,
FfiAccountId account_id,
uint64_t offset,
uint64_t limit);
/**
* Frees the resources associated with the given ffi account.
*
* # Arguments
*
* - `val`: An instance of `FfiAccount`.
*
* # Returns
*
* void.
*
* # Safety
*
* The caller must ensure that:
* - `val` is a valid instance of `FfiAccount`.
*/
void free_ffi_account(struct FfiAccount val);
/**
* Frees the resources associated with the given ffi block.
*
* # Arguments
*
* - `val`: An instance of `FfiBlock`.
*
* # Returns
*
* void.
*
* # Safety
*
* The caller must ensure that:
* - `val` is a valid instance of `FfiBlock`.
*/
void free_ffi_block(struct FfiBlock val);
/**
* Frees the resources associated with the given ffi block option.
*
* # Arguments
*
* - `val`: An instance of `FfiBlockOpt`.
*
* # Returns
*
* void.
*
* # Safety
*
* The caller must ensure that:
* - `val` is a valid instance of `FfiBlockOpt`.
*/
void free_ffi_block_opt(FfiBlockOpt val);
/**
* Frees the resources associated with the given ffi block vector.
*
* # Arguments
*
* - `val`: An instance of `FfiVec<FfiBlock>`.
*
* # Returns
*
* void.
*
* # Safety
*
* The caller must ensure that:
* - `val` is a valid instance of `FfiVec<FfiBlock>`.
*/
void free_ffi_block_vec(struct FfiVec_FfiBlock val);
/**
* Frees the resources associated with the given ffi transaction.
*
* # Arguments
*
* - `val`: An instance of `FfiTransaction`.
*
* # Returns
*
* void.
*
* # Safety
*
* The caller must ensure that:
* - `val` is a valid instance of `FfiTransaction`.
*/
void free_ffi_transaction(struct FfiTransaction val);
/**
* Frees the resources associated with the given ffi transaction option.
*
* # Arguments
*
* - `val`: An instance of `FfiOption<FfiTransaction>`.
*
* # Returns
*
* void.
*
* # Safety
*
* The caller must ensure that:
* - `val` is a valid instance of `FfiOption<FfiTransaction>`.
*/
void free_ffi_transaction_opt(struct FfiOption_FfiTransaction val);
/**
* Frees the resources associated with the given vector of ffi transactions.
*
* # Arguments
*
* - `val`: An instance of `FfiVec<FfiTransaction>`.
*
* # Returns
*
* void.
*
* # Safety
*
* The caller must ensure that:
* - `val` is a valid instance of `FfiVec<FfiTransaction>`.
*/
void free_ffi_transaction_vec(struct FfiVec_FfiTransaction val);
bool is_ok(const enum OperationStatus *self);
bool is_error(const enum OperationStatus *self);

View File

@ -0,0 +1,36 @@
use std::net::SocketAddr;
use url::Url;
use crate::OperationStatus;
/// URL scheme to use when turning a socket address into a client URL.
#[derive(Debug, Clone, Copy)]
pub enum UrlProtocol {
    Http,
    Ws,
}

impl std::fmt::Display for UrlProtocol {
    /// Renders the lowercase scheme name ("http" / "ws").
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let scheme = match self {
            Self::Http => "http",
            Self::Ws => "ws",
        };
        f.write_str(scheme)
    }
}
pub(crate) fn addr_to_url(protocol: UrlProtocol, addr: SocketAddr) -> Result<Url, OperationStatus> {
// Convert 0.0.0.0 to 127.0.0.1 for client connections
// When binding to port 0, the server binds to 0.0.0.0:<random_port>
// but clients need to connect to 127.0.0.1:<port> to work reliably
let url_string = if addr.ip().is_unspecified() {
format!("{protocol}://127.0.0.1:{}", addr.port())
} else {
format!("{protocol}://{addr}")
};
url_string.parse().map_err(|e| {
log::error!("Could not parse indexer url: {e}");
OperationStatus::InitializationError
})
}

View File

@ -0,0 +1,138 @@
use std::{ffi::c_char, path::PathBuf};
use crate::{
IndexerServiceFFI, Runtime,
api::{
PointerResult,
client::{UrlProtocol, addr_to_url},
},
client::{IndexerClient, IndexerClientTrait as _},
errors::OperationStatus,
};
pub type InitializedIndexerServiceFFIResult = PointerResult<IndexerServiceFFI, OperationStatus>;

/// Creates and starts an indexer based on the provided
/// configuration file path.
///
/// # Arguments
///
/// - `config_path`: A pointer to a string representing the path to the configuration file.
/// - `port`: Number representing a port, on which indexers RPC will start.
///
/// # Returns
///
/// An `InitializedIndexerServiceFFIResult` containing either a pointer to the
/// initialized `IndexerServiceFFI` or an error code.
///
/// # Safety
/// The caller must ensure that:
/// - `runtime` is a valid pointer to a `tokio::runtime::Runtime` instance.
/// - `config_path` is a valid pointer to a null-terminated C string.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn start_indexer(
    runtime: *const Runtime,
    config_path: *const c_char,
    port: u16,
) -> InitializedIndexerServiceFFIResult {
    // SAFETY: The caller must ensure the validness of the `runtime` and `config_path` pointers.
    match unsafe { setup_indexer(runtime, config_path, port) } {
        Ok(service) => InitializedIndexerServiceFFIResult::from_value(service),
        Err(status) => InitializedIndexerServiceFFIResult::from_error(status),
    }
}
/// Creates a new [`tokio::runtime::Runtime`].
///
/// Returns a `PointerResult` owning the runtime on success, or
/// `OperationStatus::InitializationError` if the runtime could not be built.
#[unsafe(no_mangle)]
pub extern "C" fn new_runtime() -> PointerResult<Runtime, OperationStatus> {
    match Runtime::new() {
        Ok(runtime) => PointerResult::from_value(runtime),
        Err(_) => PointerResult::from_error(OperationStatus::InitializationError),
    }
}
/// Initializes and starts an indexer based on the provided
/// configuration file path.
///
/// # Arguments
///
/// - `config_path`: A pointer to a string representing the path to the configuration file.
/// - `port`: Number representing a port, on which indexers RPC will start.
///
/// # Returns
///
/// A `Result` containing either the initialized `IndexerServiceFFI` or an
/// error code. Every failure path is logged and mapped to
/// `OperationStatus::InitializationError`.
///
/// # Safety
/// The caller must ensure that:
/// - `runtime` is a valid pointer to a `tokio::runtime::Runtime` instance.
/// - `config_path` is a valid pointer to a null-terminated C string.
unsafe fn setup_indexer(
    runtime: *const Runtime,
    config_path: *const c_char,
    port: u16,
) -> Result<IndexerServiceFFI, OperationStatus> {
    // Decode the caller-supplied C string into a filesystem path.
    let user_config_path = PathBuf::from(
        unsafe { std::ffi::CStr::from_ptr(config_path) }
            .to_str()
            .map_err(|e| {
                log::error!("Could not convert the config path to string: {e}");
                OperationStatus::InitializationError
            })?,
    );
    // Load the indexer configuration from disk.
    let config = indexer_service::IndexerConfig::from_path(&user_config_path).map_err(|e| {
        log::error!("Failed to read config: {e}");
        OperationStatus::InitializationError
    })?;
    // SAFETY: The caller must ensure that `runtime` is a valid pointer to a
    // `tokio::runtime::Runtime` instance.
    let runtime = unsafe { &*runtime };
    // Start the RPC server on the requested port (blocking on the runtime).
    let indexer_handle = runtime
        .block_on(indexer_service::run_server(config, port))
        .map_err(|e| {
            log::error!("Could not start indexer service: {e}");
            OperationStatus::InitializationError
        })?;
    // Connect a websocket client to the address the server actually bound to
    // (relevant when port 0 picked a random port).
    let indexer_url = addr_to_url(UrlProtocol::Ws, indexer_handle.addr())?;
    let indexer_client = runtime
        .block_on(IndexerClient::new(&indexer_url))
        .map_err(|e| {
            log::error!("Could not start indexer client: {e}");
            OperationStatus::InitializationError
        })?;
    Ok(IndexerServiceFFI::new(indexer_handle, indexer_client))
}
/// Stops and frees the resources associated with the given indexer service.
///
/// # Arguments
///
/// - `indexer`: A pointer to the `IndexerServiceFFI` instance to be stopped.
///
/// # Returns
///
/// An `OperationStatus` indicating success or failure. A null pointer is
/// rejected with `OperationStatus::NullPointer`.
///
/// # Safety
///
/// The caller must ensure that:
/// - `indexer` is a valid pointer to a `IndexerServiceFFI` instance
/// - The `IndexerServiceFFI` instance was created by this library
/// - The pointer will not be used after this function returns
#[unsafe(no_mangle)]
pub unsafe extern "C" fn stop_indexer(indexer: *mut IndexerServiceFFI) -> OperationStatus {
    if indexer.is_null() {
        log::error!("Attempted to stop a null indexer pointer. This is a bug. Aborting.");
        return OperationStatus::NullPointer;
    }
    // SAFETY: non-null, and the caller guarantees this pointer came from
    // this library and is not used afterwards.
    drop(unsafe { Box::from_raw(indexer) });
    OperationStatus::Ok
}

View File

@ -0,0 +1,14 @@
use std::ffi::{CString, c_char};
/// # Safety
/// It's up to the caller to pass a proper pointer, if somehow from c/c++ side
/// this is called with a type which doesn't come from a returned `CString` it
/// will cause a segfault.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn free_cstring(block: *mut c_char) {
    if block.is_null() {
        log::error!("Trying to free a null pointer. Exiting");
    } else {
        // SAFETY: caller guarantees the pointer came from `CString::into_raw`.
        drop(unsafe { CString::from_raw(block) });
    }
}

View File

@ -0,0 +1,8 @@
pub use result::PointerResult;
pub mod client;
pub mod lifecycle;
pub mod memory;
pub mod query;
pub mod result;
pub mod types;

View File

@ -0,0 +1,348 @@
use indexer_service_protocol::{AccountId, HashType};
use indexer_service_rpc::RpcClient as _;
use crate::{
IndexerServiceFFI, Runtime,
api::{
PointerResult,
types::{
FfiAccountId, FfiBlockId, FfiHashType, FfiOption, FfiVec,
account::FfiAccount,
block::{FfiBlock, FfiBlockOpt},
transaction::FfiTransaction,
},
},
errors::OperationStatus,
};
/// Query the last block id from indexer.
///
/// # Arguments
///
/// - `indexer`: A pointer to the [`IndexerServiceFFI`] instance to be queried.
///
/// # Returns
///
/// A `PointerResult<Option<u64>, OperationStatus>` indicating success or failure.
///
/// # Safety
///
/// The caller must ensure that:
/// - `runtime` is a valid pointer to a [`Runtime`] instance.
/// - `indexer` is a valid pointer to a [`IndexerServiceFFI`] instance.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn query_last_block(
runtime: *const Runtime,
indexer: *const IndexerServiceFFI,
) -> PointerResult<Option<u64>, OperationStatus> {
if indexer.is_null() {
log::error!("Attempted to query a null indexer pointer. This is a bug. Aborting.");
return PointerResult::from_error(OperationStatus::NullPointer);
}
let indexer = unsafe { &*indexer };
let client = indexer.client();
let runtime = unsafe { &*runtime };
runtime
.block_on(client.get_last_finalized_block_id())
.map_or_else(
|_| PointerResult::from_error(OperationStatus::ClientError),
PointerResult::from_value,
)
}
/// Query the block by id from indexer.
///
/// # Arguments
///
/// - `indexer`: A pointer to the [`IndexerServiceFFI`] instance to be queried.
/// - `block_id`: `u64` number of block id
///
/// # Returns
///
/// A `PointerResult<FfiBlockOpt, OperationStatus>` indicating success or failure.
///
/// # Safety
///
/// The caller must ensure that:
/// - `runtime` is a valid pointer to a [`Runtime`] instance.
/// - `indexer` is a valid pointer to a [`IndexerServiceFFI`] instance.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn query_block(
runtime: *const Runtime,
indexer: *const IndexerServiceFFI,
block_id: FfiBlockId,
) -> PointerResult<FfiBlockOpt, OperationStatus> {
if indexer.is_null() {
log::error!("Attempted to query a null indexer pointer. This is a bug. Aborting.");
return PointerResult::from_error(OperationStatus::NullPointer);
}
let indexer = unsafe { &*indexer };
let client = indexer.client();
let runtime = unsafe { &*runtime };
runtime
.block_on(client.get_block_by_id(block_id))
.map_or_else(
|_| PointerResult::from_error(OperationStatus::ClientError),
|block_opt| {
let block_ffi = block_opt.map_or_else(FfiBlockOpt::from_none, |block| {
FfiBlockOpt::from_value(block.into())
});
PointerResult::from_value(block_ffi)
},
)
}
/// Query the block by id from indexer.
///
/// # Arguments
///
/// - `indexer`: A pointer to the [`IndexerServiceFFI`] instance to be queried.
/// - `hash`: `FfiHashType` - hash of block
///
/// # Returns
///
/// A `PointerResult<FfiBlockOpt, OperationStatus>` indicating success or failure.
///
/// # Safety
///
/// The caller must ensure that:
/// - `runtime` is a valid pointer to a [`Runtime`] instance.
/// - `indexer` is a valid pointer to a [`IndexerServiceFFI`] instance.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn query_block_by_hash(
runtime: *const Runtime,
indexer: *const IndexerServiceFFI,
hash: FfiHashType,
) -> PointerResult<FfiBlockOpt, OperationStatus> {
if indexer.is_null() {
log::error!("Attempted to query a null indexer pointer. This is a bug. Aborting.");
return PointerResult::from_error(OperationStatus::NullPointer);
}
let indexer = unsafe { &*indexer };
let client = indexer.client();
let runtime = unsafe { &*runtime };
runtime
.block_on(client.get_block_by_hash(HashType(hash.data)))
.map_or_else(
|_| PointerResult::from_error(OperationStatus::ClientError),
|block_opt| {
let block_ffi = block_opt.map_or_else(FfiBlockOpt::from_none, |block| {
FfiBlockOpt::from_value(block.into())
});
PointerResult::from_value(block_ffi)
},
)
}
/// Query the account by id from indexer.
///
/// # Arguments
///
/// - `indexer`: A pointer to the [`IndexerServiceFFI`] instance to be queried.
/// - `account_id`: `FfiAccountId` - id of queried account
///
/// # Returns
///
/// A `PointerResult<FfiAccount, OperationStatus>` indicating success or failure.
///
/// # Safety
///
/// The caller must ensure that:
/// - `runtime` is a valid pointer to a [`Runtime`] instance.
/// - `indexer` is a valid pointer to a [`IndexerServiceFFI`] instance.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn query_account(
runtime: *const Runtime,
indexer: *const IndexerServiceFFI,
account_id: FfiAccountId,
) -> PointerResult<FfiAccount, OperationStatus> {
if indexer.is_null() {
log::error!("Attempted to query a null indexer pointer. This is a bug. Aborting.");
return PointerResult::from_error(OperationStatus::NullPointer);
}
let indexer = unsafe { &*indexer };
let client = indexer.client();
let runtime = unsafe { &*runtime };
runtime
.block_on(client.get_account(AccountId {
value: account_id.data,
}))
.map_or_else(
|_| PointerResult::from_error(OperationStatus::ClientError),
|acc| {
let acc_nssa: nssa::Account =
acc.try_into().expect("Source is in blocks, must fit");
PointerResult::from_value(acc_nssa.into())
},
)
}
/// Query the trasnaction by hash from indexer.
///
/// # Arguments
///
/// - `indexer`: A pointer to the [`IndexerServiceFFI`] instance to be queried.
/// - `hash`: `FfiHashType` - hash of transaction
///
/// # Returns
///
/// A `PointerResult<FfiOption<FfiTransaction>, OperationStatus>` indicating success or failure.
///
/// # Safety
///
/// The caller must ensure that:
/// - `indexer` is a valid pointer to a [`IndexerServiceFFI`] instance.
/// - `runtime` is a valid pointer to a [`Runtime`] instance.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn query_transaction(
runtime: *const Runtime,
indexer: *const IndexerServiceFFI,
hash: FfiHashType,
) -> PointerResult<FfiOption<FfiTransaction>, OperationStatus> {
if indexer.is_null() {
log::error!("Attempted to query a null indexer pointer. This is a bug. Aborting.");
return PointerResult::from_error(OperationStatus::NullPointer);
}
let indexer = unsafe { &*indexer };
let client = indexer.client();
let runtime = unsafe { &*runtime };
runtime
.block_on(client.get_transaction(HashType(hash.data)))
.map_or_else(
|_| PointerResult::from_error(OperationStatus::ClientError),
|tx_opt| {
let tx_ffi = tx_opt.map_or_else(FfiOption::<FfiTransaction>::from_none, |tx| {
FfiOption::<FfiTransaction>::from_value(tx.into())
});
PointerResult::from_value(tx_ffi)
},
)
}
/// Query the blocks by block range from indexer.
///
/// # Arguments
///
/// - `indexer`: A pointer to the [`IndexerServiceFFI`] instance to be queried.
/// - `before`: `FfiOption<u64>` - end block of query
/// - `limit`: `u64` - number of blocks to query before `before`
///
/// # Returns
///
/// A `PointerResult<FfiVec<FfiBlock>, OperationStatus>` indicating success or failure.
///
/// # Safety
///
/// The caller must ensure that:
/// - `indexer` is a valid pointer to a [`IndexerServiceFFI`] instance.
/// - `runtime` is a valid pointer to a [`Runtime`] instance.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn query_block_vec(
runtime: *const Runtime,
indexer: *const IndexerServiceFFI,
before: FfiOption<u64>,
limit: u64,
) -> PointerResult<FfiVec<FfiBlock>, OperationStatus> {
if indexer.is_null() {
log::error!("Attempted to query a null indexer pointer. This is a bug. Aborting.");
return PointerResult::from_error(OperationStatus::NullPointer);
}
let indexer = unsafe { &*indexer };
let client = indexer.client();
let runtime = unsafe { &*runtime };
let before_std = before.is_some.then(|| unsafe { *before.value });
runtime
.block_on(client.get_blocks(before_std, limit))
.map_or_else(
|_| PointerResult::from_error(OperationStatus::ClientError),
|block_vec| {
PointerResult::from_value(
block_vec
.into_iter()
.map(Into::into)
.collect::<Vec<_>>()
.into(),
)
},
)
}
/// Query the transactions range by account id from indexer.
///
/// # Arguments
///
/// - `indexer`: A pointer to the [`IndexerServiceFFI`] instance to be queried.
/// - `account_id`: `FfiAccountId` - id of queried account
/// - `offset`: `u64` - first tx id of query
/// - `limit`: `u64` - number of tx ids to query after `offset`
///
/// # Returns
///
/// A `PointerResult<FfiVec<FfiBlock>, OperationStatus>` indicating success or failure.
///
/// # Safety
///
/// The caller must ensure that:
/// - `indexer` is a valid pointer to a [`IndexerServiceFFI`] instance.
/// - `runtime` is a valid pointer to a [`Runtime`] instance.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn query_transactions_by_account(
runtime: *const Runtime,
indexer: *const IndexerServiceFFI,
account_id: FfiAccountId,
offset: u64,
limit: u64,
) -> PointerResult<FfiVec<FfiTransaction>, OperationStatus> {
if indexer.is_null() {
log::error!("Attempted to query a null indexer pointer. This is a bug. Aborting.");
return PointerResult::from_error(OperationStatus::NullPointer);
}
let indexer = unsafe { &*indexer };
let client = indexer.client();
let runtime = unsafe { &*runtime };
runtime
.block_on(client.get_transactions_by_account(
AccountId {
value: account_id.data,
},
offset,
limit,
))
.map_or_else(
|_| PointerResult::from_error(OperationStatus::ClientError),
|tx_vec| {
PointerResult::from_value(
tx_vec
.into_iter()
.map(Into::into)
.collect::<Vec<_>>()
.into(),
)
},
)
}

View File

@ -0,0 +1,29 @@
/// Simple wrapper around a pointer to a value or an error.
///
/// Pointer is not guaranteed. You should check the error field before
/// dereferencing the pointer.
#[repr(C)]
pub struct PointerResult<Type, Error> {
    pub value: *mut Type,
    pub error: Error,
}

impl<Type, Error: Default> PointerResult<Type, Error> {
    /// Wraps an already-allocated pointer; `error` is set to the
    /// "no error" default value.
    pub fn from_pointer(pointer: *mut Type) -> Self {
        let error = Error::default();
        Self {
            value: pointer,
            error,
        }
    }

    /// Moves `value` onto the heap and transfers ownership of the
    /// allocation to the caller through the raw pointer.
    pub fn from_value(value: Type) -> Self {
        let boxed = Box::new(value);
        Self::from_pointer(Box::into_raw(boxed))
    }

    /// Builds an error result; the pointer is null and must not be
    /// dereferenced.
    pub const fn from_error(error: Error) -> Self {
        Self {
            value: std::ptr::null_mut(),
            error,
        }
    }
}

View File

@ -0,0 +1,119 @@
use indexer_service_protocol::ProgramId;
use crate::api::types::{FfiBytes32, FfiProgramId, FfiU128};
/// Account data structure - C-compatible version of nssa Account.
///
/// Note: `balance` and `nonce` are u128 values represented as little-endian
/// byte arrays since C doesn't have native u128 support.
///
/// `data`/`data_len`/`data_cap` are the raw parts of a Rust `Vec<u8>` (see
/// `From<nssa::Account>` below): the buffer is owned by this struct, and the
/// capacity must be kept so the `Vec` can be rebuilt and released via
/// `free_ffi_account`.
#[repr(C)]
pub struct FfiAccount {
    pub program_owner: FfiProgramId,
    /// Balance as little-endian [u8; 16].
    pub balance: FfiU128,
    /// Pointer to account data bytes.
    pub data: *mut u8,
    /// Length of account data.
    pub data_len: usize,
    /// Capacity of account data.
    pub data_cap: usize,
    /// Nonce as little-endian [u8; 16].
    pub nonce: FfiU128,
}
// Helper functions to convert between Rust and FFI types
impl From<&nssa::AccountId> for FfiBytes32 {
    /// Copies the 32-byte account id into the C-compatible wrapper.
    fn from(id: &nssa::AccountId) -> Self {
        Self::from_account_id(id)
    }
}
impl From<nssa::Account> for FfiAccount {
    /// Converts an owned `nssa::Account` into its C representation.
    ///
    /// Ownership of the account's data buffer is transferred to the FFI
    /// struct via `Vec::into_raw_parts`; the result must eventually be
    /// released with `free_ffi_account`, otherwise the buffer leaks.
    fn from(value: nssa::Account) -> Self {
        let nssa::Account {
            program_owner,
            balance,
            data,
            nonce,
        } = value;
        // Decompose the data Vec into (ptr, len, cap) so C can carry it.
        let (data, data_len, data_cap) = data.into_inner().into_raw_parts();
        let program_owner = FfiProgramId {
            data: program_owner,
        };
        Self {
            program_owner,
            balance: balance.into(),
            data,
            data_len,
            data_cap,
            nonce: nonce.0.into(),
        }
    }
}
impl From<FfiAccount> for indexer_service_protocol::Account {
    /// Converts the C representation back into the owned protocol type,
    /// consuming the FFI value and taking back ownership of its data buffer.
    ///
    /// After this conversion the original `FfiAccount` must not be freed
    /// again, or the buffer would be released twice.
    fn from(value: FfiAccount) -> Self {
        let FfiAccount {
            program_owner,
            balance,
            data,
            data_cap,
            data_len,
            nonce,
        } = value;
        Self {
            program_owner: ProgramId(program_owner.data),
            balance: balance.into(),
            // SAFETY: (ptr, len, cap) are assumed to originate unmodified
            // from `Vec::into_raw_parts` in `From<nssa::Account>` — TODO
            // confirm this invariant is documented for C consumers.
            data: indexer_service_protocol::Data(unsafe {
                Vec::from_raw_parts(data, data_len, data_cap)
            }),
            nonce: nonce.into(),
        }
    }
}
impl From<&FfiAccount> for indexer_service_protocol::Account {
    /// Borrowing conversion into the owned protocol type.
    ///
    /// The account's data bytes are *copied*, not taken over: rebuilding a
    /// `Vec` from the raw parts here (as the consuming `From<FfiAccount>`
    /// does) would transfer ownership of the buffer out of a borrow, so
    /// both the returned `Account` and a later `free_ffi_account` on the
    /// original would free the same allocation (double free).
    fn from(value: &FfiAccount) -> Self {
        let &FfiAccount {
            program_owner,
            balance,
            data,
            data_cap: _,
            data_len,
            nonce,
        } = value;
        // SAFETY: caller guarantees `data`/`data_len` describe a valid,
        // initialized allocation owned by `value`; we only read from it.
        let bytes = unsafe { std::slice::from_raw_parts(data, data_len) }.to_vec();
        Self {
            program_owner: ProgramId(program_owner.data),
            balance: balance.into(),
            data: indexer_service_protocol::Data(bytes),
            nonce: nonce.into(),
        }
    }
}
/// Frees the resources associated with the given ffi account.
///
/// # Arguments
///
/// - `val`: An instance of `FfiAccount`.
///
/// # Returns
///
/// void.
///
/// # Safety
///
/// The caller must ensure that:
/// - `val` is a valid instance of `FfiAccount`.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn free_ffi_account(val: FfiAccount) {
    // Converting back to the owned protocol type re-takes ownership of the
    // heap allocation behind `val.data`; dropping it releases everything.
    drop(indexer_service_protocol::Account::from(val));
}

View File

@ -0,0 +1,199 @@
use indexer_service_protocol::{
BedrockStatus, Block, BlockHeader, HashType, MantleMsgId, Signature,
};
use crate::api::types::{
FfiBlockId, FfiHashType, FfiMsgId, FfiOption, FfiSignature, FfiTimestamp, FfiVec,
transaction::free_ffi_transaction_vec, vectors::FfiBlockBody,
};
/// C-compatible view of a full block: header, transaction body and bedrock
/// anchoring metadata.
#[repr(C)]
pub struct FfiBlock {
    pub header: FfiBlockHeader,
    pub body: FfiBlockBody,
    pub bedrock_status: FfiBedrockStatus,
    pub bedrock_parent_id: FfiMsgId,
}

impl From<Block> for FfiBlock {
    /// Converts an owned protocol `Block`, moving every transaction into
    /// its FFI representation. The resulting transaction buffer is owned by
    /// the returned value and must be released via `free_ffi_block`.
    fn from(value: Block) -> Self {
        let Block {
            header,
            body,
            bedrock_status,
            bedrock_parent_id,
        } = value;
        Self {
            header: header.into(),
            body: body
                .transactions
                .into_iter()
                .map(Into::into)
                .collect::<Vec<_>>()
                .into(),
            bedrock_status: bedrock_status.into(),
            bedrock_parent_id: bedrock_parent_id.into(),
        }
    }
}
/// Optional block, used where a query may legitimately find nothing.
pub type FfiBlockOpt = FfiOption<FfiBlock>;

/// C-compatible block header; all fields are fixed-size value types.
#[repr(C)]
pub struct FfiBlockHeader {
    pub block_id: FfiBlockId,
    pub prev_block_hash: FfiHashType,
    pub hash: FfiHashType,
    pub timestamp: FfiTimestamp,
    pub signature: FfiSignature,
}

impl From<BlockHeader> for FfiBlockHeader {
    /// Field-by-field copy into the FFI layout; `block_id` and `timestamp`
    /// are already plain integers and pass through unchanged.
    fn from(value: BlockHeader) -> Self {
        let BlockHeader {
            block_id,
            prev_block_hash,
            hash,
            timestamp,
            signature,
        } = value;
        Self {
            block_id,
            prev_block_hash: prev_block_hash.into(),
            hash: hash.into(),
            timestamp,
            signature: signature.into(),
        }
    }
}
/// Bedrock anchoring state of a block, mirrored for C.
///
/// The first discriminant is pinned so the values seen by C are stable:
/// Pending = 0, Safe = 1, Finalized = 2.
#[repr(C)]
pub enum FfiBedrockStatus {
    Pending = 0x0,
    Safe,
    Finalized,
}

impl From<BedrockStatus> for FfiBedrockStatus {
    /// One-to-one variant mapping from the protocol enum.
    fn from(value: BedrockStatus) -> Self {
        match value {
            BedrockStatus::Finalized => Self::Finalized,
            BedrockStatus::Pending => Self::Pending,
            BedrockStatus::Safe => Self::Safe,
        }
    }
}

impl From<FfiBedrockStatus> for BedrockStatus {
    /// Inverse of the conversion above; the variant sets are identical, so
    /// the mapping is lossless.
    fn from(value: FfiBedrockStatus) -> Self {
        match value {
            FfiBedrockStatus::Finalized => Self::Finalized,
            FfiBedrockStatus::Pending => Self::Pending,
            FfiBedrockStatus::Safe => Self::Safe,
        }
    }
}
/// Frees the resources associated with the given ffi block.
///
/// # Arguments
///
/// - `val`: An instance of `FfiBlock`.
///
/// # Returns
///
/// void.
///
/// # Safety
///
/// The caller must ensure that:
/// - `val` is a valid instance of `FfiBlock`.
/// - `val` is not used or freed again afterwards: its transaction buffer is
///   released here.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn free_ffi_block(val: FfiBlock) {
    // We don't really need all the casts, but just in case
    // All except `ffi_tx_ffi_vec` is Copy types, so no need for Drop
    // Rebuild the owned header form and drop it immediately (a no-op for
    // these plain value types).
    let _ = BlockHeader {
        block_id: val.header.block_id,
        prev_block_hash: HashType(val.header.prev_block_hash.data),
        hash: HashType(val.header.hash.data),
        timestamp: val.header.timestamp,
        signature: Signature(val.header.signature.data),
    };
    let ffi_tx_ffi_vec = val.body;
    #[expect(clippy::let_underscore_must_use, reason = "No use for this Copy type")]
    let _: BedrockStatus = val.bedrock_status.into();
    let _ = MantleMsgId(val.bedrock_parent_id.data);
    // The body is the only field that owns heap memory: release its
    // transaction vector and everything each transaction owns.
    // SAFETY: caller guarantees `val` is a valid, still-owned block.
    unsafe {
        free_ffi_transaction_vec(ffi_tx_ffi_vec);
    };
}
/// Frees the resources associated with the given ffi block option.
///
/// # Arguments
///
/// - `val`: An instance of `FfiBlockOpt`.
///
/// # Returns
///
/// void.
///
/// # Safety
///
/// The caller must ensure that:
/// - `val` is a valid instance of `FfiBlockOpt`.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn free_ffi_block_opt(val: FfiBlockOpt) {
if val.is_some {
let value = unsafe { Box::from_raw(val.value) };
// We don't really need all the casts, but just in case
// All except `ffi_tx_ffi_vec` is Copy types, so no need for Drop
let _ = BlockHeader {
block_id: value.header.block_id,
prev_block_hash: HashType(value.header.prev_block_hash.data),
hash: HashType(value.header.hash.data),
timestamp: value.header.timestamp,
signature: Signature(value.header.signature.data),
};
let ffi_tx_ffi_vec = value.body;
#[expect(clippy::let_underscore_must_use, reason = "No use for this Copy type")]
let _: BedrockStatus = value.bedrock_status.into();
let _ = MantleMsgId(value.bedrock_parent_id.data);
unsafe {
free_ffi_transaction_vec(ffi_tx_ffi_vec);
};
}
}
/// Frees the resources associated with the given ffi block vector.
///
/// # Arguments
///
/// - `val`: An instance of `FfiVec<FfiBlock>`.
///
/// # Returns
///
/// void.
///
/// # Safety
///
/// The caller must ensure that:
/// - `val` is a valid instance of `FfiVec<FfiBlock>`.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn free_ffi_block_vec(val: FfiVec<FfiBlock>) {
    // Take back ownership of the backing buffer, then release each block's
    // own heap allocations.
    let blocks: Vec<FfiBlock> = val.into();
    for block in blocks.into_iter() {
        // SAFETY: every element is a valid `FfiBlock`, as the caller
        // guarantees for `val` as a whole.
        unsafe { free_ffi_block(block) };
    }
}

View File

@ -0,0 +1,165 @@
use indexer_service_protocol::{AccountId, HashType, MantleMsgId, ProgramId, PublicKey, Signature};
pub mod account;
pub mod block;
pub mod transaction;
pub mod vectors;
/// 32-byte array type for `AccountId`, keys, hashes, etc.
#[repr(C)]
#[derive(Clone, Copy, Default)]
pub struct FfiBytes32 {
    pub data: [u8; 32],
}

/// 64-byte array type for signatures, etc.
///
/// No `Default` derive here: std only implements `Default` for arrays up to
/// 32 elements.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct FfiBytes64 {
    pub data: [u8; 64],
}

/// Program ID - 8 u32 values (32 bytes total).
#[repr(C)]
#[derive(Clone, Copy, Default)]
pub struct FfiProgramId {
    pub data: [u32; 8],
}
impl From<ProgramId> for FfiProgramId {
    /// Copies the inner `[u32; 8]` into the C-compatible wrapper.
    fn from(value: ProgramId) -> Self {
        Self { data: value.0 }
    }
}
/// U128 - 16 bytes little endian.
///
/// C has no native 128-bit integer, so values cross the boundary as raw
/// little-endian bytes; use the `From<u128>`/`From<FfiU128>` conversions.
#[repr(C)]
#[derive(Clone, Copy, Default)]
pub struct FfiU128 {
    pub data: [u8; 16],
}
impl FfiBytes32 {
    /// Create from a 32-byte array.
    #[must_use]
    pub const fn from_bytes(bytes: [u8; 32]) -> Self {
        Self { data: bytes }
    }

    /// Create from an `AccountId`.
    ///
    /// Copies the id's 32 bytes; the borrowed id is left untouched.
    #[must_use]
    pub const fn from_account_id(id: &nssa::AccountId) -> Self {
        Self { data: *id.value() }
    }
}
impl From<u128> for FfiU128 {
    /// Encodes the value as little-endian bytes for the C side.
    fn from(value: u128) -> Self {
        let data = value.to_le_bytes();
        Self { data }
    }
}

impl From<FfiU128> for u128 {
    /// Decodes the little-endian byte array back into a native `u128`.
    fn from(value: FfiU128) -> Self {
        u128::from_le_bytes(value.data)
    }
}
/// 32-byte hash (blocks and transactions).
pub type FfiHashType = FfiBytes32;
/// 32-byte bedrock (mantle) message id.
pub type FfiMsgId = FfiBytes32;
/// Sequential block number.
pub type FfiBlockId = u64;
/// Block timestamp — unit not specified here; presumably seconds or millis
/// since the epoch, confirm against the indexer protocol.
pub type FfiTimestamp = u64;
/// 64-byte signature.
pub type FfiSignature = FfiBytes64;
/// 32-byte account id.
pub type FfiAccountId = FfiBytes32;
/// 128-bit account nonce, little-endian bytes.
pub type FfiNonce = FfiU128;
/// 32-byte public key.
pub type FfiPublicKey = FfiBytes32;
// Thin wrappers: each conversion copies the inner byte array into the
// corresponding `FfiBytes*` alias.
impl From<HashType> for FfiHashType {
    fn from(value: HashType) -> Self {
        Self { data: value.0 }
    }
}

impl From<MantleMsgId> for FfiMsgId {
    fn from(value: MantleMsgId) -> Self {
        Self { data: value.0 }
    }
}

impl From<Signature> for FfiSignature {
    fn from(value: Signature) -> Self {
        Self { data: value.0 }
    }
}

impl From<AccountId> for FfiAccountId {
    fn from(value: AccountId) -> Self {
        Self { data: value.value }
    }
}

impl From<PublicKey> for FfiPublicKey {
    fn from(value: PublicKey) -> Self {
        Self { data: value.0 }
    }
}
/// C-compatible (pointer, len, capacity) triple carrying an owned Rust `Vec`.
///
/// The buffer stays owned by Rust; the C side must eventually hand the
/// struct back to one of the `free_ffi_*` functions so the `Vec` can be
/// rebuilt and dropped.
#[repr(C)]
pub struct FfiVec<T> {
    pub entries: *mut T,
    pub len: usize,
    pub capacity: usize,
}

impl<T> From<Vec<T>> for FfiVec<T> {
    /// Decomposes the vector into raw parts, transferring buffer ownership
    /// to the FFI struct (`Vec::into_raw_parts` is a nightly-only API).
    fn from(value: Vec<T>) -> Self {
        let (entries, len, capacity) = value.into_raw_parts();
        Self {
            entries,
            len,
            capacity,
        }
    }
}

impl<T> From<FfiVec<T>> for Vec<T> {
    /// Rebuilds the owning vector from the raw parts.
    fn from(value: FfiVec<T>) -> Self {
        // SAFETY: assumes (entries, len, capacity) came unmodified from
        // `Vec::into_raw_parts` in the conversion above — TODO confirm this
        // invariant is documented for C consumers.
        unsafe { Self::from_raw_parts(value.entries, value.len, value.capacity) }
    }
}

impl<T> FfiVec<T> {
    /// # Safety
    ///
    /// `index` must be lesser than `self.len`.
    #[must_use]
    pub unsafe fn get(&self, index: usize) -> &T {
        // SAFETY: in-bounds by the caller contract above.
        let ptr = unsafe { self.entries.add(index) };
        unsafe { &*ptr }
    }
}
/// C-compatible option: a heap pointer plus an explicit `is_some` flag.
///
/// `value` is null whenever `is_some` is false; check the flag before
/// dereferencing.
#[repr(C)]
pub struct FfiOption<T> {
    pub value: *mut T,
    pub is_some: bool,
}

impl<T> FfiOption<T> {
    /// Moves `val` behind a heap allocation and marks the option populated.
    pub fn from_value(val: T) -> Self {
        let boxed = Box::new(val);
        Self {
            value: Box::into_raw(boxed),
            is_some: true,
        }
    }

    /// Builds an empty option: null pointer, `is_some == false`.
    #[must_use]
    pub const fn from_none() -> Self {
        Self {
            value: std::ptr::null_mut(),
            is_some: false,
        }
    }
}

View File

@ -0,0 +1,548 @@
use indexer_service_protocol::{
AccountId, Ciphertext, Commitment, CommitmentSetDigest, EncryptedAccountData,
EphemeralPublicKey, HashType, Nullifier, PrivacyPreservingMessage,
PrivacyPreservingTransaction, ProgramDeploymentMessage, ProgramDeploymentTransaction,
ProgramId, Proof, PublicKey, PublicMessage, PublicTransaction, Signature, Transaction,
ValidityWindow, WitnessSet,
};
use crate::api::types::{
FfiBytes32, FfiHashType, FfiOption, FfiProgramId, FfiPublicKey, FfiSignature, FfiVec,
vectors::{
FfiAccountIdList, FfiAccountList, FfiEncryptedAccountDataList, FfiInstructionDataList,
FfiNonceList, FfiNullifierCommitmentSetList, FfiProgramDeploymentMessage, FfiProof,
FfiSignaturePubKeyList, FfiVecBytes32, FfiVecU8,
},
};
/// C view of a public transaction: hash, message and witness signatures.
#[repr(C)]
pub struct FfiPublicTransactionBody {
    pub hash: FfiHashType,
    pub message: FfiPublicMessage,
    pub witness_set: FfiSignaturePubKeyList,
}

impl From<PublicTransaction> for FfiPublicTransactionBody {
    /// Converts an owned `PublicTransaction`, moving each
    /// (signature, public key) pair into its FFI representation.
    ///
    /// NOTE(review): the witness set's `proof` field has no FFI counterpart
    /// and is dropped here (the reverse conversion restores it as `None`) —
    /// confirm that is intentional for public transactions.
    fn from(value: PublicTransaction) -> Self {
        let PublicTransaction {
            hash,
            message,
            witness_set,
        } = value;
        Self {
            hash: hash.into(),
            message: message.into(),
            witness_set: witness_set
                .signatures_and_public_keys
                .into_iter()
                .map(Into::into)
                .collect::<Vec<_>>()
                .into(),
        }
    }
}
impl From<Box<FfiPublicTransactionBody>> for PublicTransaction {
    /// Reassembles a protocol `PublicTransaction` from its boxed FFI body,
    /// taking ownership of every nested FFI buffer.
    fn from(value: Box<FfiPublicTransactionBody>) -> Self {
        Self {
            hash: HashType(value.hash.data),
            message: PublicMessage {
                program_id: ProgramId(value.message.program_id.data),
                account_ids: {
                    // Reclaim the FFI buffer as a Vec, then rewrap each entry.
                    let std_vec: Vec<_> = value.message.account_ids.into();
                    std_vec
                        .into_iter()
                        .map(|ffi_val| AccountId {
                            value: ffi_val.data,
                        })
                        .collect()
                },
                nonces: {
                    let std_vec: Vec<_> = value.message.nonces.into();
                    std_vec.into_iter().map(Into::into).collect()
                },
                instruction_data: value.message.instruction_data.into(),
            },
            witness_set: WitnessSet {
                signatures_and_public_keys: {
                    let std_vec: Vec<_> = value.witness_set.into();
                    std_vec
                        .into_iter()
                        .map(|ffi_val| {
                            (
                                Signature(ffi_val.signature.data),
                                PublicKey(ffi_val.public_key.data),
                            )
                        })
                        .collect()
                },
                // The FFI public body carries no proof field, so the proof is
                // always restored as `None`.
                proof: None,
            },
        }
    }
}
/// C-compatible form of a public message: target program, involved account
/// ids, nonces, and the instruction data words.
#[repr(C)]
pub struct FfiPublicMessage {
    pub program_id: FfiProgramId,
    pub account_ids: FfiAccountIdList,
    pub nonces: FfiNonceList,
    pub instruction_data: FfiInstructionDataList,
}
impl From<PublicMessage> for FfiPublicMessage {
    /// Converts a protocol public message into its FFI form, moving every
    /// collection into an `FfiVec`-backed list.
    fn from(value: PublicMessage) -> Self {
        let PublicMessage {
            program_id,
            account_ids,
            nonces,
            instruction_data,
        } = value;
        let ffi_account_ids: Vec<_> = account_ids.into_iter().map(Into::into).collect();
        let ffi_nonces: Vec<_> = nonces.into_iter().map(Into::into).collect();
        Self {
            program_id: program_id.into(),
            account_ids: ffi_account_ids.into(),
            nonces: ffi_nonces.into(),
            instruction_data: instruction_data.into(),
        }
    }
}
/// C-compatible body of a privacy-preserving transaction; unlike the public
/// body, the proof travels as its own byte buffer.
#[repr(C)]
pub struct FfiPrivateTransactionBody {
    pub hash: FfiHashType,
    pub message: FfiPrivacyPreservingMessage,
    pub witness_set: FfiSignaturePubKeyList,
    pub proof: FfiProof,
}
impl From<PrivacyPreservingTransaction> for FfiPrivateTransactionBody {
    /// Converts a protocol privacy-preserving transaction into its FFI body,
    /// flattening the witness set and lifting the proof into its own field.
    ///
    /// # Panics
    ///
    /// Panics if `witness_set.proof` is `None`; private transactions are
    /// expected to always carry a proof.
    fn from(value: PrivacyPreservingTransaction) -> Self {
        let PrivacyPreservingTransaction {
            hash,
            message,
            witness_set,
        } = value;
        Self {
            hash: hash.into(),
            message: message.into(),
            witness_set: witness_set
                .signatures_and_public_keys
                .into_iter()
                .map(Into::into)
                .collect::<Vec<_>>()
                .into(),
            proof: witness_set
                .proof
                .expect("Private execution: proof must be present")
                .0
                .into(),
        }
    }
}
impl From<Box<FfiPrivateTransactionBody>> for PrivacyPreservingTransaction {
    /// Reassembles a protocol `PrivacyPreservingTransaction` from its boxed
    /// FFI body, reclaiming every nested FFI buffer; the proof is always
    /// restored as `Some`.
    fn from(value: Box<FfiPrivateTransactionBody>) -> Self {
        Self {
            hash: HashType(value.hash.data),
            message: PrivacyPreservingMessage {
                public_account_ids: {
                    // Reclaim the FFI list as a Vec, then rewrap each entry.
                    let std_vec: Vec<_> = value.message.public_account_ids.into();
                    std_vec
                        .into_iter()
                        .map(|ffi_val| AccountId {
                            value: ffi_val.data,
                        })
                        .collect()
                },
                nonces: {
                    let std_vec: Vec<_> = value.message.nonces.into();
                    std_vec.into_iter().map(Into::into).collect()
                },
                public_post_states: {
                    let std_vec: Vec<_> = value.message.public_post_states.into();
                    std_vec.into_iter().map(Into::into).collect()
                },
                encrypted_private_post_states: {
                    let std_vec: Vec<_> = value.message.encrypted_private_post_states.into();
                    std_vec
                        .into_iter()
                        .map(|ffi_val| EncryptedAccountData {
                            ciphertext: Ciphertext(ffi_val.ciphertext.into()),
                            epk: EphemeralPublicKey(ffi_val.epk.into()),
                            view_tag: ffi_val.view_tag,
                        })
                        .collect()
                },
                new_commitments: {
                    let std_vec: Vec<_> = value.message.new_commitments.into();
                    std_vec
                        .into_iter()
                        .map(|ffi_val| Commitment(ffi_val.data))
                        .collect()
                },
                new_nullifiers: {
                    let std_vec: Vec<_> = value.message.new_nullifiers.into();
                    std_vec
                        .into_iter()
                        .map(|ffi_val| {
                            (
                                Nullifier(ffi_val.nullifier.data),
                                CommitmentSetDigest(ffi_val.commitment_set_digest.data),
                            )
                        })
                        .collect()
                },
                // Sentinel-encoded windows: 0 / u64::MAX decode to open bounds.
                block_validity_window: cast_ffi_validity_window(
                    value.message.block_validity_window,
                ),
                timestamp_validity_window: cast_ffi_validity_window(
                    value.message.timestamp_validity_window,
                ),
            },
            witness_set: WitnessSet {
                signatures_and_public_keys: {
                    let std_vec: Vec<_> = value.witness_set.into();
                    std_vec
                        .into_iter()
                        .map(|ffi_val| {
                            (
                                Signature(ffi_val.signature.data),
                                PublicKey(ffi_val.public_key.data),
                            )
                        })
                        .collect()
                },
                proof: Some(Proof(value.proof.into())),
            },
        }
    }
}
/// C-compatible form of a privacy-preserving message.
#[repr(C)]
pub struct FfiPrivacyPreservingMessage {
    pub public_account_ids: FfiAccountIdList,
    pub nonces: FfiNonceList,
    pub public_post_states: FfiAccountList,
    pub encrypted_private_post_states: FfiEncryptedAccountDataList,
    pub new_commitments: FfiVecBytes32,
    pub new_nullifiers: FfiNullifierCommitmentSetList,
    /// `[start, end]`; `0` / `u64::MAX` encode open bounds
    /// (see `cast_validity_window`).
    pub block_validity_window: [u64; 2],
    /// Same sentinel encoding as `block_validity_window`.
    pub timestamp_validity_window: [u64; 2],
}
impl From<PrivacyPreservingMessage> for FfiPrivacyPreservingMessage {
    /// Converts a protocol privacy-preserving message into its FFI form,
    /// moving every collection into an `FfiVec`-backed list and encoding the
    /// validity windows as sentinel `[u64; 2]` pairs.
    ///
    /// # Panics
    ///
    /// Panics if a public post state cannot be converted into an
    /// `nssa::Account` (not expected for data sourced from blocks).
    fn from(value: PrivacyPreservingMessage) -> Self {
        let PrivacyPreservingMessage {
            public_account_ids,
            nonces,
            public_post_states,
            encrypted_private_post_states,
            new_commitments,
            new_nullifiers,
            block_validity_window,
            timestamp_validity_window,
        } = value;
        Self {
            public_account_ids: public_account_ids
                .into_iter()
                .map(Into::into)
                .collect::<Vec<_>>()
                .into(),
            nonces: nonces
                .into_iter()
                .map(Into::into)
                .collect::<Vec<_>>()
                .into(),
            public_post_states: public_post_states
                .into_iter()
                .map(|acc_ind| -> nssa::Account {
                    acc_ind.try_into().expect("Source is in blocks, must fit")
                })
                .map(Into::into)
                .collect::<Vec<_>>()
                .into(),
            encrypted_private_post_states: encrypted_private_post_states
                .into_iter()
                .map(Into::into)
                .collect::<Vec<_>>()
                .into(),
            new_commitments: new_commitments
                .into_iter()
                .map(|comm| FfiBytes32 { data: comm.0 })
                .collect::<Vec<_>>()
                .into(),
            new_nullifiers: new_nullifiers
                .into_iter()
                .map(Into::into)
                .collect::<Vec<_>>()
                .into(),
            block_validity_window: cast_validity_window(block_validity_window),
            timestamp_validity_window: cast_validity_window(timestamp_validity_window),
        }
    }
}
/// FFI pairing of a nullifier with the digest of the commitment set it was
/// derived against.
#[repr(C)]
pub struct FfiNullifierCommitmentSet {
    pub nullifier: FfiBytes32,
    pub commitment_set_digest: FfiBytes32,
}
impl From<(Nullifier, CommitmentSetDigest)> for FfiNullifierCommitmentSet {
    /// Packs a (nullifier, commitment-set digest) pair into its FFI entry.
    fn from(value: (Nullifier, CommitmentSetDigest)) -> Self {
        let (nullifier, digest) = value;
        Self {
            nullifier: FfiBytes32 { data: nullifier.0 },
            commitment_set_digest: FfiBytes32 { data: digest.0 },
        }
    }
}
/// FFI form of encrypted account data: ciphertext bytes, ephemeral public key
/// bytes, and a one-byte view tag.
#[repr(C)]
pub struct FfiEncryptedAccountData {
    pub ciphertext: FfiVecU8,
    pub epk: FfiVecU8,
    pub view_tag: u8,
}
impl From<EncryptedAccountData> for FfiEncryptedAccountData {
fn from(value: EncryptedAccountData) -> Self {
let EncryptedAccountData {
ciphertext,
epk,
view_tag,
} = value;
Self {
ciphertext: ciphertext.0.into(),
epk: epk.0.into(),
view_tag,
}
}
}
/// One witness entry: a signature together with the public key that goes with
/// it.
#[repr(C)]
pub struct FfiSignaturePubKeyEntry {
    pub signature: FfiSignature,
    pub public_key: FfiPublicKey,
}
impl From<(Signature, PublicKey)> for FfiSignaturePubKeyEntry {
    /// Builds an FFI witness entry from a (signature, public key) pair.
    fn from(value: (Signature, PublicKey)) -> Self {
        let (signature, public_key) = value;
        Self {
            signature: signature.into(),
            public_key: public_key.into(),
        }
    }
}
/// C-compatible body of a program-deployment transaction; the message is the
/// raw bytecode buffer.
#[repr(C)]
pub struct FfiProgramDeploymentTransactionBody {
    pub hash: FfiHashType,
    pub message: FfiProgramDeploymentMessage,
}
impl From<Box<FfiProgramDeploymentTransactionBody>> for ProgramDeploymentTransaction {
    /// Reassembles a program-deployment transaction from its boxed FFI body,
    /// reclaiming the bytecode buffer.
    fn from(value: Box<FfiProgramDeploymentTransactionBody>) -> Self {
        let bytecode = value.message.into();
        Self {
            hash: HashType(value.hash.data),
            message: ProgramDeploymentMessage { bytecode },
        }
    }
}
impl From<ProgramDeploymentTransaction> for FfiProgramDeploymentTransactionBody {
fn from(value: ProgramDeploymentTransaction) -> Self {
let ProgramDeploymentTransaction { hash, message } = value;
Self {
hash: hash.into(),
message: message.bytecode.into(),
}
}
}
/// Tagged-union payload for [`FfiTransaction`]: exactly one of the three body
/// pointers is non-null, selected by the accompanying `FfiTransactionKind`.
#[repr(C)]
pub struct FfiTransactionBody {
    /// Non-null only when the kind is `Public`.
    pub public_body: *mut FfiPublicTransactionBody,
    /// Non-null only when the kind is `Private`.
    pub private_body: *mut FfiPrivateTransactionBody,
    /// Non-null only when the kind is `ProgramDeploy`.
    pub program_deployment_body: *mut FfiProgramDeploymentTransactionBody,
}
/// Top-level FFI transaction: a body union plus the kind tag that tells which
/// body pointer is live.
#[repr(C)]
pub struct FfiTransaction {
    pub body: FfiTransactionBody,
    pub kind: FfiTransactionKind,
}
impl From<Transaction> for FfiTransaction {
    /// Converts a protocol transaction into the FFI tagged union: the matching
    /// body is heap-allocated via `Box::into_raw`, the other two pointers stay
    /// null, and `kind` records which one is live. The allocation must later
    /// be released with `free_ffi_transaction`.
    fn from(value: Transaction) -> Self {
        match value {
            Transaction::Public(pub_tx) => Self {
                body: FfiTransactionBody {
                    public_body: Box::into_raw(Box::new(pub_tx.into())),
                    private_body: std::ptr::null_mut(),
                    program_deployment_body: std::ptr::null_mut(),
                },
                kind: FfiTransactionKind::Public,
            },
            Transaction::PrivacyPreserving(priv_tx) => Self {
                body: FfiTransactionBody {
                    public_body: std::ptr::null_mut(),
                    private_body: Box::into_raw(Box::new(priv_tx.into())),
                    program_deployment_body: std::ptr::null_mut(),
                },
                kind: FfiTransactionKind::Private,
            },
            Transaction::ProgramDeployment(pr_dep_tx) => Self {
                body: FfiTransactionBody {
                    public_body: std::ptr::null_mut(),
                    private_body: std::ptr::null_mut(),
                    program_deployment_body: Box::into_raw(Box::new(pr_dep_tx.into())),
                },
                kind: FfiTransactionKind::ProgramDeploy,
            },
        }
    }
}
/// Discriminant telling which pointer in [`FfiTransactionBody`] is live.
#[repr(C)]
pub enum FfiTransactionKind {
    Public = 0x0,
    Private,
    ProgramDeploy,
}
/// Frees the resources associated with the given ffi transaction.
///
/// # Arguments
///
/// - `val`: An instance of `FfiTransaction`.
///
/// # Returns
///
/// void.
///
/// # Safety
///
/// The caller must ensure that:
/// - `val` is a valid instance of `FfiTransaction` whose live body pointer
///   (selected by `val.kind`) was produced by `Box::into_raw` (e.g. via
///   `From<Transaction>`) and has not been freed already.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn free_ffi_transaction(val: FfiTransaction) {
    match val.kind {
        FfiTransactionKind::Public => {
            // Reclaim the box, then rebuild the owned protocol value so every
            // nested FFI buffer is dropped on the Rust side.
            let body = unsafe { Box::from_raw(val.body.public_body) };
            let std_body: PublicTransaction = body.into();
            drop(std_body);
        }
        FfiTransactionKind::Private => {
            let body = unsafe { Box::from_raw(val.body.private_body) };
            let std_body: PrivacyPreservingTransaction = body.into();
            drop(std_body);
        }
        FfiTransactionKind::ProgramDeploy => {
            let body = unsafe { Box::from_raw(val.body.program_deployment_body) };
            let std_body: ProgramDeploymentTransaction = body.into();
            drop(std_body);
        }
    }
}
/// Frees the resources associated with the given ffi transaction option.
///
/// # Arguments
///
/// - `val`: An instance of `FfiOption<FfiTransaction>`.
///
/// # Returns
///
/// void.
///
/// # Safety
///
/// The caller must ensure that:
/// - `val` is a valid instance of `FfiOption<FfiTransaction>`.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn free_ffi_transaction_opt(val: FfiOption<FfiTransaction>) {
if val.is_some {
let value = unsafe { Box::from_raw(val.value) };
match value.kind {
FfiTransactionKind::Public => {
let body = unsafe { Box::from_raw(value.body.public_body) };
let std_body: PublicTransaction = body.into();
drop(std_body);
}
FfiTransactionKind::Private => {
let body = unsafe { Box::from_raw(value.body.private_body) };
let std_body: PrivacyPreservingTransaction = body.into();
drop(std_body);
}
FfiTransactionKind::ProgramDeploy => {
let body = unsafe { Box::from_raw(value.body.program_deployment_body) };
let std_body: ProgramDeploymentTransaction = body.into();
drop(std_body);
}
}
}
}
/// Frees the resources associated with the given vector of ffi transactions.
///
/// # Arguments
///
/// - `val`: An instance of `FfiVec<FfiTransaction>`.
///
/// # Returns
///
/// void.
///
/// # Safety
///
/// The caller must ensure that:
/// - `val` is a valid instance of `FfiVec<FfiTransaction>` (built from a
///   genuine `Vec`) and that neither the vector nor any of its elements has
///   been freed already.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn free_ffi_transaction_vec(val: FfiVec<FfiTransaction>) {
    // Reclaim the vector's own buffer, then free each element's body.
    let ffi_tx_std_vec: Vec<_> = val.into();
    for tx in ffi_tx_std_vec {
        unsafe {
            free_ffi_transaction(tx);
        }
    }
}
fn cast_validity_window(window: ValidityWindow) -> [u64; 2] {
[
window.0.0.unwrap_or_default(),
window.0.1.unwrap_or(u64::MAX),
]
}
/// Decodes a `[start, end]` pair back into a `ValidityWindow`, treating `0`
/// and `u64::MAX` as open bounds.
///
/// NOTE(review): the encoding is lossy at the sentinels — `Some(0)` and
/// `Some(u64::MAX)` decode as unbounded; confirm those values cannot occur as
/// real bounds.
const fn cast_ffi_validity_window(ffi_window: [u64; 2]) -> ValidityWindow {
    let left = match ffi_window[0] {
        0 => None,
        start => Some(start),
    };
    let right = match ffi_window[1] {
        u64::MAX => None,
        end => Some(end),
    };
    ValidityWindow((left, right))
}

View File

@ -0,0 +1,31 @@
use crate::api::types::{
FfiAccountId, FfiBytes32, FfiNonce, FfiVec,
account::FfiAccount,
transaction::{
FfiEncryptedAccountData, FfiNullifierCommitmentSet, FfiSignaturePubKeyEntry, FfiTransaction,
},
};
/// Raw byte buffer crossing the FFI boundary.
pub type FfiVecU8 = FfiVec<u8>;
/// List of `FfiAccount` values.
pub type FfiAccountList = FfiVec<FfiAccount>;
/// List of account identifiers.
pub type FfiAccountIdList = FfiVec<FfiAccountId>;
/// List of 32-byte values.
pub type FfiVecBytes32 = FfiVec<FfiBytes32>;
/// Transactions making up a block body.
pub type FfiBlockBody = FfiVec<FfiTransaction>;
/// List of nonces.
pub type FfiNonceList = FfiVec<FfiNonce>;
/// Instruction data as 32-bit words.
pub type FfiInstructionDataList = FfiVec<u32>;
/// (signature, public key) witness entries.
pub type FfiSignaturePubKeyList = FfiVec<FfiSignaturePubKeyEntry>;
/// Serialized proof bytes.
pub type FfiProof = FfiVecU8;
/// Program bytecode bytes.
pub type FfiProgramDeploymentMessage = FfiVecU8;
/// List of encrypted account data entries.
pub type FfiEncryptedAccountDataList = FfiVec<FfiEncryptedAccountData>;
/// (nullifier, commitment-set digest) pairs.
pub type FfiNullifierCommitmentSetList = FfiVec<FfiNullifierCommitmentSet>;

View File

@ -4,7 +4,6 @@ use anyhow::{Context as _, Result};
use log::info;
pub use url::Url;
/// Minimal interface for building a client of the indexer service.
#[expect(async_fn_in_trait, reason = "We don't care about Send/Sync here")]
pub trait IndexerClientTrait: Clone {
    /// Builds a client for the indexer reachable at `indexer_url`.
    async fn new(indexer_url: &Url) -> Result<Self>;
}

23
indexer/ffi/src/errors.rs Normal file
View File

@ -0,0 +1,23 @@
/// C-compatible status code returned by FFI entry points.
#[derive(Debug, Default, PartialEq, Eq)]
#[repr(C)]
pub enum OperationStatus {
    /// The operation completed successfully.
    #[default]
    Ok = 0x0,
    /// A null pointer was encountered.
    NullPointer = 0x1,
    /// Initialization failed.
    InitializationError = 0x2,
    /// The client reported an error.
    ClientError = 0x3,
}

impl OperationStatus {
    /// Returns true exactly for the `Ok` status.
    #[must_use]
    #[unsafe(no_mangle)]
    pub extern "C" fn is_ok(&self) -> bool {
        matches!(self, Self::Ok)
    }

    /// Returns true for every non-`Ok` status.
    #[must_use]
    #[unsafe(no_mangle)]
    pub extern "C" fn is_error(&self) -> bool {
        !matches!(self, Self::Ok)
    }
}

View File

@ -0,0 +1,95 @@
use std::{ffi::c_void, net::SocketAddr};
use indexer_service::IndexerHandle;
use crate::client::IndexerClient;
/// FFI handle bundling a running indexer service with its client; both are
/// stored as opaque (type-erased) heap pointers so the struct stays
/// C-representable.
#[repr(C)]
pub struct IndexerServiceFFI {
    // Box<IndexerHandle> erased to *mut c_void.
    indexer_handle: *mut c_void,
    // Box<IndexerClient> erased to *mut c_void.
    indexer_client: *mut c_void,
}
impl IndexerServiceFFI {
    /// Takes ownership of a running indexer handle and client and erases both
    /// behind opaque heap pointers.
    #[must_use]
    pub fn new(
        indexer_handle: indexer_service::IndexerHandle,
        indexer_client: IndexerClient,
    ) -> Self {
        Self {
            // Box the complex types and convert to opaque pointers
            indexer_handle: Box::into_raw(Box::new(indexer_handle)).cast::<c_void>(),
            indexer_client: Box::into_raw(Box::new(indexer_client)).cast::<c_void>(),
        }
    }

    /// Helper to take ownership back.
    ///
    /// Consumes `self`; the raw pointers are nulled before `self` drops so the
    /// `Drop` impl cannot double-free the returned boxes.
    #[must_use]
    pub fn into_parts(mut self) -> (Box<IndexerHandle>, Box<IndexerClient>) {
        let Self {
            indexer_handle,
            indexer_client,
        } = &mut self;
        // Rebuild the boxes created in `new`.
        let indexer_handle_boxed = unsafe { Box::from_raw(indexer_handle.cast::<IndexerHandle>()) };
        let indexer_client_boxed = unsafe { Box::from_raw(indexer_client.cast::<IndexerClient>()) };
        // Assigning nulls to prevent double free on drop, since ownership is transferred to caller
        *indexer_handle = std::ptr::null_mut();
        *indexer_client = std::ptr::null_mut();
        (indexer_handle_boxed, indexer_client_boxed)
    }

    /// Helper to get indexer handle addr.
    ///
    /// Panics if the handle pointer is null (e.g. after `into_parts`).
    #[must_use]
    pub const fn addr(&self) -> SocketAddr {
        let indexer_handle = unsafe {
            self.indexer_handle
                .cast::<IndexerHandle>()
                .as_ref()
                .expect("Indexer Handle must be non-null pointer")
        };
        indexer_handle.addr()
    }

    /// Helper to get indexer handle ref.
    ///
    /// Panics if the handle pointer is null (e.g. after `into_parts`).
    #[must_use]
    pub const fn handle(&self) -> &IndexerHandle {
        unsafe {
            self.indexer_handle
                .cast::<IndexerHandle>()
                .as_ref()
                .expect("Indexer Handle must be non-null pointer")
        }
    }

    /// Helper to get indexer client ref.
    ///
    /// Panics if the client pointer is null (e.g. after `into_parts`).
    #[must_use]
    pub const fn client(&self) -> &IndexerClient {
        unsafe {
            self.indexer_client
                .cast::<IndexerClient>()
                .as_ref()
                .expect("Indexer Client must be non-null pointer")
        }
    }
}
// Implement Drop to prevent memory leaks
impl Drop for IndexerServiceFFI {
    /// Frees whichever opaque pointers are still owned; null pointers (set by
    /// `into_parts`) are skipped so ownership handed to the caller is
    /// respected.
    fn drop(&mut self) {
        let Self {
            indexer_handle,
            indexer_client,
        } = self;
        if !indexer_handle.is_null() {
            drop(unsafe { Box::from_raw(indexer_handle.cast::<IndexerHandle>()) });
        }
        if !indexer_client.is_null() {
            drop(unsafe { Box::from_raw(indexer_client.cast::<IndexerClient>()) });
        }
    }
}

11
indexer/ffi/src/lib.rs Normal file
View File

@ -0,0 +1,11 @@
#![allow(clippy::undocumented_unsafe_blocks, reason = "It is an FFI")]
pub use errors::OperationStatus;
pub use indexer::IndexerServiceFFI;
pub use runtime::Runtime;
pub mod api;
mod client;
mod errors;
mod indexer;
mod runtime;

129
indexer/ffi/src/runtime.rs Normal file
View File

@ -0,0 +1,129 @@
use std::ffi::c_void;
/// Wrapper around [`tokio::runtime::Runtime`] that can be safely passed across the FFI boundary.
#[repr(C)]
pub struct Runtime {
    // Owned, borrowed, or null pointer to the underlying tokio runtime.
    inner: Pointer<tokio::runtime::Runtime>,
}
impl Runtime {
    /// Creates a new owned [`Runtime`] instance.
    ///
    /// # Errors
    ///
    /// Returns any error produced while constructing the tokio runtime.
    pub fn new() -> Result<Self, Box<dyn std::error::Error>> {
        let inner = tokio::runtime::Runtime::new()?;
        Ok(Self {
            inner: Pointer::owned(inner),
        })
    }

    /// Creates a new owned [`Runtime`] instance from an existing [`tokio::runtime::Runtime`].
    pub fn from_owned(inner: tokio::runtime::Runtime) -> Self {
        Self {
            inner: Pointer::owned(inner),
        }
    }

    /// Creates a new borrowed [`Runtime`] instance from a reference to an existing
    /// `tokio::runtime::Runtime`.
    ///
    /// # Safety
    /// The caller must ensure that the provided reference remains valid for the lifetime of the
    /// returned [`Runtime`] (the borrow's lifetime is erased below).
    pub const unsafe fn from_borrowed(inner: &tokio::runtime::Runtime) -> Self {
        Self {
            // SAFETY: The caller must ensure the validness of the `inner` reference.
            inner: unsafe { Pointer::borrowed(inner) },
        }
    }
}
impl AsRef<tokio::runtime::Runtime> for Runtime {
    /// Borrows the underlying tokio runtime; panics when the inner pointer is
    /// null.
    fn as_ref(&self) -> &tokio::runtime::Runtime {
        match self.inner.as_ref() {
            Some(runtime) => runtime,
            None => panic!("Runtime pointer should not be null"),
        }
    }
}
impl std::ops::Deref for Runtime {
    type Target = tokio::runtime::Runtime;

    /// Delegates to [`AsRef`], so `*runtime` yields the tokio runtime.
    fn deref(&self) -> &Self::Target {
        AsRef::as_ref(self)
    }
}
/// Type-erased pointer that records whether it owns, borrows, or lacks a `T`.
#[repr(C)]
struct Pointer<T> {
    kind: PointerKind,
    // Ties the erased `c_void` pointer back to `T` for drop purposes.
    _marker: std::marker::PhantomData<T>,
}
/// Ownership state of a [`Pointer`].
#[repr(C)]
enum PointerKind {
    /// Heap allocation owned by this pointer; freed on drop.
    Owned(*mut c_void),
    /// Value owned elsewhere; never freed here.
    Borrowed(*const c_void),
    /// No value.
    Null,
}
impl<T> Pointer<T> {
    /// Creates a new owned pointer from a value.
    pub fn owned(value: T) -> Self {
        let boxed = Box::new(value);
        let kind = PointerKind::Owned(Box::into_raw(boxed).cast::<c_void>());
        Self {
            kind,
            _marker: std::marker::PhantomData,
        }
    }

    /// Creates a new borrowed pointer from a reference to an existing value.
    ///
    /// # Safety
    /// The caller must ensure that the provided reference remains valid for the lifetime of the
    /// returned pointer (the borrow's lifetime is erased here).
    pub const unsafe fn borrowed(value: &T) -> Self {
        let kind = PointerKind::Borrowed(std::ptr::from_ref(value).cast::<c_void>());
        Self {
            kind,
            _marker: std::marker::PhantomData,
        }
    }

    /// Returns a reference to the value if the pointer is owned or borrowed, or [`None`] if it is
    /// null.
    pub const fn as_ref(&self) -> Option<&T> {
        match self.kind {
            PointerKind::Owned(ptr) => unsafe { (ptr.cast::<T>()).as_ref() },
            PointerKind::Borrowed(ptr) => unsafe { (ptr.cast::<T>()).as_ref() },
            PointerKind::Null => None,
        }
    }

    /// Takes ownership of the value if the pointer is owned, returning the
    /// boxed-out value and leaving a null pointer in its place.
    /// If the pointer is borrowed or null, returns [`None`].
    #[expect(dead_code, reason = "May be useful in future")]
    pub fn take(&mut self) -> Option<T> {
        match std::mem::replace(&mut self.kind, PointerKind::Null) {
            PointerKind::Owned(ptr) => {
                // SAFETY: We ensure that the pointer is valid and was allocated by us.
                let boxed = unsafe { Box::from_raw(ptr.cast::<T>()) };
                Some(*boxed)
            }
            PointerKind::Borrowed(_) | PointerKind::Null => None,
        }
    }
}
impl<T> Drop for Pointer<T> {
    /// Frees the allocation only when the pointer is `Owned`; borrowed and
    /// null pointers are left alone.
    fn drop(&mut self) {
        let Self { kind, _marker } = self;
        if let PointerKind::Owned(ptr) = *kind {
            // SAFETY: We ensure that the pointer is valid and was allocated by us.
            unsafe {
                drop(Box::from_raw(ptr.cast::<T>()));
            }
        }
    }
}

View File

@ -1,160 +1,8 @@
{
"home": ".",
"consensus_info_polling_interval": "1s",
"bedrock_client_config": {
"addr": "http://localhost:8080",
"backoff": {
"start_delay": "100ms",
"max_retries": 5
}
"bedrock_config": {
"addr": "http://localhost:8080"
},
"channel_id": "0101010101010101010101010101010101010101010101010101010101010101",
"initial_accounts": [
{
"account_id": "CbgR6tj5kWx5oziiFptM7jMvrQeYY3Mzaao6ciuhSr2r",
"balance": 10000
},
{
"account_id": "2RHZhw9h534Zr3eq2RGhQete2Hh667foECzXPmSkGni2",
"balance": 20000
}
],
"initial_commitments": [
{
"npk": [
139,
19,
158,
11,
155,
231,
85,
206,
132,
228,
220,
114,
145,
89,
113,
156,
238,
142,
242,
74,
182,
91,
43,
100,
6,
190,
31,
15,
31,
88,
96,
204
],
"account": {
"program_owner": [
0,
0,
0,
0,
0,
0,
0,
0
],
"balance": 10000,
"data": [],
"nonce": 0
}
},
{
"npk": [
173,
134,
33,
223,
54,
226,
10,
71,
215,
254,
143,
172,
24,
244,
243,
208,
65,
112,
118,
70,
217,
240,
69,
100,
129,
3,
121,
25,
213,
132,
42,
45
],
"account": {
"program_owner": [
0,
0,
0,
0,
0,
0,
0,
0
],
"balance": 20000,
"data": [],
"nonce": 0
}
}
],
"signing_key": [
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37
]
"channel_id": "0101010101010101010101010101010101010101010101010101010101010101"
}

View File

@ -27,7 +27,7 @@ pub trait Rpc {
async fn subscribe_to_finalized_blocks(&self) -> SubscriptionResult;
#[method(name = "getLastFinalizedBlockId")]
async fn get_last_finalized_block_id(&self) -> Result<BlockId, ErrorObjectOwned>;
async fn get_last_finalized_block_id(&self) -> Result<Option<BlockId>, ErrorObjectOwned>;
#[method(name = "getBlockById")]
async fn get_block_by_id(&self, block_id: BlockId) -> Result<Option<Block>, ErrorObjectOwned>;
@ -41,6 +41,13 @@ pub trait Rpc {
#[method(name = "getAccount")]
async fn get_account(&self, account_id: AccountId) -> Result<Account, ErrorObjectOwned>;
#[method(name = "getAccountAtBlock")]
async fn get_account_at_block(
&self,
account_id: AccountId,
block_id: BlockId,
) -> Result<Account, ErrorObjectOwned>;
#[method(name = "getTransaction")]
async fn get_transaction(
&self,

View File

@ -16,6 +16,7 @@ pub struct IndexerHandle {
/// Option because of `Drop` which forbids to simply move out of `self` in `stopped()`.
server_handle: Option<ServerHandle>,
}
impl IndexerHandle {
const fn new(addr: SocketAddr, server_handle: ServerHandle) -> Self {
Self {

View File

@ -190,18 +190,16 @@ impl indexer_service_rpc::RpcServer for MockIndexerService {
Ok(())
}
async fn get_last_finalized_block_id(&self) -> Result<BlockId, ErrorObjectOwned> {
self.state
async fn get_last_finalized_block_id(&self) -> Result<Option<BlockId>, ErrorObjectOwned> {
Ok(self
.state
.read()
.await
.blocks
.iter()
.rev()
.find(|block| block.bedrock_status == BedrockStatus::Finalized)
.map(|block| block.header.block_id)
.ok_or_else(|| {
ErrorObjectOwned::owned(-32001, "Last block not found".to_owned(), None::<()>)
})
.map(|block| block.header.block_id))
}
async fn get_block_by_id(&self, block_id: BlockId) -> Result<Option<Block>, ErrorObjectOwned> {
@ -239,6 +237,22 @@ impl indexer_service_rpc::RpcServer for MockIndexerService {
.ok_or_else(|| ErrorObjectOwned::owned(-32001, "Account not found", None::<()>))
}
async fn get_account_at_block(
&self,
account_id: AccountId,
_block_id: BlockId,
) -> Result<Account, ErrorObjectOwned> {
// Mock service does not track historical state; returns current state regardless of
// block_id.
self.state
.read()
.await
.accounts
.get(&account_id)
.cloned()
.ok_or_else(|| ErrorObjectOwned::owned(-32001, "Account not found", None::<()>))
}
async fn get_transaction(
&self,
tx_hash: HashType,

View File

@ -48,7 +48,7 @@ impl indexer_service_rpc::RpcServer for IndexerService {
Ok(())
}
async fn get_last_finalized_block_id(&self) -> Result<BlockId, ErrorObjectOwned> {
async fn get_last_finalized_block_id(&self) -> Result<Option<BlockId>, ErrorObjectOwned> {
self.indexer.store.get_last_block_id().map_err(db_error)
}
@ -83,6 +83,19 @@ impl indexer_service_rpc::RpcServer for IndexerService {
.into())
}
async fn get_account_at_block(
&self,
account_id: AccountId,
block_id: BlockId,
) -> Result<Account, ErrorObjectOwned> {
Ok(self
.indexer
.store
.account_state_at_block(&account_id.into(), block_id)
.map_err(db_error)?
.into())
}
async fn get_transaction(
&self,
tx_hash: HashType,
@ -201,43 +214,49 @@ impl SubscriptionService {
tokio::sync::mpsc::unbounded_channel::<Subscription<BlockId>>();
let handle = tokio::spawn(async move {
let mut subscribers = Vec::new();
let run_loop = async {
let mut subscribers = Vec::new();
let mut block_stream = pin!(indexer.subscribe_parse_block_stream());
let mut block_stream = pin!(indexer.subscribe_parse_block_stream());
#[expect(
clippy::integer_division_remainder_used,
reason = "Generated by select! macro, can't be easily rewritten to avoid this lint"
)]
loop {
tokio::select! {
sub = sub_receiver.recv() => {
let Some(subscription) = sub else {
bail!("Subscription receiver closed unexpectedly");
};
info!("Added new subscription with ID {:?}", subscription.sink.subscription_id());
subscribers.push(subscription);
}
block_opt = block_stream.next() => {
debug!("Got new block from block stream");
let Some(block) = block_opt else {
bail!("Block stream ended unexpectedly");
};
let block = block.context("Failed to get L2 block data")?;
let block: indexer_service_protocol::Block = block.into();
#[expect(
clippy::integer_division_remainder_used,
reason = "Generated by select! macro, can't be easily rewritten to avoid this lint"
)]
loop {
tokio::select! {
sub = sub_receiver.recv() => {
let Some(subscription) = sub else {
bail!("Subscription receiver closed unexpectedly");
};
info!("Added new subscription with ID {:?}", subscription.sink.subscription_id());
subscribers.push(subscription);
}
block_opt = block_stream.next() => {
debug!("Got new block from block stream");
let Some(block) = block_opt else {
bail!("Block stream ended unexpectedly");
};
let block = block.context("Failed to get L2 block data")?;
let block: indexer_service_protocol::Block = block.into();
for sub in &mut subscribers {
if let Err(err) = sub.try_send(&block.header.block_id) {
warn!(
"Failed to send block ID {:?} to subscription ID {:?} with error: {err:#?}",
block.header.block_id,
sub.sink.subscription_id(),
);
for sub in &mut subscribers {
if let Err(err) = sub.try_send(&block.header.block_id) {
warn!(
"Failed to send block ID {:?} to subscription ID {:?} with error: {err:#?}",
block.header.block_id,
sub.sink.subscription_id(),
);
}
}
}
}
}
}
};
let res: anyhow::Result<futures::never::Never> = run_loop.await;
let Err(err) = res;
error!("Subscription service loop has unexpectedly finished with error: {err:#?}");
Err(err)
});
SubscriptionLoopParts {
handle,

View File

@ -10,6 +10,7 @@ workspace = true
[dependencies]
nssa_core = { workspace = true, features = ["host"] }
nssa.workspace = true
authenticated_transfer_core.workspace = true
sequencer_core = { workspace = true, features = ["default", "testnet"] }
sequencer_service.workspace = true
wallet.workspace = true
@ -19,14 +20,16 @@ indexer_service.workspace = true
serde_json.workspace = true
token_core.workspace = true
ata_core.workspace = true
indexer_service_rpc.workspace = true
vault_core.workspace = true
faucet_core.workspace = true
indexer_service_rpc = { workspace = true, features = ["client"] }
sequencer_service_rpc = { workspace = true, features = ["client"] }
jsonrpsee = { workspace = true, features = ["ws-client"] }
wallet-ffi.workspace = true
indexer_ffi.workspace = true
testnet_initial_state.workspace = true
indexer_service_protocol.workspace = true
url.workspace = true
anyhow.workspace = true
env_logger.workspace = true
log.workspace = true
@ -35,4 +38,4 @@ hex.workspace = true
tempfile.workspace = true
bytesize.workspace = true
futures.workspace = true
testcontainers = { version = "0.27.0", features = ["docker-compose"] }
testcontainers = { version = "0.27.3", features = ["docker-compose"] }

View File

@ -2,17 +2,30 @@ use std::{net::SocketAddr, path::PathBuf, time::Duration};
use anyhow::{Context as _, Result};
use bytesize::ByteSize;
use indexer_service::{BackoffConfig, ChannelId, ClientConfig, IndexerConfig};
use indexer_service::{ChannelId, ClientConfig, IndexerConfig};
use key_protocol::key_management::KeyChain;
use nssa::{Account, AccountId, PrivateKey, PublicKey};
use nssa_core::{account::Data, program::DEFAULT_PROGRAM_ID};
use sequencer_core::config::{BedrockConfig, SequencerConfig};
use testnet_initial_state::{
PrivateAccountPrivateInitialData, PrivateAccountPublicInitialData,
PublicAccountPrivateInitialData, PublicAccountPublicInitialData,
};
use nssa::{AccountId, PrivateKey, PublicKey};
use nssa_core::Identifier;
use sequencer_core::config::{BedrockConfig, GenesisAction, SequencerConfig};
use url::Url;
use wallet::config::{InitialAccountData, WalletConfig};
use wallet::config::WalletConfig;
pub const INITIAL_PUBLIC_BALANCES_FOR_WALLET: [u128; 2] = [10_000, 20_000];
pub const INITIAL_PRIVATE_BALANCES_FOR_WALLET: [u128; 2] = [10_000, 20_000];
/// Key material and starting balance for a private account seeded into the
/// wallet during integration-test setup.
#[derive(Clone)]
pub struct InitialPrivateAccountForWallet {
    pub key_chain: KeyChain,
    pub identifier: Identifier,
    pub balance: u128,
}
impl InitialPrivateAccountForWallet {
    /// Derives the account ID from the nullifier public key and identifier.
    #[must_use]
    pub fn account_id(&self) -> AccountId {
        AccountId::from((&self.key_chain.nullifier_public_key, self.identifier))
    }
}
/// Sequencer config options available for custom changes in integration tests.
#[derive(Debug, Clone, Copy)]
@ -34,121 +47,6 @@ impl Default for SequencerPartialConfig {
}
}
pub struct InitialData {
pub public_accounts: Vec<(PrivateKey, u128)>,
pub private_accounts: Vec<(KeyChain, Account)>,
}
impl InitialData {
#[must_use]
pub fn with_two_public_and_two_private_initialized_accounts() -> Self {
let mut public_alice_private_key = PrivateKey::new_os_random();
let mut public_alice_public_key =
PublicKey::new_from_private_key(&public_alice_private_key);
let mut public_alice_account_id = AccountId::from(&public_alice_public_key);
let mut public_bob_private_key = PrivateKey::new_os_random();
let mut public_bob_public_key = PublicKey::new_from_private_key(&public_bob_private_key);
let mut public_bob_account_id = AccountId::from(&public_bob_public_key);
// Ensure consistent ordering
if public_alice_account_id > public_bob_account_id {
std::mem::swap(&mut public_alice_private_key, &mut public_bob_private_key);
std::mem::swap(&mut public_alice_public_key, &mut public_bob_public_key);
std::mem::swap(&mut public_alice_account_id, &mut public_bob_account_id);
}
let mut private_charlie_key_chain = KeyChain::new_os_random();
let mut private_charlie_account_id =
AccountId::from((&private_charlie_key_chain.nullifier_public_key, 0));
let mut private_david_key_chain = KeyChain::new_os_random();
let mut private_david_account_id =
AccountId::from((&private_david_key_chain.nullifier_public_key, 0));
// Ensure consistent ordering
if private_charlie_account_id > private_david_account_id {
std::mem::swap(&mut private_charlie_key_chain, &mut private_david_key_chain);
std::mem::swap(
&mut private_charlie_account_id,
&mut private_david_account_id,
);
}
Self {
public_accounts: vec![
(public_alice_private_key, 10_000),
(public_bob_private_key, 20_000),
],
private_accounts: vec![
(
private_charlie_key_chain,
Account {
balance: 10_000,
data: Data::default(),
program_owner: DEFAULT_PROGRAM_ID,
nonce: 0_u128.into(),
},
),
(
private_david_key_chain,
Account {
balance: 20_000,
data: Data::default(),
program_owner: DEFAULT_PROGRAM_ID,
nonce: 0_u128.into(),
},
),
],
}
}
fn sequencer_initial_public_accounts(&self) -> Vec<PublicAccountPublicInitialData> {
self.public_accounts
.iter()
.map(|(priv_key, balance)| {
let pub_key = PublicKey::new_from_private_key(priv_key);
let account_id = AccountId::from(&pub_key);
PublicAccountPublicInitialData {
account_id,
balance: *balance,
}
})
.collect()
}
fn sequencer_initial_private_accounts(&self) -> Vec<PrivateAccountPublicInitialData> {
self.private_accounts
.iter()
.map(|(key_chain, account)| PrivateAccountPublicInitialData {
npk: key_chain.nullifier_public_key,
account: account.clone(),
})
.collect()
}
fn wallet_initial_accounts(&self) -> Vec<InitialAccountData> {
self.public_accounts
.iter()
.map(|(priv_key, _)| {
let pub_key = PublicKey::new_from_private_key(priv_key);
let account_id = AccountId::from(&pub_key);
InitialAccountData::Public(PublicAccountPrivateInitialData {
account_id,
pub_sign_key: priv_key.clone(),
})
})
.chain(self.private_accounts.iter().map(|(key_chain, account)| {
InitialAccountData::Private(Box::new(PrivateAccountPrivateInitialData {
account: account.clone(),
key_chain: key_chain.clone(),
identifier: 0,
}))
}))
.collect()
}
}
#[derive(Debug, Clone, Copy)]
pub enum UrlProtocol {
Http,
@ -164,36 +62,11 @@ impl std::fmt::Display for UrlProtocol {
}
}
/// Build an [`IndexerConfig`] for tests, pointing the indexer at the given
/// Bedrock node and seeding it with the sequencer-side initial account data.
///
/// # Errors
///
/// Returns an error if `bedrock_addr` cannot be converted into a URL.
pub fn indexer_config(
    bedrock_addr: SocketAddr,
    home: PathBuf,
    initial_data: &InitialData,
) -> Result<IndexerConfig> {
    Ok(IndexerConfig {
        home,
        // Poll consensus frequently so tests observe finalization quickly.
        consensus_info_polling_interval: Duration::from_secs(1),
        bedrock_client_config: ClientConfig {
            addr: addr_to_url(UrlProtocol::Http, bedrock_addr)
                .context("Failed to convert bedrock addr to URL")?,
            auth: None,
            backoff: BackoffConfig {
                start_delay: Duration::from_millis(100),
                max_retries: 10,
            },
        },
        initial_public_accounts: Some(initial_data.sequencer_initial_public_accounts()),
        initial_private_accounts: Some(initial_data.sequencer_initial_private_accounts()),
        // Fixed test-only signing key; not a secret.
        signing_key: [37; 32],
        channel_id: bedrock_channel_id(),
    })
}
pub fn sequencer_config(
partial: SequencerPartialConfig,
home: PathBuf,
bedrock_addr: SocketAddr,
indexer_addr: SocketAddr,
initial_data: &InitialData,
genesis_transactions: Vec<GenesisAction>,
) -> Result<SequencerConfig> {
let SequencerPartialConfig {
max_num_tx_in_block,
@ -204,35 +77,76 @@ pub fn sequencer_config(
Ok(SequencerConfig {
home,
genesis_id: 1,
is_genesis_random: true,
max_num_tx_in_block,
max_block_size,
mempool_max_size,
block_create_timeout,
retry_pending_blocks_timeout: Duration::from_secs(5),
initial_public_accounts: Some(initial_data.sequencer_initial_public_accounts()),
initial_private_accounts: Some(initial_data.sequencer_initial_private_accounts()),
genesis: genesis_transactions,
signing_key: [37; 32],
bedrock_config: BedrockConfig {
backoff: BackoffConfig {
start_delay: Duration::from_millis(100),
max_retries: 5,
},
channel_id: bedrock_channel_id(),
node_url: addr_to_url(UrlProtocol::Http, bedrock_addr)
.context("Failed to convert bedrock addr to URL")?,
auth: None,
},
indexer_rpc_url: addr_to_url(UrlProtocol::Ws, indexer_addr)
.context("Failed to convert indexer addr to URL")?,
})
}
pub fn wallet_config(
sequencer_addr: SocketAddr,
initial_data: &InitialData,
) -> Result<WalletConfig> {
/// Generate the default public test accounts: two fresh random keys, sorted
/// deterministically by their derived account id and paired with the
/// constant initial balances.
#[must_use]
pub fn default_public_accounts_for_wallet() -> Vec<(PrivateKey, u128)> {
    let mut keys = vec![PrivateKey::new_os_random(), PrivateKey::new_os_random()];
    // Stable ordering by account id so tests see a deterministic layout.
    keys.sort_unstable_by_key(|key| AccountId::from(&PublicKey::new_from_private_key(key)));
    keys.into_iter()
        .zip(INITIAL_PUBLIC_BALANCES_FOR_WALLET)
        .collect()
}
/// Generate the default private test accounts: two fresh random key chains,
/// sorted for a deterministic order and paired with the constant initial
/// balances. All accounts use identifier `0`.
#[must_use]
pub fn default_private_accounts_for_wallet() -> Vec<InitialPrivateAccountForWallet> {
    let mut chains = vec![KeyChain::new_os_random(), KeyChain::new_os_random()];
    chains.sort_unstable();
    let mut accounts = Vec::with_capacity(chains.len());
    for (key_chain, balance) in chains.into_iter().zip(INITIAL_PRIVATE_BALANCES_FOR_WALLET) {
        accounts.push(InitialPrivateAccountForWallet {
            key_chain,
            identifier: 0,
            balance,
        });
    }
    accounts
}
/// Build genesis supply actions for the given accounts: one
/// `SupplyAccount` per public account followed by one per private account.
#[must_use]
pub fn genesis_from_accounts(
    public_accounts: &[(PrivateKey, u128)],
    private_accounts: &[InitialPrivateAccountForWallet],
) -> Vec<GenesisAction> {
    let mut actions = Vec::with_capacity(public_accounts.len() + private_accounts.len());
    for (private_key, balance) in public_accounts {
        let public_key = PublicKey::new_from_private_key(private_key);
        actions.push(GenesisAction::SupplyAccount {
            account_id: AccountId::from(&public_key),
            balance: *balance,
        });
    }
    for account in private_accounts {
        actions.push(GenesisAction::SupplyAccount {
            account_id: account.account_id(),
            balance: account.balance,
        });
    }
    actions
}
pub fn wallet_config(sequencer_addr: SocketAddr) -> Result<WalletConfig> {
Ok(WalletConfig {
sequencer_addr: addr_to_url(UrlProtocol::Http, sequencer_addr)
.context("Failed to convert sequencer addr to URL")?,
@ -240,11 +154,23 @@ pub fn wallet_config(
seq_tx_poll_max_blocks: 15,
seq_poll_max_retries: 10,
seq_block_poll_max_amount: 100,
initial_accounts: Some(initial_data.wallet_initial_accounts()),
basic_auth: None,
})
}
/// Build an [`IndexerConfig`] for tests, pointing the indexer at the given
/// Bedrock node. Initial account data is no longer part of this config.
///
/// # Errors
///
/// Returns an error if `bedrock_addr` cannot be converted into a URL.
pub fn indexer_config(bedrock_addr: SocketAddr, home: PathBuf) -> Result<IndexerConfig> {
    Ok(IndexerConfig {
        home,
        // Poll consensus frequently so tests observe finalization quickly.
        consensus_info_polling_interval: Duration::from_secs(1),
        bedrock_config: ClientConfig {
            addr: addr_to_url(UrlProtocol::Http, bedrock_addr)
                .context("Failed to convert bedrock addr to URL")?,
            auth: None,
        },
        channel_id: bedrock_channel_id(),
    })
}
pub fn addr_to_url(protocol: UrlProtocol, addr: SocketAddr) -> Result<Url> {
// Convert 0.0.0.0 to 127.0.0.1 for client connections
// When binding to port 0, the server binds to 0.0.0.0:<random_port>

View File

@ -0,0 +1,34 @@
//! Thin client wrapper for querying the indexer's JSON-RPC API in tests.
//!
//! The sequencer doesn't depend on the indexer at runtime — finalization comes
//! from zone-sdk events. This wrapper exists purely for test ergonomics so
//! integration tests can construct a single connection and call
//! `indexer_service_rpc::RpcClient` methods directly via `Deref`.
use std::ops::Deref;
use anyhow::{Context as _, Result};
use jsonrpsee::ws_client::{WsClient, WsClientBuilder};
use log::info;
use url::Url;
/// Newtype over a websocket JSON-RPC client connected to the indexer;
/// dereferences to [`WsClient`] so RPC trait methods can be called directly.
pub struct IndexerClient(WsClient);

impl IndexerClient {
    /// Open a websocket connection to the indexer at `indexer_url`.
    pub async fn new(indexer_url: &Url) -> Result<Self> {
        info!("Connecting to Indexer at {indexer_url}");
        WsClientBuilder::default()
            .build(indexer_url)
            .await
            .map(Self)
            .context("Failed to create websocket client")
    }
}

impl Deref for IndexerClient {
    type Target = WsClient;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

View File

@ -1,6 +1,6 @@
//! This library contains common code for integration tests.
use std::sync::LazyLock;
use std::{net::SocketAddr, sync::LazyLock};
use anyhow::{Context as _, Result};
use common::{HashType, transaction::NSSATransaction};
@ -9,29 +9,56 @@ use indexer_service::IndexerHandle;
use log::{debug, error};
use nssa::{AccountId, PrivacyPreservingTransaction};
use nssa_core::Commitment;
use sequencer_core::indexer_client::{IndexerClient, IndexerClientTrait as _};
use sequencer_core::config::GenesisAction;
use sequencer_service::SequencerHandle;
use sequencer_service_rpc::{RpcClient as _, SequencerClient, SequencerClientBuilder};
use tempfile::TempDir;
use testcontainers::compose::DockerCompose;
use wallet::WalletCore;
use wallet::{WalletCore, account::AccountIdWithPrivacy, cli::CliAccountMention};
use crate::setup::{setup_bedrock_node, setup_indexer, setup_sequencer, setup_wallet};
use crate::{
indexer_client::IndexerClient,
setup::{
setup_bedrock_node, setup_indexer, setup_private_accounts_with_initial_supply,
setup_public_accounts_with_initial_supply, setup_sequencer, setup_wallet,
},
};
pub mod config;
pub mod indexer_client;
pub mod setup;
pub mod test_context_ffi;
// TODO: Remove this and control time from tests
pub const TIME_TO_WAIT_FOR_BLOCK_SECONDS: u64 = 12;
pub const NSSA_PROGRAM_FOR_TEST_DATA_CHANGER: &str = "data_changer.bin";
pub const NSSA_PROGRAM_FOR_TEST_NOOP: &str = "noop.bin";
pub const NSSA_PROGRAM_FOR_TEST_PDA_FUND_SPEND_PROXY: &str = "pda_fund_spend_proxy.bin";
const BEDROCK_SERVICE_WITH_OPEN_PORT: &str = "logos-blockchain-node-0";
const BEDROCK_SERVICE_PORT: u16 = 18080;
static LOGGER: LazyLock<()> = LazyLock::new(env_logger::init);
/// Bundle of everything the optional indexer needs: the running service
/// handle, a connected client, and the temp dir backing its home directory.
struct IndexerComponents {
    indexer_handle: IndexerHandle,
    indexer_client: IndexerClient,
    _temp_dir: TempDir,
}

impl Drop for IndexerComponents {
    fn drop(&mut self) {
        // Surface an indexer crash that happened some time before teardown,
        // since nothing else checks the handle's health at this point.
        if !self.indexer_handle.is_healthy() {
            error!("Indexer handle has unexpectedly stopped before IndexerComponents drop");
        }
    }
}
/// Test context which sets up a sequencer and a wallet for integration tests.
///
/// It's memory and logically safe to create multiple instances of this struct in parallel tests,
@ -39,14 +66,13 @@ static LOGGER: LazyLock<()> = LazyLock::new(env_logger::init);
// NOTE: Order of fields is important for proper drop order.
pub struct TestContext {
sequencer_client: SequencerClient,
indexer_client: IndexerClient,
wallet: WalletCore,
wallet_password: String,
/// Optional to move out value in Drop.
sequencer_handle: Option<SequencerHandle>,
indexer_handle: IndexerHandle,
indexer_components: Option<IndexerComponents>,
bedrock_compose: DockerCompose,
_temp_indexer_dir: TempDir,
bedrock_addr: SocketAddr,
_temp_sequencer_dir: TempDir,
_temp_wallet_dir: TempDir,
}
@ -57,65 +83,12 @@ impl TestContext {
Self::builder().build().await
}
/// Get a builder for the test context to customize its configuration.
#[must_use]
pub const fn builder() -> TestContextBuilder {
TestContextBuilder::new()
}
async fn new_configured(
sequencer_partial_config: config::SequencerPartialConfig,
initial_data: config::InitialData,
) -> Result<Self> {
// Ensure logger is initialized only once
*LOGGER;
debug!("Test context setup");
let (bedrock_compose, bedrock_addr) = setup_bedrock_node().await?;
let (indexer_handle, temp_indexer_dir) = setup_indexer(bedrock_addr, &initial_data)
.await
.context("Failed to setup Indexer")?;
let (sequencer_handle, temp_sequencer_dir) = setup_sequencer(
sequencer_partial_config,
bedrock_addr,
indexer_handle.addr(),
&initial_data,
)
.await
.context("Failed to setup Sequencer")?;
let (wallet, temp_wallet_dir, wallet_password) =
setup_wallet(sequencer_handle.addr(), &initial_data)
.await
.context("Failed to setup wallet")?;
let sequencer_url = config::addr_to_url(config::UrlProtocol::Http, sequencer_handle.addr())
.context("Failed to convert sequencer addr to URL")?;
let indexer_url = config::addr_to_url(config::UrlProtocol::Ws, indexer_handle.addr())
.context("Failed to convert indexer addr to URL")?;
let sequencer_client = SequencerClientBuilder::default()
.build(sequencer_url)
.context("Failed to create sequencer client")?;
let indexer_client = IndexerClient::new(&indexer_url)
.await
.context("Failed to create indexer client")?;
Ok(Self {
sequencer_client,
indexer_client,
wallet,
wallet_password,
bedrock_compose,
sequencer_handle: Some(sequencer_handle),
indexer_handle,
_temp_indexer_dir: temp_indexer_dir,
_temp_sequencer_dir: temp_sequencer_dir,
_temp_wallet_dir: temp_wallet_dir,
})
}
/// Get reference to the wallet.
#[must_use]
pub const fn wallet(&self) -> &WalletCore {
@ -138,10 +111,38 @@ impl TestContext {
&self.sequencer_client
}
/// Get reference to the indexer client.
/// Get the Bedrock Node address.
#[must_use]
pub const fn indexer_client(&self) -> &IndexerClient {
&self.indexer_client
pub const fn bedrock_addr(&self) -> SocketAddr {
self.bedrock_addr
}
/// Get reference to the indexer.
///
/// # Panics
///
/// Panics if the indexer is not enabled in the test context. See
/// [`TestContextBuilder::disable_indexer()`].
#[must_use]
pub fn indexer(&self) -> &IndexerHandle {
self.indexer_components
.as_ref()
.map(|components| &components.indexer_handle)
.expect("Called `TestContext::indexer()` on context with disabled indexer")
}
/// Get reference to the indexer client.
///
/// # Panics
///
/// Panics if the indexer is not enabled in the test context. See
/// [`TestContextBuilder::disable_indexer()`].
#[must_use]
pub fn indexer_client(&self) -> &IndexerClient {
self.indexer_components
.as_ref()
.map(|components| &components.indexer_client)
.expect("Called `TestContext::indexer_client()` on context with disabled indexer")
}
/// Get existing public account IDs in the wallet.
@ -149,8 +150,9 @@ impl TestContext {
pub fn existing_public_accounts(&self) -> Vec<AccountId> {
self.wallet
.storage()
.user_data
.key_chain()
.public_account_ids()
.map(|(account_id, _idx)| account_id)
.collect()
}
@ -159,8 +161,9 @@ impl TestContext {
pub fn existing_private_accounts(&self) -> Vec<AccountId> {
self.wallet
.storage()
.user_data
.key_chain()
.private_account_ids()
.map(|(account_id, _idx)| account_id)
.collect()
}
}
@ -169,15 +172,14 @@ impl Drop for TestContext {
fn drop(&mut self) {
let Self {
sequencer_handle,
indexer_handle,
bedrock_compose,
_temp_indexer_dir: _,
_temp_sequencer_dir: _,
_temp_wallet_dir: _,
bedrock_addr: _,
indexer_components: _,
sequencer_client: _,
indexer_client: _,
wallet: _,
wallet_password: _,
_temp_sequencer_dir: _,
_temp_wallet_dir: _,
} = self;
let sequencer_handle = sequencer_handle
@ -193,10 +195,6 @@ impl Drop for TestContext {
);
}
if !indexer_handle.is_healthy() {
error!("Indexer handle has unexpectedly stopped before TestContext drop");
}
let container = bedrock_compose
.service(BEDROCK_SERVICE_WITH_OPEN_PORT)
.unwrap_or_else(|| {
@ -217,43 +215,24 @@ impl Drop for TestContext {
}
}
/// A test context to be used in normal #[test] tests.
pub struct BlockingTestContext {
    // Option so the context can be moved out during teardown.
    ctx: Option<TestContext>,
    runtime: tokio::runtime::Runtime,
}

impl BlockingTestContext {
    /// Build a default context on a dedicated runtime so it can be driven from
    /// synchronous test code.
    pub fn new() -> Result<Self> {
        let runtime = tokio::runtime::Runtime::new().unwrap();
        let ctx = runtime.block_on(TestContext::new())?;
        Ok(Self {
            ctx: Some(ctx),
            runtime,
        })
    }

    /// Shared reference to the inner context.
    pub const fn ctx(&self) -> &TestContext {
        self.ctx.as_ref().expect("TestContext is set")
    }
}
pub struct TestContextBuilder {
initial_data: Option<config::InitialData>,
genesis_transactions: Option<Vec<GenesisAction>>,
sequencer_partial_config: Option<config::SequencerPartialConfig>,
enable_indexer: bool,
}
impl TestContextBuilder {
    /// Default builder state: no custom genesis, default sequencer config,
    /// and the indexer enabled.
    const fn new() -> Self {
        Self {
            genesis_transactions: None,
            sequencer_partial_config: None,
            enable_indexer: true,
        }
    }
#[must_use]
pub fn with_initial_data(mut self, initial_data: config::InitialData) -> Self {
self.initial_data = Some(initial_data);
pub fn with_genesis(mut self, genesis_transactions: Vec<GenesisAction>) -> Self {
self.genesis_transactions = Some(genesis_transactions);
self
}
@ -266,14 +245,145 @@ impl TestContextBuilder {
self
}
/// Exclude Indexer from test context.
/// Indexer is enabled by default.
///
/// Methods like [`TestContext::indexer()`] and [`TestContext::indexer_client()`] will panic if
/// called when indexer is disabled.
#[must_use]
pub const fn disable_indexer(mut self) -> Self {
self.enable_indexer = false;
self
}
pub async fn build(self) -> Result<TestContext> {
TestContext::new_configured(
self.sequencer_partial_config.unwrap_or_default(),
self.initial_data.unwrap_or_else(|| {
config::InitialData::with_two_public_and_two_private_initialized_accounts()
let Self {
genesis_transactions,
sequencer_partial_config,
enable_indexer,
} = self;
// Ensure logger is initialized only once
*LOGGER;
debug!("Test context setup");
let (bedrock_compose, bedrock_addr) = setup_bedrock_node()
.await
.context("Failed to setup Bedrock node")?;
let indexer_components = if enable_indexer {
let (indexer_handle, temp_indexer_dir) = setup_indexer(bedrock_addr)
.await
.context("Failed to setup Indexer")?;
let indexer_url = config::addr_to_url(config::UrlProtocol::Ws, indexer_handle.addr())
.context("Failed to convert indexer addr to URL")?;
let indexer_client = IndexerClient::new(&indexer_url)
.await
.context("Failed to create indexer client")?;
Some(IndexerComponents {
indexer_handle,
indexer_client,
_temp_dir: temp_indexer_dir,
})
} else {
None
};
let initial_public_accounts = config::default_public_accounts_for_wallet();
let initial_private_accounts = config::default_private_accounts_for_wallet();
let (sequencer_handle, temp_sequencer_dir) = setup_sequencer(
sequencer_partial_config.unwrap_or_default(),
bedrock_addr,
genesis_transactions.unwrap_or_else(|| {
config::genesis_from_accounts(&initial_public_accounts, &initial_private_accounts)
}),
)
.await
.context("Failed to setup Sequencer")?;
let (mut wallet, temp_wallet_dir, wallet_password) = setup_wallet(
sequencer_handle.addr(),
&initial_public_accounts,
&initial_private_accounts,
)
.context("Failed to setup wallet")?;
setup_public_accounts_with_initial_supply(&wallet, &initial_public_accounts)
.await
.context("Failed to initialize public accounts in wallet")?;
setup_private_accounts_with_initial_supply(&mut wallet, &initial_private_accounts)
.await
.context("Failed to initialize private accounts in wallet")?;
let sequencer_url = config::addr_to_url(config::UrlProtocol::Http, sequencer_handle.addr())
.context("Failed to convert sequencer addr to URL")?;
let sequencer_client = SequencerClientBuilder::default()
.build(sequencer_url)
.context("Failed to create sequencer client")?;
Ok(TestContext {
sequencer_client,
wallet,
wallet_password,
bedrock_compose,
bedrock_addr,
sequencer_handle: Some(sequencer_handle),
indexer_components,
_temp_sequencer_dir: temp_sequencer_dir,
_temp_wallet_dir: temp_wallet_dir,
})
}
pub fn build_blocking(self) -> Result<BlockingTestContext> {
let runtime = tokio::runtime::Runtime::new().context("Failed to create Tokio runtime")?;
let ctx = runtime.block_on(self.build())?;
Ok(BlockingTestContext {
ctx: Some(ctx),
runtime,
})
}
}
/// A test context to be used in normal #[test] tests.
pub struct BlockingTestContext {
    // Option so the context can be moved out during teardown.
    ctx: Option<TestContext>,
    runtime: tokio::runtime::Runtime,
}

impl BlockingTestContext {
    /// Build a context with the default builder configuration on a dedicated runtime.
    pub fn new() -> Result<Self> {
        TestContext::builder().build_blocking()
    }

    /// Shared reference to the inner context.
    pub const fn ctx(&self) -> &TestContext {
        self.ctx.as_ref().expect("TestContext is set")
    }

    /// The runtime all async work for this context runs on.
    pub const fn runtime(&self) -> &tokio::runtime::Runtime {
        &self.runtime
    }

    /// Run an async closure borrowing the context immutably and block until it
    /// completes. The `'ctx` bound lets the future hold the borrow.
    pub fn block_on<'ctx, F>(&'ctx self, f: impl FnOnce(&'ctx TestContext) -> F) -> F::Output
    where
        F: std::future::Future + 'ctx,
    {
        let future = f(self.ctx());
        self.runtime.block_on(future)
    }

    /// Like [`Self::block_on`] but hands the closure a mutable borrow of the
    /// context (e.g. for wallet mutations).
    pub fn block_on_mut<'ctx, F>(
        &'ctx mut self,
        f: impl FnOnce(&'ctx mut TestContext) -> F,
    ) -> F::Output
    where
        F: std::future::Future + 'ctx,
    {
        let ctx_mut = self.ctx.as_mut().expect("TestContext is set");
        let future = f(ctx_mut);
        self.runtime.block_on(future)
    }
}
@ -291,13 +401,13 @@ impl Drop for BlockingTestContext {
}
#[must_use]
pub fn format_public_account_id(account_id: AccountId) -> String {
format!("Public/{account_id}")
pub const fn public_mention(account_id: AccountId) -> CliAccountMention {
CliAccountMention::Id(AccountIdWithPrivacy::Public(account_id))
}
#[must_use]
pub fn format_private_account_id(account_id: AccountId) -> String {
format!("Private/{account_id}")
pub const fn private_mention(account_id: AccountId) -> CliAccountMention {
CliAccountMention::Id(AccountIdWithPrivacy::Private(account_id))
}
#[expect(

View File

@ -1,27 +1,24 @@
use std::{
ffi::{CString, c_char},
fs::File,
io::Write as _,
net::SocketAddr,
path::PathBuf,
};
use std::{collections::HashMap, net::SocketAddr, path::PathBuf};
use anyhow::{Context as _, Result, bail};
use indexer_ffi::{IndexerServiceFFI, api::lifecycle::InitializedIndexerServiceFFIResult};
use common::transaction::NSSATransaction;
use indexer_service::IndexerHandle;
use log::{debug, warn};
use sequencer_service::SequencerHandle;
use nssa::{AccountId, PrivateKey, PublicKey, PublicTransaction, program::Program};
use sequencer_service::{GenesisAction, SequencerHandle};
use sequencer_service_rpc::RpcClient as _;
use tempfile::TempDir;
use testcontainers::compose::DockerCompose;
use wallet::{WalletCore, config::WalletConfigOverrides};
use wallet::{
AccDecodeData::Decode, PrivacyPreservingAccount, WalletCore, config::WalletConfigOverrides,
};
use crate::{BEDROCK_SERVICE_PORT, BEDROCK_SERVICE_WITH_OPEN_PORT, config};
use crate::{
BEDROCK_SERVICE_PORT, BEDROCK_SERVICE_WITH_OPEN_PORT,
config::{self, InitialPrivateAccountForWallet},
};
unsafe extern "C" {
fn start_indexer(config_path: *const c_char, port: u16) -> InitializedIndexerServiceFFIResult;
}
pub(crate) async fn setup_bedrock_node() -> Result<(DockerCompose, SocketAddr)> {
pub async fn setup_bedrock_node() -> Result<(DockerCompose, SocketAddr)> {
let manifest_dir = env!("CARGO_MANIFEST_DIR");
let bedrock_compose_path = PathBuf::from(manifest_dir).join("../bedrock/docker-compose.yml");
@ -91,10 +88,7 @@ pub(crate) async fn setup_bedrock_node() -> Result<(DockerCompose, SocketAddr)>
Ok((compose, addr))
}
pub(crate) async fn setup_indexer(
bedrock_addr: SocketAddr,
initial_data: &config::InitialData,
) -> Result<(IndexerHandle, TempDir)> {
pub async fn setup_indexer(bedrock_addr: SocketAddr) -> Result<(IndexerHandle, TempDir)> {
let temp_indexer_dir =
tempfile::tempdir().context("Failed to create temp dir for indexer home")?;
@ -103,12 +97,8 @@ pub(crate) async fn setup_indexer(
temp_indexer_dir.path().display()
);
let indexer_config = config::indexer_config(
bedrock_addr,
temp_indexer_dir.path().to_owned(),
initial_data,
)
.context("Failed to create Indexer config")?;
let indexer_config = config::indexer_config(bedrock_addr, temp_indexer_dir.path().to_owned())
.context("Failed to create Indexer config")?;
indexer_service::run_server(indexer_config, 0)
.await
@ -116,11 +106,10 @@ pub(crate) async fn setup_indexer(
.map(|handle| (handle, temp_indexer_dir))
}
pub(crate) async fn setup_sequencer(
pub async fn setup_sequencer(
partial: config::SequencerPartialConfig,
bedrock_addr: SocketAddr,
indexer_addr: SocketAddr,
initial_data: &config::InitialData,
genesis_transactions: Vec<GenesisAction>,
) -> Result<(SequencerHandle, TempDir)> {
let temp_sequencer_dir =
tempfile::tempdir().context("Failed to create temp dir for sequencer home")?;
@ -134,8 +123,7 @@ pub(crate) async fn setup_sequencer(
partial,
temp_sequencer_dir.path().to_owned(),
bedrock_addr,
indexer_addr,
initial_data,
genesis_transactions,
)
.context("Failed to create Sequencer config")?;
@ -144,12 +132,12 @@ pub(crate) async fn setup_sequencer(
Ok((sequencer_handle, temp_sequencer_dir))
}
pub(crate) async fn setup_wallet(
pub fn setup_wallet(
sequencer_addr: SocketAddr,
initial_data: &config::InitialData,
initial_public_accounts: &[(PrivateKey, u128)],
initial_private_accounts: &[InitialPrivateAccountForWallet],
) -> Result<(WalletCore, TempDir, String)> {
let config = config::wallet_config(sequencer_addr, initial_data)
.context("Failed to create Wallet config")?;
let config = config::wallet_config(sequencer_addr).context("Failed to create Wallet config")?;
let config_serialized =
serde_json::to_string_pretty(&config).context("Failed to serialize Wallet config")?;
@ -164,57 +152,176 @@ pub(crate) async fn setup_wallet(
let config_overrides = WalletConfigOverrides::default();
let wallet_password = "test_pass".to_owned();
let (wallet, _mnemonic) = WalletCore::new_init_storage(
let (mut wallet, _mnemonic) = WalletCore::new_init_storage(
config_path,
storage_path,
Some(config_overrides),
&wallet_password,
)
.context("Failed to init wallet")?;
for (private_key, _balance) in initial_public_accounts {
wallet
.storage_mut()
.key_chain_mut()
.add_imported_public_account(private_key.clone());
}
for private_account in initial_private_accounts {
wallet
.storage_mut()
.key_chain_mut()
.add_imported_private_account(
private_account.key_chain.clone(),
None,
private_account.identifier,
nssa::Account::default(),
);
}
wallet
.store_persistent_data()
.await
.context("Failed to store wallet persistent data")?;
Ok((wallet, temp_wallet_dir, wallet_password))
}
pub(crate) fn setup_indexer_ffi(
bedrock_addr: SocketAddr,
initial_data: &config::InitialData,
) -> Result<(IndexerServiceFFI, TempDir)> {
let temp_indexer_dir =
tempfile::tempdir().context("Failed to create temp dir for indexer home")?;
debug!(
"Using temp indexer home at {}",
temp_indexer_dir.path().display()
);
let indexer_config = config::indexer_config(
bedrock_addr,
temp_indexer_dir.path().to_owned(),
initial_data,
)
.context("Failed to create Indexer config")?;
let config_json = serde_json::to_vec(&indexer_config)?;
let config_path = temp_indexer_dir.path().join("indexer_config.json");
let mut file = File::create(config_path.as_path())?;
file.write_all(&config_json)?;
file.flush()?;
let res =
// SAFETY: lib function ensures validity of value.
unsafe { start_indexer(CString::new(config_path.to_str().unwrap())?.as_ptr(), 0) };
if res.error.is_error() {
anyhow::bail!("Indexer FFI error {:?}", res.error);
pub async fn setup_public_accounts_with_initial_supply(
wallet: &WalletCore,
initial_public_accounts: &[(PrivateKey, u128)],
) -> Result<()> {
for (private_key, amount) in initial_public_accounts {
claim_funds_from_vault(
wallet,
AccountId::from(&PublicKey::new_from_private_key(private_key)),
*amount,
)
.await
.context("Failed to claim funds from vault into public account")?;
}
Ok((
// SAFETY: lib function ensures validity of value.
unsafe { std::ptr::read(res.value) },
temp_indexer_dir,
))
Ok(())
}
/// Fund each configured private wallet account by claiming its initial
/// balance from the vault program.
///
/// # Errors
///
/// Returns an error if any individual vault claim fails.
pub async fn setup_private_accounts_with_initial_supply(
    wallet: &mut WalletCore,
    initial_private_accounts: &[InitialPrivateAccountForWallet],
) -> Result<()> {
    for account in initial_private_accounts {
        claim_funds_from_vault_to_private(wallet, account.account_id(), account.balance)
            .await
            .context("Failed to claim funds from vault into private account")?;
    }
    Ok(())
}
/// Claim `amount` from the vault program into the public account `owner_id`:
/// builds a signed public transaction, submits it via the wallet's sequencer
/// client, and waits for it to be confirmed.
async fn claim_funds_from_vault(
    wallet: &WalletCore,
    owner_id: AccountId,
    amount: u128,
) -> Result<()> {
    // The vault account for an owner is derived from the vault program id and
    // the owner's account id.
    let vault_program_id = Program::vault().id();
    let owner_vault_id = vault_core::compute_vault_account_id(vault_program_id, owner_id);
    // Current nonces are required to build a valid message.
    let nonces = wallet
        .get_accounts_nonces(vec![owner_id])
        .await
        .context("Failed to fetch owner nonce")?;
    let signing_key = wallet
        .storage()
        .key_chain()
        .pub_account_signing_key(owner_id)
        .with_context(|| format!("Missing signing key for public account {owner_id}"))?;
    let message = nssa::public_transaction::Message::try_new(
        vault_program_id,
        vec![owner_id, owner_vault_id],
        nonces,
        vault_core::Instruction::Claim { amount },
    )
    .context("Failed to build vault claim message")?;
    // Sign with the owner's key only; the vault account needs no signature.
    let witness_set = nssa::public_transaction::WitnessSet::for_message(&message, &[signing_key]);
    let tx = PublicTransaction::new(message, witness_set);
    let tx_hash = wallet
        .sequencer_client
        .send_transaction(NSSATransaction::Public(tx))
        .await
        .context("Failed to submit vault claim transaction")?;
    // Block until the transfer is observed on-chain.
    wallet
        .poll_native_token_transfer(tx_hash)
        .await
        .context("Failed to confirm vault claim transaction")?;
    Ok(())
}
/// Claim `amount` from the vault program into the *private* account
/// `owner_id` via a privacy-preserving transaction, then decode the resulting
/// private output into the wallet and persist wallet state.
async fn claim_funds_from_vault_to_private(
    wallet: &mut WalletCore,
    owner_id: AccountId,
    amount: u128,
) -> Result<()> {
    // Guard: the wallet must already know this private account's key material.
    let Some(_) = wallet.storage().key_chain().private_account(owner_id) else {
        bail!("Missing private account in wallet key chain for account {owner_id}");
    };
    let vault_program = Program::vault();
    let vault_program_id = vault_program.id();
    let owner_vault_id = vault_core::compute_vault_account_id(vault_program_id, owner_id);
    let instruction_data =
        Program::serialize_instruction(vault_core::Instruction::Claim { amount })
            .context("Failed to serialize vault private claim instruction")?;
    // The vault program's proof circuit depends on the authenticated-transfer
    // program, so it is supplied as a dependency.
    let program_with_dependencies =
        nssa::privacy_preserving_transaction::circuit::ProgramWithDependencies::new(
            vault_program,
            HashMap::from([(
                Program::authenticated_transfer_program().id(),
                Program::authenticated_transfer_program(),
            )]),
        );
    // Accounts: the owned private recipient and the public vault source.
    let (tx_hash, mut secrets) = wallet
        .send_privacy_preserving_tx(
            vec![
                PrivacyPreservingAccount::PrivateOwned(owner_id),
                PrivacyPreservingAccount::Public(owner_vault_id),
            ],
            instruction_data,
            &program_with_dependencies,
        )
        .await
        .context("Failed to submit private vault claim transaction")?;
    // Exactly one private account was involved, so exactly one secret is expected.
    let secret = secrets
        .pop()
        .context("Expected one private output secret for vault claim")?;
    let transfer_tx = wallet
        .poll_native_token_transfer(tx_hash)
        .await
        .context("Failed to confirm private vault claim transaction")?;
    let NSSATransaction::PrivacyPreserving(tx) = transfer_tx else {
        bail!("Expected privacy preserving transaction result for private vault claim");
    };
    // Decode the private output with the saved secret so the wallet's view of
    // the account balance is updated.
    wallet
        .decode_insert_privacy_preserving_transaction_results(&tx, &[Decode(secret, owner_id)])
        .context("Failed to decode private vault claim transaction")?;
    // NOTE(review): called without `.await` here, so `store_persistent_data`
    // is presumably synchronous in this revision — confirm against WalletCore.
    wallet
        .store_persistent_data()
        .context("Failed to store wallet data after private vault claim")?;
    Ok(())
}

View File

@ -1,296 +0,0 @@
use std::sync::Arc;
use anyhow::{Context as _, Result};
use futures::FutureExt as _;
use indexer_ffi::IndexerServiceFFI;
use indexer_service_rpc::RpcClient as _;
use log::{debug, error};
use nssa::AccountId;
use sequencer_core::indexer_client::{IndexerClient, IndexerClientTrait as _};
use sequencer_service::SequencerHandle;
use sequencer_service_rpc::{RpcClient as _, SequencerClient, SequencerClientBuilder};
use tempfile::TempDir;
use testcontainers::compose::DockerCompose;
use wallet::WalletCore;
use crate::{
BEDROCK_SERVICE_WITH_OPEN_PORT, LOGGER, TestContextBuilder, config,
setup::{setup_bedrock_node, setup_indexer_ffi, setup_sequencer, setup_wallet},
};
/// Test context which sets up a sequencer, indexer through ffi and a wallet for integration tests.
///
/// It's memory and logically safe to create multiple instances of this struct in parallel tests,
/// as each instance uses its own temporary directories for sequencer and wallet data.
// NOTE: Order of fields is important for proper drop order.
pub struct TestContextFFI {
    sequencer_client: SequencerClient,
    indexer_client: IndexerClient,
    wallet: WalletCore,
    wallet_password: String,
    /// Optional to move out value in Drop.
    sequencer_handle: Option<SequencerHandle>,
    // Keeps the Bedrock docker-compose stack alive for the context's lifetime.
    bedrock_compose: DockerCompose,
    // Temp dirs are held only so their backing directories outlive the services.
    _temp_indexer_dir: TempDir,
    _temp_sequencer_dir: TempDir,
    _temp_wallet_dir: TempDir,
}
#[expect(
    clippy::multiple_inherent_impl,
    reason = "It is more natural to have this implementation here"
)]
impl TestContextBuilder {
    /// Build an FFI-backed test context on the provided runtime, falling back
    /// to the default sequencer config and initial data when unset.
    pub fn build_ffi(
        self,
        runtime: &Arc<tokio::runtime::Runtime>,
    ) -> Result<(TestContextFFI, IndexerServiceFFI)> {
        TestContextFFI::new_configured(
            self.sequencer_partial_config.unwrap_or_default(),
            &self.initial_data.unwrap_or_else(|| {
                config::InitialData::with_two_public_and_two_private_initialized_accounts()
            }),
            runtime,
        )
    }
}
impl TestContextFFI {
/// Create new test context.
pub fn new(runtime: &Arc<tokio::runtime::Runtime>) -> Result<(Self, IndexerServiceFFI)> {
Self::builder().build_ffi(runtime)
}
#[must_use]
pub const fn builder() -> TestContextBuilder {
TestContextBuilder::new()
}
fn new_configured(
sequencer_partial_config: config::SequencerPartialConfig,
initial_data: &config::InitialData,
runtime: &Arc<tokio::runtime::Runtime>,
) -> Result<(Self, IndexerServiceFFI)> {
// Ensure logger is initialized only once
*LOGGER;
debug!("Test context setup");
let (bedrock_compose, bedrock_addr) = runtime.block_on(setup_bedrock_node())?;
let (indexer_ffi, temp_indexer_dir) =
setup_indexer_ffi(bedrock_addr, initial_data).context("Failed to setup Indexer")?;
let (sequencer_handle, temp_sequencer_dir) = runtime
.block_on(setup_sequencer(
sequencer_partial_config,
bedrock_addr,
// SAFETY: addr is valid if indexer_ffi is valid.
unsafe { indexer_ffi.addr() },
initial_data,
))
.context("Failed to setup Sequencer")?;
let (wallet, temp_wallet_dir, wallet_password) = runtime
.block_on(setup_wallet(sequencer_handle.addr(), initial_data))
.context("Failed to setup wallet")?;
let sequencer_url = config::addr_to_url(config::UrlProtocol::Http, sequencer_handle.addr())
.context("Failed to convert sequencer addr to URL")?;
let indexer_url = config::addr_to_url(
config::UrlProtocol::Ws,
// SAFETY: addr is valid if indexer_ffi is valid.
unsafe { indexer_ffi.addr() },
)
.context("Failed to convert indexer addr to URL")?;
let sequencer_client = SequencerClientBuilder::default()
.build(sequencer_url)
.context("Failed to create sequencer client")?;
let indexer_client = runtime
.block_on(IndexerClient::new(&indexer_url))
.context("Failed to create indexer client")?;
Ok((
Self {
sequencer_client,
indexer_client,
wallet,
wallet_password,
bedrock_compose,
sequencer_handle: Some(sequencer_handle),
_temp_indexer_dir: temp_indexer_dir,
_temp_sequencer_dir: temp_sequencer_dir,
_temp_wallet_dir: temp_wallet_dir,
},
indexer_ffi,
))
}
/// Get reference to the wallet.
#[must_use]
pub const fn wallet(&self) -> &WalletCore {
&self.wallet
}
#[must_use]
pub fn wallet_password(&self) -> &str {
&self.wallet_password
}
/// Get mutable reference to the wallet.
pub const fn wallet_mut(&mut self) -> &mut WalletCore {
&mut self.wallet
}
/// Get reference to the sequencer client.
#[must_use]
pub const fn sequencer_client(&self) -> &SequencerClient {
&self.sequencer_client
}
/// Get reference to the indexer client.
#[must_use]
pub const fn indexer_client(&self) -> &IndexerClient {
&self.indexer_client
}
/// Get existing public account IDs in the wallet.
#[must_use]
pub fn existing_public_accounts(&self) -> Vec<AccountId> {
self.wallet
.storage()
.user_data
.public_account_ids()
.collect()
}
/// Get existing private account IDs in the wallet.
///
/// Returns a snapshot collected from the wallet's stored user data.
#[must_use]
pub fn existing_private_accounts(&self) -> Vec<AccountId> {
    let user_data = &self.wallet.storage().user_data;
    user_data.private_account_ids().collect()
}
/// Fetch the latest block id known to the sequencer, blocking on `runtime`.
pub fn get_last_block_sequencer(&self, runtime: &Arc<tokio::runtime::Runtime>) -> Result<u64> {
    let block_id = runtime.block_on(self.sequencer_client.get_last_block_id())?;
    Ok(block_id)
}
/// Fetch the latest finalized block id known to the indexer, blocking on `runtime`.
pub fn get_last_block_indexer(&self, runtime: &Arc<tokio::runtime::Runtime>) -> Result<u64> {
    let block_id = runtime.block_on(self.indexer_client.get_last_finalized_block_id())?;
    Ok(block_id)
}
}
impl Drop for TestContextFFI {
    /// Teardown-time health checks: logs (does not panic about) a sequencer
    /// task or Bedrock container that died before the test context was dropped,
    /// so post-mortem logs explain subsequent test failures.
    // NOTE(review): the `expect`/`panic!` calls below will abort the process if
    // this `drop` runs during unwinding from another panic — acceptable for a
    // test harness, but worth confirming that is intended.
    fn drop(&mut self) {
        // Exhaustively destructure `Self` so adding a field without deciding
        // how drop handles it becomes a compile error.
        let Self {
            sequencer_handle,
            bedrock_compose,
            _temp_indexer_dir: _,
            _temp_sequencer_dir: _,
            _temp_wallet_dir: _,
            sequencer_client: _,
            indexer_client: _,
            wallet: _,
            wallet_password: _,
        } = self;
        // The handle is stored as `Option` solely so it can be moved out here.
        let sequencer_handle = sequencer_handle
            .take()
            .expect("Sequencer handle should be present in TestContext drop");
        if !sequencer_handle.is_healthy() {
            // `now_or_never` polls the `failed()` future once; since the handle
            // reported unhealthy it must already be resolved. The `let Err(..)`
            // pattern suggests the `Ok` variant is uninhabited (error-only
            // result) — NOTE(review): confirm `failed()` cannot yield `Ok`.
            let Err(err) = sequencer_handle
                .failed()
                .now_or_never()
                .expect("Sequencer handle should not be running");
            error!(
                "Sequencer handle has unexpectedly stopped before TestContext drop with error: {err:#}"
            );
        }
        let container = bedrock_compose
            .service(BEDROCK_SERVICE_WITH_OPEN_PORT)
            .unwrap_or_else(|| {
                panic!("Failed to get Bedrock service container `{BEDROCK_SERVICE_WITH_OPEN_PORT}`")
            });
        // Shell out to `docker inspect` to check the container's running state;
        // `-f {{.State.Running}}` prints just "true" or "false".
        let output = std::process::Command::new("docker")
            .args(["inspect", "-f", "{{.State.Running}}", container.id()])
            .output()
            .expect("Failed to execute docker inspect command to check if Bedrock container is still running");
        let stdout = String::from_utf8(output.stdout)
            .expect("Failed to parse docker inspect output as String");
        // Trim the trailing newline docker prints before comparing.
        if stdout.trim() != "true" {
            error!(
                "Bedrock container `{}` is not running during TestContext drop, docker inspect output: {stdout}",
                container.id()
            );
        }
    }
}
/// A test context with ffi to be used in normal #[test] tests.
pub struct BlockingTestContextFFI {
    /// Wrapped in `Option` so `Drop` can move the context out and drop it
    /// inside the runtime (see the `Drop` impl).
    ctx: Option<TestContextFFI>,
    /// Dedicated Tokio runtime used to block on async operations from
    /// synchronous test code.
    runtime: Arc<tokio::runtime::Runtime>,
    /// FFI handle to the indexer service; its health is checked on drop.
    indexer_ffi: IndexerServiceFFI,
}
impl BlockingTestContextFFI {
    /// Create a new blocking test context: spins up a dedicated Tokio runtime
    /// and builds the async [`TestContextFFI`] on top of it.
    ///
    /// # Errors
    /// Returns an error if the Tokio runtime cannot be created or if the
    /// underlying test context fails to set up.
    pub fn new() -> Result<Self> {
        // Propagate runtime-creation failures as `Err` instead of panicking,
        // consistent with how every other setup failure is reported here.
        let runtime = tokio::runtime::Runtime::new()
            .context("Failed to create Tokio runtime for blocking test context")?;
        let runtime_wrapped = Arc::new(runtime);
        let (ctx, indexer_ffi) = TestContextFFI::new(&runtime_wrapped)?;
        Ok(Self {
            ctx: Some(ctx),
            runtime: runtime_wrapped,
            indexer_ffi,
        })
    }
    /// Get reference to the inner test context.
    ///
    /// # Panics
    /// Panics if the context was already taken (only possible mid-drop).
    #[must_use]
    pub const fn ctx(&self) -> &TestContextFFI {
        self.ctx.as_ref().expect("TestContext is set")
    }
    /// Get mutable reference to the inner test context.
    ///
    /// # Panics
    /// Panics if the context was already taken (only possible mid-drop).
    #[must_use]
    pub const fn ctx_mut(&mut self) -> &mut TestContextFFI {
        self.ctx.as_mut().expect("TestContext is set")
    }
    /// Get reference to the shared Tokio runtime.
    #[must_use]
    pub const fn runtime(&self) -> &Arc<tokio::runtime::Runtime> {
        &self.runtime
    }
    /// Get an owned (refcounted) handle to the shared Tokio runtime.
    #[must_use]
    pub fn runtime_clone(&self) -> Arc<tokio::runtime::Runtime> {
        Arc::<tokio::runtime::Runtime>::clone(&self.runtime)
    }
}
impl Drop for BlockingTestContextFFI {
    /// Drops the inner [`TestContextFFI`] while inside the Tokio runtime so
    /// any async cleanup performed by its drop can run, then verifies the
    /// indexer is still healthy, logging if it stopped early.
    fn drop(&mut self) {
        // Exhaustive destructuring keeps this in sync with the struct fields.
        let Self {
            ctx,
            runtime,
            indexer_ffi,
        } = self;
        // Move the context out and drop it on the runtime; dropping `None`
        // (already taken) is a no-op.
        let taken = ctx.take();
        runtime.block_on(async move { drop(taken) });
        let indexer_handle =
            // SAFETY: lib function ensures validity of value.
            unsafe { indexer_ffi.handle() };
        if !indexer_handle.is_healthy() {
            error!("Indexer handle has unexpectedly stopped before TestContext drop");
        }
    }
}

View File

@ -3,16 +3,21 @@
reason = "We don't care about these in tests"
)]
use anyhow::Result;
use integration_tests::{TestContext, format_private_account_id};
use anyhow::{Context as _, Result};
use integration_tests::{TestContext, private_mention};
use key_protocol::key_management::KeyChain;
use log::info;
use nssa::program::Program;
use nssa::{Data, program::Program};
use nssa_core::account::Nonce;
use sequencer_service_rpc::RpcClient as _;
use tokio::test;
use wallet::cli::{
Command,
account::{AccountSubcommand, NewSubcommand},
execute_subcommand,
use wallet::{
account::{AccountIdWithPrivacy, HumanReadableAccount, Label},
cli::{
Command, SubcommandReturnValue,
account::{AccountSubcommand, ImportSubcommand, NewSubcommand},
execute_subcommand,
},
};
#[test]
@ -30,7 +35,7 @@ async fn get_existing_account() -> Result<()> {
);
assert_eq!(account.balance, 10000);
assert!(account.data.is_empty());
assert_eq!(account.nonce.0, 0);
assert_eq!(account.nonce.0, 1);
info!("Successfully retrieved account with correct details");
@ -41,7 +46,7 @@ async fn get_existing_account() -> Result<()> {
async fn new_public_account_with_label() -> Result<()> {
let mut ctx = TestContext::new().await?;
let label = "my-test-public-account".to_owned();
let label = Label::new("my-test-public-account");
let command = Command::Account(AccountSubcommand::New(NewSubcommand::Public {
cci: None,
label: Some(label.clone()),
@ -55,14 +60,9 @@ async fn new_public_account_with_label() -> Result<()> {
};
// Verify the label was stored
let stored_label = ctx
.wallet()
.storage()
.labels
.get(&account_id.to_string())
.expect("Label should be stored for the new account");
let resolved = ctx.wallet().storage().resolve_label(&label);
assert_eq!(stored_label.to_string(), label);
assert_eq!(resolved, Some(AccountIdWithPrivacy::Public(account_id)));
info!("Successfully created public account with label");
@ -74,23 +74,17 @@ async fn add_label_to_existing_account() -> Result<()> {
let mut ctx = TestContext::new().await?;
let account_id = ctx.existing_private_accounts()[0];
let label = "my-test-private-account".to_owned();
let label = Label::new("my-test-private-account");
let command = Command::Account(AccountSubcommand::Label {
account_id: Some(format_private_account_id(account_id)),
account_label: None,
account_id: private_mention(account_id),
label: label.clone(),
});
execute_subcommand(ctx.wallet_mut(), command).await?;
let stored_label = ctx
.wallet()
.storage()
.labels
.get(&account_id.to_string())
.expect("Label should be stored for the account");
let resolved = ctx.wallet().storage().resolve_label(&label);
assert_eq!(stored_label.to_string(), label);
assert_eq!(resolved, Some(AccountIdWithPrivacy::Private(account_id)));
info!("Successfully set label on existing private account");
@ -114,12 +108,13 @@ async fn new_public_account_without_label() -> Result<()> {
panic!("Expected RegisterAccount return value")
};
// Verify no label was stored
// Verify no label was stored for the account id
assert!(
!ctx.wallet()
ctx.wallet()
.storage()
.labels
.contains_key(&account_id.to_string()),
.labels_for_account(AccountIdWithPrivacy::Public(account_id))
.next()
.is_none(),
"No label should be stored when not provided"
);
@ -127,3 +122,150 @@ async fn new_public_account_without_label() -> Result<()> {
Ok(())
}
#[test]
async fn import_public_account() -> Result<()> {
let mut ctx = TestContext::new().await?;
let private_key = nssa::PrivateKey::new_os_random();
let account_id = nssa::AccountId::from(&nssa::PublicKey::new_from_private_key(&private_key));
let command = Command::Account(AccountSubcommand::Import(ImportSubcommand::Public {
private_key,
}));
let sub_ret = wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
let SubcommandReturnValue::Empty = sub_ret else {
anyhow::bail!("Expected Empty return value");
};
let imported_key = ctx
.wallet()
.storage()
.key_chain()
.pub_account_signing_key(account_id);
assert!(
imported_key.is_some(),
"Imported public account should be present"
);
Ok(())
}
#[test]
async fn import_private_account() -> Result<()> {
let mut ctx = TestContext::new().await?;
let key_chain = KeyChain::new_os_random();
let account_id = nssa::AccountId::from((&key_chain.nullifier_public_key, 0));
let account = nssa::Account {
program_owner: Program::authenticated_transfer_program().id(),
balance: 777,
data: Data::default(),
nonce: Nonce::default(),
};
let key_chain_json = serde_json::to_string(&key_chain)
.context("Failed to serialize key chain for private import")?;
let account_state = HumanReadableAccount::from(account.clone());
let command = Command::Account(AccountSubcommand::Import(ImportSubcommand::Private {
key_chain_json,
account_state,
chain_index: None,
identifier: 0,
}));
let sub_ret = wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
let SubcommandReturnValue::Empty = sub_ret else {
anyhow::bail!("Expected Empty return value");
};
let imported_acc = ctx
.wallet()
.storage()
.key_chain()
.private_account(account_id)
.context("Imported private account should be present")?;
assert_eq!(
imported_acc.key_chain.secret_spending_key,
key_chain.secret_spending_key
);
assert_eq!(
imported_acc.key_chain.nullifier_public_key,
key_chain.nullifier_public_key
);
assert_eq!(
imported_acc.key_chain.viewing_public_key,
key_chain.viewing_public_key
);
assert_eq!(imported_acc.chain_index, None);
assert_eq!(imported_acc.kind.identifier(), 0);
assert_eq!(imported_acc.account, &account);
Ok(())
}
#[test]
async fn import_private_account_second_time_overrides_account_data() -> Result<()> {
let mut ctx = TestContext::new().await?;
let key_chain = KeyChain::new_os_random();
let account_id = nssa::AccountId::from((&key_chain.nullifier_public_key, 0));
let key_chain_json =
serde_json::to_string(&key_chain).context("Failed to serialize key chain")?;
let initial_account = nssa::Account {
program_owner: Program::authenticated_transfer_program().id(),
balance: 100,
data: Data::default(),
nonce: Nonce::default(),
};
// First import
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Account(AccountSubcommand::Import(ImportSubcommand::Private {
key_chain_json: key_chain_json.clone(),
account_state: HumanReadableAccount::from(initial_account),
chain_index: None,
identifier: 0,
})),
)
.await?;
let updated_account = nssa::Account {
program_owner: Program::authenticated_transfer_program().id(),
balance: 999,
data: Data::default(),
nonce: Nonce::default(),
};
// Second import with different account data (same key chain)
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Account(AccountSubcommand::Import(ImportSubcommand::Private {
key_chain_json,
account_state: HumanReadableAccount::from(updated_account.clone()),
chain_index: None,
identifier: 0,
})),
)
.await?;
let imported = ctx
.wallet()
.storage()
.key_chain()
.private_account(account_id)
.context("Imported private account should be present")?;
assert_eq!(
imported.account, &updated_account,
"Second import should override account data"
);
Ok(())
}

View File

@ -7,14 +7,17 @@
use std::time::Duration;
use anyhow::Result;
use integration_tests::{TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, format_public_account_id};
use integration_tests::{TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, public_mention};
use log::info;
use sequencer_service_rpc::RpcClient as _;
use tokio::test;
use wallet::cli::{
Command, SubcommandReturnValue,
account::{AccountSubcommand, NewSubcommand},
programs::{amm::AmmProgramAgnosticSubcommand, token::TokenProgramAgnosticSubcommand},
use wallet::{
account::Label,
cli::{
Command, SubcommandReturnValue,
account::{AccountSubcommand, NewSubcommand},
programs::{amm::AmmProgramAgnosticSubcommand, token::TokenProgramAgnosticSubcommand},
},
};
#[test]
@ -113,10 +116,8 @@ async fn amm_public() -> Result<()> {
// Create new token
let subcommand = TokenProgramAgnosticSubcommand::New {
definition_account_id: Some(format_public_account_id(definition_account_id_1)),
definition_account_label: None,
supply_account_id: Some(format_public_account_id(supply_account_id_1)),
supply_account_label: None,
definition_account_id: public_mention(definition_account_id_1),
supply_account_id: public_mention(supply_account_id_1),
name: "A NAM1".to_owned(),
total_supply: 37,
@ -127,15 +128,12 @@ async fn amm_public() -> Result<()> {
// Transfer 7 tokens from `supply_acc` to the account at account_id `recipient_account_id_1`
let subcommand = TokenProgramAgnosticSubcommand::Send {
from: Some(format_public_account_id(supply_account_id_1)),
from_label: None,
to: Some(format_public_account_id(recipient_account_id_1)),
to_label: None,
from: public_mention(supply_account_id_1),
to: Some(public_mention(recipient_account_id_1)),
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 7,
from_key_path: None,
};
wallet::cli::execute_subcommand(ctx.wallet_mut(), Command::Token(subcommand)).await?;
@ -144,10 +142,8 @@ async fn amm_public() -> Result<()> {
// Create new token
let subcommand = TokenProgramAgnosticSubcommand::New {
definition_account_id: Some(format_public_account_id(definition_account_id_2)),
definition_account_label: None,
supply_account_id: Some(format_public_account_id(supply_account_id_2)),
supply_account_label: None,
definition_account_id: public_mention(definition_account_id_2),
supply_account_id: public_mention(supply_account_id_2),
name: "A NAM2".to_owned(),
total_supply: 37,
@ -158,15 +154,12 @@ async fn amm_public() -> Result<()> {
// Transfer 7 tokens from `supply_acc` to the account at account_id `recipient_account_id_2`
let subcommand = TokenProgramAgnosticSubcommand::Send {
from: Some(format_public_account_id(supply_account_id_2)),
from_label: None,
to: Some(format_public_account_id(recipient_account_id_2)),
to_label: None,
from: public_mention(supply_account_id_2),
to: Some(public_mention(recipient_account_id_2)),
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 7,
from_key_path: None,
};
wallet::cli::execute_subcommand(ctx.wallet_mut(), Command::Token(subcommand)).await?;
@ -195,12 +188,9 @@ async fn amm_public() -> Result<()> {
// Send creation tx
let subcommand = AmmProgramAgnosticSubcommand::New {
user_holding_a: Some(format_public_account_id(recipient_account_id_1)),
user_holding_a_label: None,
user_holding_b: Some(format_public_account_id(recipient_account_id_2)),
user_holding_b_label: None,
user_holding_lp: Some(format_public_account_id(user_holding_lp)),
user_holding_lp_label: None,
user_holding_a: public_mention(recipient_account_id_1),
user_holding_b: public_mention(recipient_account_id_2),
user_holding_lp: public_mention(user_holding_lp),
balance_a: 3,
balance_b: 3,
};
@ -241,13 +231,11 @@ async fn amm_public() -> Result<()> {
// Make swap
let subcommand = AmmProgramAgnosticSubcommand::SwapExactInput {
user_holding_a: Some(format_public_account_id(recipient_account_id_1)),
user_holding_a_label: None,
user_holding_b: Some(format_public_account_id(recipient_account_id_2)),
user_holding_b_label: None,
user_holding_a: public_mention(recipient_account_id_1),
user_holding_b: public_mention(recipient_account_id_2),
amount_in: 2,
min_amount_out: 1,
token_definition: definition_account_id_1.to_string(),
token_definition: definition_account_id_1,
};
wallet::cli::execute_subcommand(ctx.wallet_mut(), Command::AMM(subcommand)).await?;
@ -286,13 +274,11 @@ async fn amm_public() -> Result<()> {
// Make swap
let subcommand = AmmProgramAgnosticSubcommand::SwapExactInput {
user_holding_a: Some(format_public_account_id(recipient_account_id_1)),
user_holding_a_label: None,
user_holding_b: Some(format_public_account_id(recipient_account_id_2)),
user_holding_b_label: None,
user_holding_a: public_mention(recipient_account_id_1),
user_holding_b: public_mention(recipient_account_id_2),
amount_in: 2,
min_amount_out: 1,
token_definition: definition_account_id_2.to_string(),
token_definition: definition_account_id_2,
};
wallet::cli::execute_subcommand(ctx.wallet_mut(), Command::AMM(subcommand)).await?;
@ -331,12 +317,9 @@ async fn amm_public() -> Result<()> {
// Add liquidity
let subcommand = AmmProgramAgnosticSubcommand::AddLiquidity {
user_holding_a: Some(format_public_account_id(recipient_account_id_1)),
user_holding_a_label: None,
user_holding_b: Some(format_public_account_id(recipient_account_id_2)),
user_holding_b_label: None,
user_holding_lp: Some(format_public_account_id(user_holding_lp)),
user_holding_lp_label: None,
user_holding_a: public_mention(recipient_account_id_1),
user_holding_b: public_mention(recipient_account_id_2),
user_holding_lp: public_mention(user_holding_lp),
min_amount_lp: 1,
max_amount_a: 2,
max_amount_b: 2,
@ -378,12 +361,9 @@ async fn amm_public() -> Result<()> {
// Remove liquidity
let subcommand = AmmProgramAgnosticSubcommand::RemoveLiquidity {
user_holding_a: Some(format_public_account_id(recipient_account_id_1)),
user_holding_a_label: None,
user_holding_b: Some(format_public_account_id(recipient_account_id_2)),
user_holding_b_label: None,
user_holding_lp: Some(format_public_account_id(user_holding_lp)),
user_holding_lp_label: None,
user_holding_a: public_mention(recipient_account_id_1),
user_holding_b: public_mention(recipient_account_id_2),
user_holding_lp: public_mention(user_holding_lp),
balance_lp: 2,
min_amount_a: 1,
min_amount_b: 1,
@ -459,14 +439,14 @@ async fn amm_new_pool_using_labels() -> Result<()> {
};
// Create holding_a with a label
let holding_a_label = "amm-holding-a-label".to_owned();
let holding_a_label = Label::new("amm-holding-a-label");
let SubcommandReturnValue::RegisterAccount {
account_id: holding_a_id,
} = wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Account(AccountSubcommand::New(NewSubcommand::Public {
cci: None,
label: Some(holding_a_label.clone()),
label: Some(Label::new(holding_a_label.clone())),
})),
)
.await?
@ -504,14 +484,14 @@ async fn amm_new_pool_using_labels() -> Result<()> {
};
// Create holding_b with a label
let holding_b_label = "amm-holding-b-label".to_owned();
let holding_b_label = Label::new("amm-holding-b-label");
let SubcommandReturnValue::RegisterAccount {
account_id: holding_b_id,
} = wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Account(AccountSubcommand::New(NewSubcommand::Public {
cci: None,
label: Some(holding_b_label.clone()),
label: Some(Label::new(holding_b_label.clone())),
})),
)
.await?
@ -520,14 +500,14 @@ async fn amm_new_pool_using_labels() -> Result<()> {
};
// Create holding_lp with a label
let holding_lp_label = "amm-holding-lp-label".to_owned();
let holding_lp_label = Label::new("amm-holding-lp-label");
let SubcommandReturnValue::RegisterAccount {
account_id: holding_lp_id,
} = wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Account(AccountSubcommand::New(NewSubcommand::Public {
cci: None,
label: Some(holding_lp_label.clone()),
label: Some(Label::new(holding_lp_label.clone())),
})),
)
.await?
@ -537,10 +517,8 @@ async fn amm_new_pool_using_labels() -> Result<()> {
// Create token 1 and distribute to holding_a
let subcommand = TokenProgramAgnosticSubcommand::New {
definition_account_id: Some(format_public_account_id(definition_account_id_1)),
definition_account_label: None,
supply_account_id: Some(format_public_account_id(supply_account_id_1)),
supply_account_label: None,
definition_account_id: public_mention(definition_account_id_1),
supply_account_id: public_mention(supply_account_id_1),
name: "TOKEN1".to_owned(),
total_supply: 10,
};
@ -548,25 +526,20 @@ async fn amm_new_pool_using_labels() -> Result<()> {
tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
let subcommand = TokenProgramAgnosticSubcommand::Send {
from: Some(format_public_account_id(supply_account_id_1)),
from_label: None,
to: Some(format_public_account_id(holding_a_id)),
to_label: None,
from: public_mention(supply_account_id_1),
to: Some(public_mention(holding_a_id)),
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 5,
from_key_path: None,
};
wallet::cli::execute_subcommand(ctx.wallet_mut(), Command::Token(subcommand)).await?;
tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
// Create token 2 and distribute to holding_b
let subcommand = TokenProgramAgnosticSubcommand::New {
definition_account_id: Some(format_public_account_id(definition_account_id_2)),
definition_account_label: None,
supply_account_id: Some(format_public_account_id(supply_account_id_2)),
supply_account_label: None,
definition_account_id: public_mention(definition_account_id_2),
supply_account_id: public_mention(supply_account_id_2),
name: "TOKEN2".to_owned(),
total_supply: 10,
};
@ -574,27 +547,21 @@ async fn amm_new_pool_using_labels() -> Result<()> {
tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
let subcommand = TokenProgramAgnosticSubcommand::Send {
from: Some(format_public_account_id(supply_account_id_2)),
from_label: None,
to: Some(format_public_account_id(holding_b_id)),
to_label: None,
from: public_mention(supply_account_id_2),
to: Some(public_mention(holding_b_id)),
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 5,
from_key_path: None,
};
wallet::cli::execute_subcommand(ctx.wallet_mut(), Command::Token(subcommand)).await?;
tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
// Create AMM pool using account labels instead of IDs
let subcommand = AmmProgramAgnosticSubcommand::New {
user_holding_a: None,
user_holding_a_label: Some(holding_a_label),
user_holding_b: None,
user_holding_b_label: Some(holding_b_label),
user_holding_lp: None,
user_holding_lp_label: Some(holding_lp_label),
user_holding_a: holding_a_label.into(),
user_holding_b: holding_b_label.into(),
user_holding_lp: holding_lp_label.into(),
balance_a: 3,
balance_b: 3,
};

View File

@ -9,8 +9,8 @@ use std::time::Duration;
use anyhow::{Context as _, Result};
use ata_core::{compute_ata_seed, get_associated_token_account_id};
use integration_tests::{
TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, format_private_account_id,
format_public_account_id, verify_commitment_is_in_state,
TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, private_mention, public_mention,
verify_commitment_is_in_state,
};
use log::info;
use nssa::program::Program;
@ -68,10 +68,8 @@ async fn create_ata_initializes_holding_account() -> Result<()> {
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Token(TokenProgramAgnosticSubcommand::New {
definition_account_id: Some(format_public_account_id(definition_account_id)),
definition_account_label: None,
supply_account_id: Some(format_public_account_id(supply_account_id)),
supply_account_label: None,
definition_account_id: public_mention(definition_account_id),
supply_account_id: public_mention(supply_account_id),
name: "TEST".to_owned(),
total_supply,
}),
@ -85,8 +83,8 @@ async fn create_ata_initializes_holding_account() -> Result<()> {
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Ata(AtaSubcommand::Create {
owner: format_public_account_id(owner_account_id),
token_definition: definition_account_id.to_string(),
owner: public_mention(owner_account_id),
token_definition: definition_account_id,
}),
)
.await?;
@ -132,10 +130,8 @@ async fn create_ata_is_idempotent() -> Result<()> {
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Token(TokenProgramAgnosticSubcommand::New {
definition_account_id: Some(format_public_account_id(definition_account_id)),
definition_account_label: None,
supply_account_id: Some(format_public_account_id(supply_account_id)),
supply_account_label: None,
definition_account_id: public_mention(definition_account_id),
supply_account_id: public_mention(supply_account_id),
name: "TEST".to_owned(),
total_supply: 100,
}),
@ -149,8 +145,8 @@ async fn create_ata_is_idempotent() -> Result<()> {
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Ata(AtaSubcommand::Create {
owner: format_public_account_id(owner_account_id),
token_definition: definition_account_id.to_string(),
owner: public_mention(owner_account_id),
token_definition: definition_account_id,
}),
)
.await?;
@ -162,8 +158,8 @@ async fn create_ata_is_idempotent() -> Result<()> {
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Ata(AtaSubcommand::Create {
owner: format_public_account_id(owner_account_id),
token_definition: definition_account_id.to_string(),
owner: public_mention(owner_account_id),
token_definition: definition_account_id,
}),
)
.await?;
@ -212,10 +208,8 @@ async fn transfer_and_burn_via_ata() -> Result<()> {
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Token(TokenProgramAgnosticSubcommand::New {
definition_account_id: Some(format_public_account_id(definition_account_id)),
definition_account_label: None,
supply_account_id: Some(format_public_account_id(supply_account_id)),
supply_account_label: None,
definition_account_id: public_mention(definition_account_id),
supply_account_id: public_mention(supply_account_id),
name: "TEST".to_owned(),
total_supply,
}),
@ -240,16 +234,16 @@ async fn transfer_and_burn_via_ata() -> Result<()> {
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Ata(AtaSubcommand::Create {
owner: format_public_account_id(sender_account_id),
token_definition: definition_account_id.to_string(),
owner: public_mention(sender_account_id),
token_definition: definition_account_id,
}),
)
.await?;
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Ata(AtaSubcommand::Create {
owner: format_public_account_id(recipient_account_id),
token_definition: definition_account_id.to_string(),
owner: public_mention(recipient_account_id),
token_definition: definition_account_id,
}),
)
.await?;
@ -262,15 +256,12 @@ async fn transfer_and_burn_via_ata() -> Result<()> {
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Token(TokenProgramAgnosticSubcommand::Send {
from: Some(format_public_account_id(supply_account_id)),
from_label: None,
to: Some(format_public_account_id(sender_ata_id)),
to_label: None,
from: public_mention(supply_account_id),
to: Some(public_mention(sender_ata_id)),
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: fund_amount,
from_key_path: None,
}),
)
.await?;
@ -283,9 +274,9 @@ async fn transfer_and_burn_via_ata() -> Result<()> {
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Ata(AtaSubcommand::Send {
from: format_public_account_id(sender_account_id),
token_definition: definition_account_id.to_string(),
to: recipient_ata_id.to_string(),
from: public_mention(sender_account_id),
token_definition: definition_account_id,
to: recipient_ata_id,
amount: transfer_amount,
}),
)
@ -321,8 +312,8 @@ async fn transfer_and_burn_via_ata() -> Result<()> {
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Ata(AtaSubcommand::Burn {
holder: format_public_account_id(sender_account_id),
token_definition: definition_account_id.to_string(),
holder: public_mention(sender_account_id),
token_definition: definition_account_id,
amount: burn_amount,
}),
)
@ -372,10 +363,8 @@ async fn create_ata_with_private_owner() -> Result<()> {
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Token(TokenProgramAgnosticSubcommand::New {
definition_account_id: Some(format_public_account_id(definition_account_id)),
definition_account_label: None,
supply_account_id: Some(format_public_account_id(supply_account_id)),
supply_account_label: None,
definition_account_id: public_mention(definition_account_id),
supply_account_id: public_mention(supply_account_id),
name: "TEST".to_owned(),
total_supply: 100,
}),
@ -389,8 +378,8 @@ async fn create_ata_with_private_owner() -> Result<()> {
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Ata(AtaSubcommand::Create {
owner: format_private_account_id(owner_account_id),
token_definition: definition_account_id.to_string(),
owner: private_mention(owner_account_id),
token_definition: definition_account_id,
}),
)
.await?;
@ -446,10 +435,8 @@ async fn transfer_via_ata_private_owner() -> Result<()> {
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Token(TokenProgramAgnosticSubcommand::New {
definition_account_id: Some(format_public_account_id(definition_account_id)),
definition_account_label: None,
supply_account_id: Some(format_public_account_id(supply_account_id)),
supply_account_label: None,
definition_account_id: public_mention(definition_account_id),
supply_account_id: public_mention(supply_account_id),
name: "TEST".to_owned(),
total_supply,
}),
@ -474,16 +461,16 @@ async fn transfer_via_ata_private_owner() -> Result<()> {
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Ata(AtaSubcommand::Create {
owner: format_private_account_id(sender_account_id),
token_definition: definition_account_id.to_string(),
owner: private_mention(sender_account_id),
token_definition: definition_account_id,
}),
)
.await?;
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Ata(AtaSubcommand::Create {
owner: format_public_account_id(recipient_account_id),
token_definition: definition_account_id.to_string(),
owner: public_mention(recipient_account_id),
token_definition: definition_account_id,
}),
)
.await?;
@ -496,15 +483,12 @@ async fn transfer_via_ata_private_owner() -> Result<()> {
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Token(TokenProgramAgnosticSubcommand::Send {
from: Some(format_public_account_id(supply_account_id)),
from_label: None,
to: Some(format_public_account_id(sender_ata_id)),
to_label: None,
from: public_mention(supply_account_id),
to: Some(public_mention(sender_ata_id)),
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: fund_amount,
from_key_path: None,
}),
)
.await?;
@ -517,9 +501,9 @@ async fn transfer_via_ata_private_owner() -> Result<()> {
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Ata(AtaSubcommand::Send {
from: format_private_account_id(sender_account_id),
token_definition: definition_account_id.to_string(),
to: recipient_ata_id.to_string(),
from: private_mention(sender_account_id),
token_definition: definition_account_id,
to: recipient_ata_id,
amount: transfer_amount,
}),
)
@ -574,10 +558,8 @@ async fn burn_via_ata_private_owner() -> Result<()> {
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Token(TokenProgramAgnosticSubcommand::New {
definition_account_id: Some(format_public_account_id(definition_account_id)),
definition_account_label: None,
supply_account_id: Some(format_public_account_id(supply_account_id)),
supply_account_label: None,
definition_account_id: public_mention(definition_account_id),
supply_account_id: public_mention(supply_account_id),
name: "TEST".to_owned(),
total_supply,
}),
@ -598,8 +580,8 @@ async fn burn_via_ata_private_owner() -> Result<()> {
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Ata(AtaSubcommand::Create {
owner: format_private_account_id(holder_account_id),
token_definition: definition_account_id.to_string(),
owner: private_mention(holder_account_id),
token_definition: definition_account_id,
}),
)
.await?;
@ -612,15 +594,12 @@ async fn burn_via_ata_private_owner() -> Result<()> {
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Token(TokenProgramAgnosticSubcommand::Send {
from: Some(format_public_account_id(supply_account_id)),
from_label: None,
to: Some(format_public_account_id(holder_ata_id)),
to_label: None,
from: public_mention(supply_account_id),
to: Some(public_mention(holder_ata_id)),
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: fund_amount,
from_key_path: None,
}),
)
.await?;
@ -633,8 +612,8 @@ async fn burn_via_ata_private_owner() -> Result<()> {
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Ata(AtaSubcommand::Burn {
holder: format_private_account_id(holder_account_id),
token_definition: definition_account_id.to_string(),
holder: private_mention(holder_account_id),
token_definition: definition_account_id,
amount: burn_amount,
}),
)

View File

@ -2,18 +2,21 @@ use std::time::Duration;
use anyhow::{Context as _, Result};
use integration_tests::{
TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, fetch_privacy_preserving_tx,
format_private_account_id, format_public_account_id, verify_commitment_is_in_state,
TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, fetch_privacy_preserving_tx, private_mention,
public_mention, verify_commitment_is_in_state,
};
use log::info;
use nssa::{AccountId, program::Program};
use nssa_core::{NullifierPublicKey, encryption::shared_key_derivation::Secp256k1Point};
use sequencer_service_rpc::RpcClient as _;
use tokio::test;
use wallet::cli::{
Command, SubcommandReturnValue,
account::{AccountSubcommand, NewSubcommand},
programs::native_token_transfer::AuthTransferSubcommand,
use wallet::{
account::Label,
cli::{
CliAccountMention, Command, SubcommandReturnValue,
account::{AccountSubcommand, NewSubcommand},
programs::native_token_transfer::AuthTransferSubcommand,
},
};
#[test]
@ -24,16 +27,12 @@ async fn private_transfer_to_owned_account() -> Result<()> {
let to: AccountId = ctx.existing_private_accounts()[1];
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: Some(format_private_account_id(from)),
from_label: None,
to: Some(format_private_account_id(to)),
to_label: None,
from: private_mention(from),
to: Some(private_mention(to)),
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 100,
to_key_path: None,
from_key_path: None,
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -68,16 +67,12 @@ async fn private_transfer_to_foreign_account() -> Result<()> {
let to_vpk = Secp256k1Point::from_scalar(to_npk.0);
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: Some(format_private_account_id(from)),
from_label: None,
from: private_mention(from),
to: None,
to_label: None,
to_npk: Some(to_npk_string),
to_vpk: Some(hex::encode(to_vpk.0)),
to_identifier: Some(0),
amount: 100,
to_key_path: None,
from_key_path: None,
});
let result = wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -121,16 +116,12 @@ async fn deshielded_transfer_to_public_account() -> Result<()> {
assert_eq!(from_acc.balance, 10000);
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: Some(format_private_account_id(from)),
from_label: None,
to: Some(format_public_account_id(to)),
to_label: None,
from: private_mention(from),
to: Some(public_mention(to)),
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 100,
to_key_path: None,
from_key_path: None,
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -179,25 +170,21 @@ async fn private_transfer_to_owned_account_using_claiming_path() -> Result<()> {
};
// Get the keys for the newly created account
let (to_keys, _, to_identifier) = ctx
let to = ctx
.wallet()
.storage()
.user_data
.get_private_account(to_account_id)
.key_chain()
.private_account(to_account_id)
.context("Failed to get private account")?;
// Send to this account using claiming path (using npk and vpk instead of account ID)
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: Some(format_private_account_id(from)),
from_label: None,
from: private_mention(from),
to: None,
to_label: None,
to_npk: Some(hex::encode(to_keys.nullifier_public_key.0)),
to_vpk: Some(hex::encode(to_keys.viewing_public_key.0)),
to_identifier: Some(to_identifier),
to_npk: Some(hex::encode(to.key_chain.nullifier_public_key.0)),
to_vpk: Some(hex::encode(&to.key_chain.viewing_public_key.0)),
to_identifier: Some(to.kind.identifier()),
amount: 100,
to_key_path: None,
from_key_path: None,
});
let sub_ret = wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -241,16 +228,12 @@ async fn shielded_transfer_to_owned_private_account() -> Result<()> {
let to: AccountId = ctx.existing_private_accounts()[1];
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: Some(format_public_account_id(from)),
from_label: None,
to: Some(format_private_account_id(to)),
to_label: None,
from: public_mention(from),
to: Some(private_mention(to)),
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 100,
to_key_path: None,
from_key_path: None,
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -288,16 +271,12 @@ async fn shielded_transfer_to_foreign_account() -> Result<()> {
let from: AccountId = ctx.existing_public_accounts()[0];
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: Some(format_public_account_id(from)),
from_label: None,
from: public_mention(from),
to: None,
to_label: None,
to_npk: Some(to_npk_string),
to_vpk: Some(hex::encode(to_vpk.0)),
to_identifier: Some(0),
amount: 100,
to_key_path: None,
from_key_path: None,
});
let result = wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -353,25 +332,21 @@ async fn private_transfer_to_owned_account_continuous_run_path() -> Result<()> {
};
// Get the newly created account's keys
let (to_keys, _, to_identifier) = ctx
let to = ctx
.wallet()
.storage()
.user_data
.get_private_account(to_account_id)
.key_chain()
.private_account(to_account_id)
.context("Failed to get private account")?;
// Send transfer using nullifier and viewing public keys
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: Some(format_private_account_id(from)),
from_label: None,
from: private_mention(from),
to: None,
to_label: None,
to_npk: Some(hex::encode(to_keys.nullifier_public_key.0)),
to_vpk: Some(hex::encode(to_keys.viewing_public_key.0)),
to_identifier: Some(to_identifier),
to_npk: Some(hex::encode(to.key_chain.nullifier_public_key.0)),
to_vpk: Some(hex::encode(&to.key_chain.viewing_public_key.0)),
to_identifier: Some(to.kind.identifier()),
amount: 100,
to_key_path: None,
from_key_path: None,
});
let sub_ret = wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -416,9 +391,7 @@ async fn initialize_private_account() -> Result<()> {
};
let command = Command::AuthTransfer(AuthTransferSubcommand::Init {
account_id: Some(format_private_account_id(account_id)),
account_label: None,
key_path: None,
account: private_mention(account_id),
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -459,26 +432,21 @@ async fn private_transfer_using_from_label() -> Result<()> {
let to: AccountId = ctx.existing_private_accounts()[1];
// Assign a label to the sender account
let label = "private-sender-label".to_owned();
let label = Label::new("private-sender-label");
let command = Command::Account(AccountSubcommand::Label {
account_id: Some(format_private_account_id(from)),
account_label: None,
account_id: private_mention(from),
label: label.clone(),
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
// Send using the label instead of account ID
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: None,
from_label: Some(label),
to: Some(format_private_account_id(to)),
to_label: None,
from: CliAccountMention::Label(label),
to: Some(private_mention(to)),
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 100,
from_key_path: None,
to_key_path: None,
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -508,7 +476,7 @@ async fn initialize_private_account_using_label() -> Result<()> {
let mut ctx = TestContext::new().await?;
// Create a new private account with a label
let label = "init-private-label".to_owned();
let label = Label::new("init-private-label");
let command = Command::Account(AccountSubcommand::New(NewSubcommand::Private {
cci: None,
label: Some(label.clone()),
@ -520,9 +488,7 @@ async fn initialize_private_account_using_label() -> Result<()> {
// Initialize using the label instead of account ID
let command = Command::AuthTransfer(AuthTransferSubcommand::Init {
account_id: None,
account_label: Some(label),
key_path: None,
account: label.into(),
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -559,15 +525,12 @@ async fn shielded_transfers_to_two_identifiers_same_npk() -> Result<()> {
// Both transfers below will target this same node with distinct identifiers.
let chain_index = ctx.wallet_mut().create_private_accounts_key(None);
let (npk, vpk) = {
let node = ctx
let key_chain = ctx
.wallet()
.storage()
.user_data
.private_key_tree
.key_map
.get(&chain_index)
.expect("node was just inserted");
let key_chain = &node.value.0;
.key_chain()
.private_account_key_chain_by_index(&chain_index)
.expect("Failed to get private account key chain for chain index");
(
key_chain.nullifier_public_key,
key_chain.viewing_public_key.clone(),
@ -586,16 +549,12 @@ async fn shielded_transfers_to_two_identifiers_same_npk() -> Result<()> {
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::AuthTransfer(AuthTransferSubcommand::Send {
from: Some(format_public_account_id(sender_0)),
from_label: None,
from: public_mention(sender_0),
to: None,
to_label: None,
to_npk: Some(npk_hex.clone()),
to_vpk: Some(vpk_hex.clone()),
to_identifier: Some(identifier_1),
amount: 100,
from_key_path: None,
to_key_path: None,
}),
)
.await?;
@ -603,16 +562,12 @@ async fn shielded_transfers_to_two_identifiers_same_npk() -> Result<()> {
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::AuthTransfer(AuthTransferSubcommand::Send {
from: Some(format_public_account_id(sender_1)),
from_label: None,
from: public_mention(sender_1),
to: None,
to_label: None,
to_npk: Some(npk_hex),
to_vpk: Some(vpk_hex),
to_identifier: Some(identifier_2),
amount: 200,
from_key_path: None,
to_key_path: None,
}),
)
.await?;
@ -627,14 +582,14 @@ async fn shielded_transfers_to_two_identifiers_same_npk() -> Result<()> {
.await?;
// Both accounts must be discovered with the correct balances.
let account_id_1 = AccountId::from((&npk, identifier_1));
let account_id_1 = AccountId::for_regular_private_account(&npk, identifier_1);
let acc_1 = ctx
.wallet()
.get_account_private(account_id_1)
.context("account for identifier 1 not found after sync")?;
assert_eq!(acc_1.balance, 100);
let account_id_2 = AccountId::from((&npk, identifier_2));
let account_id_2 = AccountId::for_regular_private_account(&npk, identifier_2);
let acc_2 = ctx
.wallet()
.get_account_private(account_id_2)
@ -642,21 +597,25 @@ async fn shielded_transfers_to_two_identifiers_same_npk() -> Result<()> {
assert_eq!(acc_2.balance, 200);
// Both account ids must resolve to the same key node.
let tree = &ctx.wallet().storage().user_data.private_key_tree;
let ci_1 = tree
.account_id_map
.get(&account_id_1)
.context("account_id_1 missing from private_key_tree.account_id_map")?;
let ci_2 = tree
.account_id_map
.get(&account_id_2)
.context("account_id_2 missing from private_key_tree.account_id_map")?;
let found_acc1 = ctx
.wallet()
.storage()
.key_chain()
.private_account(account_id_1)
.context("account_id_1 not found in key chain")?;
let found_acc2 = ctx
.wallet()
.storage()
.key_chain()
.private_account(account_id_2)
.context("account_id_2 not found in key chain")?;
assert_eq!(
ci_1, ci_2,
found_acc1.chain_index, found_acc2.chain_index,
"identifiers 1 and 2 under the same NPK must share a single chain_index"
);
assert_eq!(
ci_1, &chain_index,
found_acc1.chain_index,
Some(chain_index),
"both accounts must resolve to the key node created at the start of the test"
);

View File

@ -1,15 +1,19 @@
use std::time::Duration;
use anyhow::Result;
use integration_tests::{TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, format_public_account_id};
use common::transaction::NSSATransaction;
use integration_tests::{TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, public_mention};
use log::info;
use nssa::program::Program;
use nssa::{program::Program, public_transaction, system_faucet_account_id};
use sequencer_service_rpc::RpcClient as _;
use tokio::test;
use wallet::cli::{
Command, SubcommandReturnValue,
account::{AccountSubcommand, NewSubcommand},
programs::native_token_transfer::AuthTransferSubcommand,
use wallet::{
account::Label,
cli::{
CliAccountMention, Command, SubcommandReturnValue,
account::{AccountSubcommand, NewSubcommand},
programs::native_token_transfer::AuthTransferSubcommand,
},
};
#[test]
@ -17,16 +21,12 @@ async fn successful_transfer_to_existing_account() -> Result<()> {
let mut ctx = TestContext::new().await?;
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: Some(format_public_account_id(ctx.existing_public_accounts()[0])),
from_label: None,
to: Some(format_public_account_id(ctx.existing_public_accounts()[1])),
to_label: None,
from: public_mention(ctx.existing_public_accounts()[0]),
to: Some(public_mention(ctx.existing_public_accounts()[1])),
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 100,
from_key_path: None,
to_key_path: None,
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -69,8 +69,9 @@ pub async fn successful_transfer_to_new_account() -> Result<()> {
let new_persistent_account_id = ctx
.wallet()
.storage()
.user_data
.account_ids()
.key_chain()
.public_account_ids()
.map(|(account_id, _)| account_id)
.find(|acc_id| {
*acc_id != ctx.existing_public_accounts()[0]
&& *acc_id != ctx.existing_public_accounts()[1]
@ -78,16 +79,12 @@ pub async fn successful_transfer_to_new_account() -> Result<()> {
.expect("Failed to find newly created account in the wallet storage");
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: Some(format_public_account_id(ctx.existing_public_accounts()[0])),
from_label: None,
to: Some(format_public_account_id(new_persistent_account_id)),
to_label: None,
from: public_mention(ctx.existing_public_accounts()[0]),
to: Some(public_mention(new_persistent_account_id)),
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 100,
from_key_path: None,
to_key_path: None,
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -119,16 +116,12 @@ async fn failed_transfer_with_insufficient_balance() -> Result<()> {
let mut ctx = TestContext::new().await?;
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: Some(format_public_account_id(ctx.existing_public_accounts()[0])),
from_label: None,
to: Some(format_public_account_id(ctx.existing_public_accounts()[1])),
to_label: None,
from: public_mention(ctx.existing_public_accounts()[0]),
to: Some(public_mention(ctx.existing_public_accounts()[1])),
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 1_000_000,
from_key_path: None,
to_key_path: None,
});
let failed_send = wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await;
@ -162,16 +155,12 @@ async fn two_consecutive_successful_transfers() -> Result<()> {
// First transfer
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: Some(format_public_account_id(ctx.existing_public_accounts()[0])),
from_label: None,
to: Some(format_public_account_id(ctx.existing_public_accounts()[1])),
to_label: None,
from: public_mention(ctx.existing_public_accounts()[0]),
to: Some(public_mention(ctx.existing_public_accounts()[1])),
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 100,
from_key_path: None,
to_key_path: None,
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -199,16 +188,12 @@ async fn two_consecutive_successful_transfers() -> Result<()> {
// Second transfer
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: Some(format_public_account_id(ctx.existing_public_accounts()[0])),
from_label: None,
to: Some(format_public_account_id(ctx.existing_public_accounts()[1])),
to_label: None,
from: public_mention(ctx.existing_public_accounts()[0]),
to: Some(public_mention(ctx.existing_public_accounts()[1])),
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 100,
from_key_path: None,
to_key_path: None,
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -251,9 +236,7 @@ async fn initialize_public_account() -> Result<()> {
};
let command = Command::AuthTransfer(AuthTransferSubcommand::Init {
account_id: Some(format_public_account_id(account_id)),
account_label: None,
key_path: None,
account: public_mention(account_id),
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -278,26 +261,21 @@ async fn successful_transfer_using_from_label() -> Result<()> {
let mut ctx = TestContext::new().await?;
// Assign a label to the sender account
let label = "sender-label".to_owned();
let label = Label::new("sender-label");
let command = Command::Account(AccountSubcommand::Label {
account_id: Some(format_public_account_id(ctx.existing_public_accounts()[0])),
account_label: None,
account_id: public_mention(ctx.existing_public_accounts()[0]),
label: label.clone(),
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
// Send using the label instead of account ID
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: None,
from_label: Some(label),
to: Some(format_public_account_id(ctx.existing_public_accounts()[1])),
to_label: None,
from: CliAccountMention::Label(label),
to: Some(public_mention(ctx.existing_public_accounts()[1])),
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 100,
from_key_path: None,
to_key_path: None,
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -328,26 +306,21 @@ async fn successful_transfer_using_to_label() -> Result<()> {
let mut ctx = TestContext::new().await?;
// Assign a label to the receiver account
let label = "receiver-label".to_owned();
let label = Label::new("receiver-label");
let command = Command::Account(AccountSubcommand::Label {
account_id: Some(format_public_account_id(ctx.existing_public_accounts()[1])),
account_label: None,
account_id: public_mention(ctx.existing_public_accounts()[1]),
label: label.clone(),
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
// Send using the label for the recipient
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: Some(format_public_account_id(ctx.existing_public_accounts()[0])),
from_label: None,
to: None,
to_label: Some(label),
from: public_mention(ctx.existing_public_accounts()[0]),
to: Some(CliAccountMention::Label(label)),
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 100,
from_key_path: None,
to_key_path: None,
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -372,3 +345,150 @@ async fn successful_transfer_using_to_label() -> Result<()> {
Ok(())
}
#[test]
async fn cannot_transfer_funds_from_system_faucet_account() -> Result<()> {
let ctx = TestContext::new().await?;
let faucet_account_id = system_faucet_account_id();
let recipient = ctx.existing_public_accounts()[0];
let recipient_balance_before = ctx
.sequencer_client()
.get_account_balance(recipient)
.await?;
let faucet_balance_before = ctx
.sequencer_client()
.get_account_balance(faucet_account_id)
.await?;
let amount = 1_u128;
let message = public_transaction::Message::try_new(
Program::authenticated_transfer_program().id(),
vec![faucet_account_id, recipient],
vec![],
authenticated_transfer_core::Instruction::Transfer { amount },
)?;
let tx = nssa::PublicTransaction::new(
message,
nssa::public_transaction::WitnessSet::from_raw_parts(vec![]),
);
let tx_hash = ctx
.sequencer_client()
.send_transaction(NSSATransaction::Public(tx))
.await?;
info!("Waiting for next block creation");
tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
let recipient_balance_after = ctx
.sequencer_client()
.get_account_balance(recipient)
.await?;
let faucet_balance_after = ctx
.sequencer_client()
.get_account_balance(faucet_account_id)
.await?;
let tx_on_chain = ctx.sequencer_client().get_transaction(tx_hash).await?;
assert_eq!(recipient_balance_after, recipient_balance_before);
assert_eq!(faucet_balance_after, faucet_balance_before);
assert!(tx_on_chain.is_none());
Ok(())
}
#[test]
async fn can_transfer_funds_to_system_faucet_account() -> Result<()> {
let mut ctx = TestContext::new().await?;
let faucet_account_id = system_faucet_account_id();
let sender = ctx.existing_public_accounts()[0];
let sender_balance_before = ctx.sequencer_client().get_account_balance(sender).await?;
let faucet_balance_before = ctx
.sequencer_client()
.get_account_balance(faucet_account_id)
.await?;
let amount = 100_u128;
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: public_mention(sender),
to: Some(public_mention(faucet_account_id)),
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount,
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
info!("Waiting for next block creation");
tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
let sender_balance_after = ctx.sequencer_client().get_account_balance(sender).await?;
let faucet_balance_after = ctx
.sequencer_client()
.get_account_balance(faucet_account_id)
.await?;
assert_eq!(sender_balance_after, sender_balance_before - amount);
assert_eq!(faucet_balance_after, faucet_balance_before + amount);
Ok(())
}
#[test]
async fn cannot_execute_faucet_program() -> Result<()> {
let ctx = TestContext::new().await?;
let faucet_account_id = system_faucet_account_id();
let recipient = ctx.existing_public_accounts()[0];
let vault_program_id = Program::vault().id();
let recipient_vault_id = vault_core::compute_vault_account_id(vault_program_id, recipient);
let recipient_balance_before = ctx
.sequencer_client()
.get_account_balance(recipient)
.await?;
let faucet_balance_before = ctx
.sequencer_client()
.get_account_balance(faucet_account_id)
.await?;
let amount = 1_u128;
let message = public_transaction::Message::try_new(
Program::faucet().id(),
vec![faucet_account_id, recipient_vault_id],
vec![],
faucet_core::Instruction::Transfer {
vault_program_id,
recipient_id: recipient,
amount,
},
)?;
let tx = nssa::PublicTransaction::new(
message,
nssa::public_transaction::WitnessSet::from_raw_parts(vec![]),
);
let tx_hash = ctx
.sequencer_client()
.send_transaction(NSSATransaction::Public(tx))
.await?;
info!("Waiting for next block creation");
tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
let recipient_balance_after = ctx
.sequencer_client()
.get_account_balance(recipient)
.await?;
let faucet_balance_after = ctx
.sequencer_client()
.get_account_balance(faucet_account_id)
.await?;
let tx_on_chain = ctx.sequencer_client().get_transaction(tx_hash).await?;
assert_eq!(recipient_balance_after, recipient_balance_before);
assert_eq!(faucet_balance_after, faucet_balance_before);
assert!(tx_on_chain.is_none());
Ok(())
}

View File

@ -9,54 +9,61 @@ use std::time::Duration;
use anyhow::{Context as _, Result};
use indexer_service_rpc::RpcClient as _;
use integration_tests::{
TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, format_private_account_id,
format_public_account_id, verify_commitment_is_in_state,
TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, private_mention, public_mention,
verify_commitment_is_in_state,
};
use log::info;
use nssa::AccountId;
use wallet::cli::{Command, programs::native_token_transfer::AuthTransferSubcommand};
use wallet::{
account::Label,
cli::{CliAccountMention, Command, programs::native_token_transfer::AuthTransferSubcommand},
};
/// Maximum time to wait for the indexer to catch up to the sequencer.
const L2_TO_L1_TIMEOUT_MILLIS: u64 = 180_000;
/// Poll the indexer until its last finalized block id reaches the sequencer's
/// current last block id (and at least the genesis block has been advanced past),
/// or until [`L2_TO_L1_TIMEOUT_MILLIS`] elapses. Returns the last indexer block
/// id observed.
async fn wait_for_indexer_to_catch_up(ctx: &TestContext) -> u64 {
/// current last block id or until [`L2_TO_L1_TIMEOUT_MILLIS`] elapses.
/// Returns the last indexer block id observed.
async fn wait_for_indexer_to_catch_up(ctx: &TestContext) -> Result<u64> {
let timeout = Duration::from_millis(L2_TO_L1_TIMEOUT_MILLIS);
let block_id_to_catch_up =
sequencer_service_rpc::RpcClient::get_last_block_id(ctx.sequencer_client()).await?;
let mut last_ind: u64 = 1;
let inner = async {
loop {
let seq = sequencer_service_rpc::RpcClient::get_last_block_id(ctx.sequencer_client())
.await
.unwrap_or(0);
let ind = ctx
.indexer_client()
.get_last_finalized_block_id()
.await
.unwrap_or(1);
.await?
.unwrap_or(0);
last_ind = ind;
if ind >= seq && ind > 1 {
info!("Indexer caught up: seq={seq}, ind={ind}");
return ind;
if ind >= block_id_to_catch_up {
let last_seq =
sequencer_service_rpc::RpcClient::get_last_block_id(ctx.sequencer_client())
.await?;
info!(
"Indexer caught up. Indexer last block id: {ind}. Current sequencer last block id: {last_seq}"
);
return Ok(ind);
}
tokio::time::sleep(Duration::from_secs(2)).await;
}
};
tokio::time::timeout(timeout, inner)
.await
.unwrap_or_else(|_| {
info!("Indexer catch-up timed out: ind={last_ind}");
last_ind
})
.with_context(|| {
format!(
"Indexer failed to catch up within {L2_TO_L1_TIMEOUT_MILLIS} milliseconds. Last indexer block id observed: {last_ind}, but needed to catch up to at least {block_id_to_catch_up}"
)
})?
}
#[tokio::test]
async fn indexer_test_run() -> Result<()> {
let ctx = TestContext::new().await?;
let last_block_indexer = wait_for_indexer_to_catch_up(&ctx).await;
let last_block_indexer = wait_for_indexer_to_catch_up(&ctx).await?;
let last_block_seq =
sequencer_service_rpc::RpcClient::get_last_block_id(ctx.sequencer_client()).await?;
@ -64,7 +71,7 @@ async fn indexer_test_run() -> Result<()> {
info!("Last block on seq now is {last_block_seq}");
info!("Last block on ind now is {last_block_indexer}");
assert!(last_block_indexer > 1);
assert!(last_block_indexer > 0);
Ok(())
}
@ -74,11 +81,11 @@ async fn indexer_block_batching() -> Result<()> {
let ctx = TestContext::new().await?;
info!("Waiting for indexer to parse blocks");
let last_block_indexer = wait_for_indexer_to_catch_up(&ctx).await;
let last_block_indexer = wait_for_indexer_to_catch_up(&ctx).await?;
info!("Last block on ind now is {last_block_indexer}");
assert!(last_block_indexer > 1);
assert!(last_block_indexer > 0);
// Getting wide batch to fit all blocks (from latest backwards)
let mut block_batch = ctx.indexer_client().get_blocks(None, 100).await.unwrap();
@ -105,16 +112,12 @@ async fn indexer_state_consistency() -> Result<()> {
let mut ctx = TestContext::new().await?;
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: Some(format_public_account_id(ctx.existing_public_accounts()[0])),
from_label: None,
to: Some(format_public_account_id(ctx.existing_public_accounts()[1])),
to_label: None,
from: public_mention(ctx.existing_public_accounts()[0]),
to: Some(public_mention(ctx.existing_public_accounts()[1])),
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 100,
from_key_path: None,
to_key_path: None,
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -144,16 +147,12 @@ async fn indexer_state_consistency() -> Result<()> {
let to: AccountId = ctx.existing_private_accounts()[1];
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: Some(format_private_account_id(from)),
from_label: None,
to: Some(format_private_account_id(to)),
to_label: None,
from: private_mention(from),
to: Some(private_mention(to)),
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 100,
from_key_path: None,
to_key_path: None,
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -176,7 +175,7 @@ async fn indexer_state_consistency() -> Result<()> {
info!("Successfully transferred privately to owned account");
info!("Waiting for indexer to parse blocks");
wait_for_indexer_to_catch_up(&ctx).await;
wait_for_indexer_to_catch_up(&ctx).await?;
let acc1_ind_state = ctx
.indexer_client()
@ -214,35 +213,29 @@ async fn indexer_state_consistency_with_labels() -> Result<()> {
let mut ctx = TestContext::new().await?;
// Assign labels to both accounts
let from_label = "idx-sender-label".to_owned();
let to_label_str = "idx-receiver-label".to_owned();
let from_label = Label::new("idx-sender-label");
let to_label = Label::new("idx-receiver-label");
let label_cmd = Command::Account(wallet::cli::account::AccountSubcommand::Label {
account_id: Some(format_public_account_id(ctx.existing_public_accounts()[0])),
account_label: None,
account_id: public_mention(ctx.existing_public_accounts()[0]),
label: from_label.clone(),
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), label_cmd).await?;
let label_cmd = Command::Account(wallet::cli::account::AccountSubcommand::Label {
account_id: Some(format_public_account_id(ctx.existing_public_accounts()[1])),
account_label: None,
label: to_label_str.clone(),
account_id: public_mention(ctx.existing_public_accounts()[1]),
label: to_label.clone(),
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), label_cmd).await?;
// Send using labels instead of account IDs
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: None,
from_label: Some(from_label),
to: None,
to_label: Some(to_label_str),
from: CliAccountMention::Label(from_label),
to: Some(CliAccountMention::Label(to_label)),
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 100,
from_key_path: None,
to_key_path: None,
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -265,7 +258,7 @@ async fn indexer_state_consistency_with_labels() -> Result<()> {
assert_eq!(acc_2_balance, 20100);
info!("Waiting for indexer to parse blocks");
wait_for_indexer_to_catch_up(&ctx).await;
wait_for_indexer_to_catch_up(&ctx).await?;
let acc1_ind_state = ctx
.indexer_client()

View File

@ -1,78 +1,184 @@
#![expect(
clippy::shadow_unrelated,
clippy::tests_outside_test_module,
clippy::undocumented_unsafe_blocks,
reason = "We don't care about these in tests"
)]
use anyhow::{Context as _, Result};
use indexer_service_rpc::RpcClient as _;
use integration_tests::{
TIME_TO_WAIT_FOR_BLOCK_SECONDS, format_private_account_id, format_public_account_id,
test_context_ffi::BlockingTestContextFFI, verify_commitment_is_in_state,
use std::{
ffi::{CString, c_char},
fs::File,
io::Write as _,
net::SocketAddr,
};
use log::info;
use anyhow::{Context as _, Result};
use indexer_ffi::{
IndexerServiceFFI, OperationStatus, Runtime,
api::{
PointerResult,
lifecycle::InitializedIndexerServiceFFIResult,
types::{FfiAccountId, FfiOption, FfiVec, account::FfiAccount, block::FfiBlock},
},
};
use integration_tests::{
BlockingTestContext, TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, private_mention,
public_mention, verify_commitment_is_in_state,
};
use log::{debug, info};
use nssa::AccountId;
use wallet::cli::{Command, programs::native_token_transfer::AuthTransferSubcommand};
use tempfile::TempDir;
use wallet::{
account::Label,
cli::{Command, programs::native_token_transfer::AuthTransferSubcommand},
};
/// Maximum time to wait for the indexer to catch up to the sequencer.
const L2_TO_L1_TIMEOUT_MILLIS: u64 = 180_000;
unsafe extern "C" {
unsafe fn query_last_block(
runtime: *const Runtime,
indexer: *const IndexerServiceFFI,
) -> PointerResult<u64, OperationStatus>;
unsafe fn query_block_vec(
runtime: *const Runtime,
indexer: *const IndexerServiceFFI,
before: FfiOption<u64>,
limit: u64,
) -> PointerResult<FfiVec<FfiBlock>, OperationStatus>;
unsafe fn query_account(
runtime: *const Runtime,
indexer: *const IndexerServiceFFI,
account_id: FfiAccountId,
) -> PointerResult<FfiAccount, OperationStatus>;
unsafe fn start_indexer(
runtime: *const Runtime,
config_path: *const c_char,
port: u16,
) -> InitializedIndexerServiceFFIResult;
}
fn setup_indexer_ffi(
runtime: &Runtime,
bedrock_addr: SocketAddr,
) -> Result<(IndexerServiceFFI, TempDir)> {
let temp_indexer_dir =
tempfile::tempdir().context("Failed to create temp dir for indexer home")?;
debug!(
"Using temp indexer home at {}",
temp_indexer_dir.path().display()
);
let indexer_config =
integration_tests::config::indexer_config(bedrock_addr, temp_indexer_dir.path().to_owned())
.context("Failed to create Indexer config")?;
let config_json = serde_json::to_vec(&indexer_config)?;
let config_path = temp_indexer_dir.path().join("indexer_config.json");
let mut file = File::create(config_path.as_path())?;
file.write_all(&config_json)?;
file.flush()?;
let res =
// SAFETY: lib function ensures validity of value.
unsafe { start_indexer(std::ptr::from_ref(runtime), CString::new(config_path.to_str().unwrap())?.as_ptr(), 0) };
if res.error.is_error() {
anyhow::bail!("Indexer FFI error {:?}", res.error);
}
Ok((
// SAFETY: lib function ensures validity of value.
unsafe { std::ptr::read(res.value) },
temp_indexer_dir,
))
}
/// Prepare setup for tests.
fn setup() -> Result<(BlockingTestContext, IndexerServiceFFI, TempDir)> {
let ctx = TestContext::builder().disable_indexer().build_blocking()?;
// Safety: ctx runtime is valid for the lifetime of the returned Runtime
let runtime = unsafe { Runtime::from_borrowed(ctx.runtime()) };
let (indexer_ffi, indexer_dir) = setup_indexer_ffi(&runtime, ctx.ctx().bedrock_addr())?;
Ok((ctx, indexer_ffi, indexer_dir))
}
#[test]
// Smoke test: after waiting for L2->L1 propagation, both the context-owned
// indexer and the FFI-driven indexer must report a non-trivial last block.
fn indexer_test_run_ffi() -> Result<()> {
let blocking_ctx = BlockingTestContextFFI::new()?;
let runtime_wrapped = blocking_ctx.runtime();
// NOTE(review): `setup()` builds a second, independent test context carrying
// the FFI indexer; `blocking_ctx` above is queried separately below.
let (ctx, indexer_ffi, _indexer_dir) = setup()?;
// RUN OBSERVATION
// NOTE(review): the async sleep and the thread::sleep below wait the same
// duration back to back — this looks like a merge/diff leftover; confirm
// whether both waits are intended or one should be removed.
runtime_wrapped.block_on(async {
tokio::time::sleep(std::time::Duration::from_millis(L2_TO_L1_TIMEOUT_MILLIS)).await;
});
std::thread::sleep(std::time::Duration::from_millis(L2_TO_L1_TIMEOUT_MILLIS));
let last_block_indexer = blocking_ctx.ctx().get_last_block_indexer(runtime_wrapped)?;
// Safety: ctx runtime is valid for the lifetime of the returned Runtime
let runtime = unsafe { Runtime::from_borrowed(ctx.runtime()) };
// SAFETY(review): `runtime` and `indexer_ffi` are live locals, so the raw
// const pointers stay valid for the duration of the FFI call.
let last_block_indexer_ffi_res =
unsafe { query_last_block(&raw const runtime, &raw const indexer_ffi) };
info!("Last block on ind now is {last_block_indexer}");
assert!(last_block_indexer_ffi_res.error.is_ok());
assert!(last_block_indexer > 1);
// SAFETY(review): error checked above; assumes the FFI contract makes
// `value` a valid pointer on success — confirm against the lib docs.
let last_block_indexer_ffi = unsafe { *last_block_indexer_ffi_res.value };
info!("Last block on indexer FFI now is {last_block_indexer_ffi}");
assert!(last_block_indexer_ffi > 0);
Ok(())
}
#[test]
// Verifies that block batches returned over FFI form a consistent hash chain:
// each block's hash must equal the next (newer) block's prev_block_hash.
// NOTE(review): this function is truncated in the rendered diff (the hunk
// marker after `Ok(())` hides the closing braces), and several old/new diff
// lines coexist below — read with care.
fn indexer_ffi_block_batching() -> Result<()> {
let blocking_ctx = BlockingTestContextFFI::new()?;
let runtime_wrapped = blocking_ctx.runtime();
// NOTE(review): `ctx` is bound twice in a row (context accessor, then the
// tuple from `setup()`); the first binding is shadowed — likely a diff
// artifact from the old version of this test.
let ctx = blocking_ctx.ctx();
let (ctx, indexer_ffi, _indexer_dir) = setup()?;
// WAIT
info!("Waiting for indexer to parse blocks");
// NOTE(review): duplicate waits (async sleep + thread::sleep of the same
// duration) — looks like old and new diff sides both rendered.
runtime_wrapped.block_on(async {
tokio::time::sleep(std::time::Duration::from_millis(L2_TO_L1_TIMEOUT_MILLIS)).await;
});
std::thread::sleep(std::time::Duration::from_millis(L2_TO_L1_TIMEOUT_MILLIS));
let last_block_indexer = runtime_wrapped
.block_on(ctx.indexer_client().get_last_finalized_block_id())
.unwrap();
// Safety: ctx runtime is valid for the lifetime of the returned Runtime
let runtime = unsafe { Runtime::from_borrowed(ctx.runtime()) };
let last_block_indexer_ffi_res =
unsafe { query_last_block(&raw const runtime, &raw const indexer_ffi) };
info!("Last block on ind now is {last_block_indexer}");
assert!(last_block_indexer_ffi_res.error.is_ok());
assert!(last_block_indexer > 1);
// Shadow the RPC-sourced value with the FFI-sourced one for the checks below.
let last_block_indexer = unsafe { *last_block_indexer_ffi_res.value };
// Getting wide batch to fit all blocks (from latest backwards)
let mut block_batch = runtime_wrapped
.block_on(ctx.indexer_client().get_blocks(None, 100))
.unwrap();
info!("Last block on indexer FFI now is {last_block_indexer}");
// Reverse to check chain consistency from oldest to newest
block_batch.reverse();
assert!(last_block_indexer > 0);
// Checking chain consistency
let mut prev_block_hash = block_batch.first().unwrap().header.hash;
// `None` before-cursor + generous limit: fetch everything from the latest
// block backwards on every FFI query.
let before_ffi = FfiOption::<u64>::from_none();
let limit = 100;
for block in &block_batch[1..] {
assert_eq!(block.header.prev_block_hash, prev_block_hash);
let block_batch_ffi_res = unsafe {
query_block_vec(
&raw const runtime,
&raw const indexer_ffi,
before_ffi,
limit,
)
};
assert!(block_batch_ffi_res.error.is_ok());
// FFI batch is newest-first, so each element's hash must match the
// previous element's prev_block_hash.
let block_batch = unsafe { &*block_batch_ffi_res.value };
let mut last_block_prev_hash = unsafe { block_batch.get(0) }.header.prev_block_hash.data;
for i in 1..block_batch.len {
let block = unsafe { block_batch.get(i) };
assert_eq!(last_block_prev_hash, block.header.hash.data);
info!("Block {} chain-consistent", block.header.block_id);
prev_block_hash = block.header.hash;
last_block_prev_hash = block.header.prev_block_hash.data;
}
Ok(())
@ -80,44 +186,37 @@ fn indexer_ffi_block_batching() -> Result<()> {
#[test]
// Sends a public auth-transfer through the wallet CLI and checks the balances
// moved as expected (100 units from account 0 to account 1).
// NOTE(review): this span is a rendered diff with +/- stripped — old and new
// lines of the commit coexist (duplicate struct fields, both `runtime_wrapped
// .block_on(...)` and `ctx.block_on(...)` forms). It will not compile as-is;
// reconstruct one side before reuse.
fn indexer_ffi_state_consistency() -> Result<()> {
let mut blocking_ctx = BlockingTestContextFFI::new()?;
let runtime_wrapped = blocking_ctx.runtime_clone();
// NOTE(review): `ctx` is immediately shadowed by the `setup()` tuple below —
// old/new diff sides rendered together.
let ctx = blocking_ctx.ctx_mut();
let (mut ctx, indexer_ffi, _indexer_dir) = setup()?;
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
// NOTE(review): `from`/`to` appear twice — the `format_public_account_id` +
// `from_label`/`to_label` fields are the old diff side; `public_mention` is new.
from: Some(format_public_account_id(ctx.existing_public_accounts()[0])),
from_label: None,
to: Some(format_public_account_id(ctx.existing_public_accounts()[1])),
to_label: None,
from: public_mention(ctx.ctx().existing_public_accounts()[0]),
to: Some(public_mention(ctx.ctx().existing_public_accounts()[1])),
to_npk: None,
to_vpk: None,
amount: 100,
to_identifier: Some(0),
from_key_path: None,
to_key_path: None,
});
// NOTE(review): old (`runtime_wrapped.block_on`) and new (`ctx.block_on_mut`)
// invocations both present.
runtime_wrapped.block_on(wallet::cli::execute_subcommand(ctx.wallet_mut(), command))?;
ctx.block_on_mut(|ctx| wallet::cli::execute_subcommand(ctx.wallet_mut(), command))?;
info!("Waiting for next block creation");
// NOTE(review): TIME_TO_WAIT_FOR_BLOCK_SECONDS fed to `from_millis` in the
// old side and `from_secs` in the new side — the new side's unit matches the
// constant's name.
runtime_wrapped.block_on(async {
tokio::time::sleep(std::time::Duration::from_millis(
TIME_TO_WAIT_FOR_BLOCK_SECONDS,
))
.await;
});
std::thread::sleep(std::time::Duration::from_secs(
TIME_TO_WAIT_FOR_BLOCK_SECONDS,
));
info!("Checking correct balance move");
// NOTE(review): interleaved old/new balance queries below.
let acc_1_balance =
runtime_wrapped.block_on(sequencer_service_rpc::RpcClient::get_account_balance(
let acc_1_balance = ctx.block_on(|ctx| {
sequencer_service_rpc::RpcClient::get_account_balance(
ctx.sequencer_client(),
ctx.existing_public_accounts()[0],
))?;
)
})?;
let acc_2_balance =
runtime_wrapped.block_on(sequencer_service_rpc::RpcClient::get_account_balance(
)
})?;
let acc_2_balance = ctx.block_on(|ctx| {
sequencer_service_rpc::RpcClient::get_account_balance(
ctx.sequencer_client(),
ctx.existing_public_accounts()[1],
))?;
)
})?;
info!("Balance of sender: {acc_1_balance:#?}");
info!("Balance of receiver: {acc_2_balance:#?}");
@ -125,80 +224,90 @@ fn indexer_ffi_state_consistency() -> Result<()> {
// Continuation of indexer_ffi_state_consistency (hunk boundary above hides
// intervening lines). Public balances first, then a private transfer whose
// commitments are verified against sequencer state, then indexer-vs-sequencer
// account-state equality via the FFI `query_account`.
// NOTE(review): old/new diff lines still coexist throughout this span.
assert_eq!(acc_1_balance, 9900);
assert_eq!(acc_2_balance, 20100);
// NOTE(review): `from`/`to` each bound twice — old (`ctx.existing_...`) and
// new (`ctx.ctx().existing_...`) diff sides.
let from: AccountId = ctx.existing_private_accounts()[0];
let to: AccountId = ctx.existing_private_accounts()[1];
let from: AccountId = ctx.ctx().existing_private_accounts()[0];
let to: AccountId = ctx.ctx().existing_private_accounts()[1];
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
// NOTE(review): duplicate fields — `format_private_account_id` + labels are
// the old side; `private_mention` is the new side.
from: Some(format_private_account_id(from)),
from_label: None,
to: Some(format_private_account_id(to)),
to_label: None,
from: private_mention(from),
to: Some(private_mention(to)),
to_npk: None,
to_vpk: None,
amount: 100,
to_identifier: Some(0),
from_key_path: None,
to_key_path: None,
});
runtime_wrapped.block_on(wallet::cli::execute_subcommand(ctx.wallet_mut(), command))?;
ctx.block_on_mut(|ctx| wallet::cli::execute_subcommand(ctx.wallet_mut(), command))?;
info!("Waiting for next block creation");
runtime_wrapped.block_on(async {
tokio::time::sleep(std::time::Duration::from_millis(
TIME_TO_WAIT_FOR_BLOCK_SECONDS,
))
.await;
});
std::thread::sleep(std::time::Duration::from_secs(
TIME_TO_WAIT_FOR_BLOCK_SECONDS,
));
// Both parties' new commitments must now be present in sequencer state.
let new_commitment1 = ctx
.ctx()
.wallet()
.get_private_account_commitment(from)
.context("Failed to get private account commitment for sender")?;
let commitment_check1 = runtime_wrapped.block_on(verify_commitment_is_in_state(
new_commitment1,
ctx.sequencer_client(),
));
let commitment_check1 =
ctx.block_on(|ctx| verify_commitment_is_in_state(new_commitment1, ctx.sequencer_client()));
assert!(commitment_check1);
let new_commitment2 = ctx
.ctx()
.wallet()
.get_private_account_commitment(to)
.context("Failed to get private account commitment for receiver")?;
let commitment_check2 = runtime_wrapped.block_on(verify_commitment_is_in_state(
new_commitment2,
ctx.sequencer_client(),
));
let commitment_check2 =
ctx.block_on(|ctx| verify_commitment_is_in_state(new_commitment2, ctx.sequencer_client()));
assert!(commitment_check2);
info!("Successfully transferred privately to owned account");
// WAIT
info!("Waiting for indexer to parse blocks");
runtime_wrapped.block_on(async {
tokio::time::sleep(std::time::Duration::from_millis(L2_TO_L1_TIMEOUT_MILLIS)).await;
});
std::thread::sleep(std::time::Duration::from_millis(L2_TO_L1_TIMEOUT_MILLIS));
// NOTE(review): old side queried the indexer over RPC; new side (below)
// queries it through the FFI `query_account` instead.
let acc1_ind_state = runtime_wrapped.block_on(
ctx.indexer_client()
.get_account(ctx.existing_public_accounts()[0].into()),
)?;
let acc2_ind_state = runtime_wrapped.block_on(
ctx.indexer_client()
.get_account(ctx.existing_public_accounts()[1].into()),
)?;
// Safety: ctx runtime is valid for the lifetime of the returned Runtime
let runtime = unsafe { Runtime::from_borrowed(ctx.runtime()) };
let acc1_ind_state_ffi = unsafe {
query_account(
&raw const runtime,
&raw const indexer_ffi,
(&ctx.ctx().existing_public_accounts()[0]).into(),
)
};
assert!(acc1_ind_state_ffi.error.is_ok());
// SAFETY(review): error checked above; assumes `value` is valid on success.
let acc1_ind_state_pre = unsafe { &*acc1_ind_state_ffi.value };
let acc1_ind_state: indexer_service_protocol::Account = acc1_ind_state_pre.into();
let acc2_ind_state_ffi = unsafe {
query_account(
&raw const runtime,
&raw const indexer_ffi,
(&ctx.ctx().existing_public_accounts()[1]).into(),
)
};
assert!(acc2_ind_state_ffi.error.is_ok());
let acc2_ind_state_pre = unsafe { &*acc2_ind_state_ffi.value };
let acc2_ind_state: indexer_service_protocol::Account = acc2_ind_state_pre.into();
info!("Checking correct state transition");
// NOTE(review): interleaved old/new sequencer queries below.
let acc1_seq_state =
runtime_wrapped.block_on(sequencer_service_rpc::RpcClient::get_account(
let acc1_seq_state = ctx.block_on(|ctx| {
sequencer_service_rpc::RpcClient::get_account(
ctx.sequencer_client(),
ctx.existing_public_accounts()[0],
))?;
let acc2_seq_state =
runtime_wrapped.block_on(sequencer_service_rpc::RpcClient::get_account(
)
})?;
let acc2_seq_state = ctx.block_on(|ctx| {
sequencer_service_rpc::RpcClient::get_account(
ctx.sequencer_client(),
ctx.existing_public_accounts()[1],
))?;
)
})?;
// Indexer and sequencer must agree on both accounts' states.
assert_eq!(acc1_ind_state, acc1_seq_state.into());
assert_eq!(acc2_ind_state, acc2_seq_state.into());
@ -210,80 +319,81 @@ fn indexer_ffi_state_consistency() -> Result<()> {
#[test]
// Same public-transfer consistency check as above, but the wallet commands
// address accounts by label instead of account id.
// NOTE(review): this span is also a rendered diff with +/- stripped (old
// string labels vs new `Label` type coexist), and the function is truncated
// at the bottom of the visible chunk.
fn indexer_ffi_state_consistency_with_labels() -> Result<()> {
let mut blocking_ctx = BlockingTestContextFFI::new()?;
let runtime_wrapped = blocking_ctx.runtime_clone();
// NOTE(review): `ctx` shadowed immediately by the `setup()` tuple — diff artifact.
let ctx = blocking_ctx.ctx_mut();
let (mut ctx, indexer_ffi, _indexer_dir) = setup()?;
// Assign labels to both accounts
// NOTE(review): old side used plain `String` labels; new side wraps them in
// `Label::new` — both bindings rendered.
let from_label = "idx-sender-label".to_owned();
let to_label_str = "idx-receiver-label".to_owned();
let from_label = Label::new("idx-sender-label");
let to_label = Label::new("idx-receiver-label");
let label_cmd = Command::Account(wallet::cli::account::AccountSubcommand::Label {
account_id: Some(format_public_account_id(ctx.existing_public_accounts()[0])),
account_label: None,
account_id: public_mention(ctx.ctx().existing_public_accounts()[0]),
label: from_label.clone(),
});
runtime_wrapped.block_on(wallet::cli::execute_subcommand(ctx.wallet_mut(), label_cmd))?;
ctx.block_on_mut(|ctx| wallet::cli::execute_subcommand(ctx.wallet_mut(), label_cmd))?;
let label_cmd = Command::Account(wallet::cli::account::AccountSubcommand::Label {
account_id: Some(format_public_account_id(ctx.existing_public_accounts()[1])),
account_label: None,
label: to_label_str.clone(),
account_id: public_mention(ctx.ctx().existing_public_accounts()[1]),
label: to_label.clone(),
});
runtime_wrapped.block_on(wallet::cli::execute_subcommand(ctx.wallet_mut(), label_cmd))?;
ctx.block_on_mut(|ctx| wallet::cli::execute_subcommand(ctx.wallet_mut(), label_cmd))?;
// Send using labels instead of account IDs
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
// NOTE(review): duplicate fields — `from_label`/`to_label` option fields are
// the old side; `from`/`to` built via `.into()` from `Label` are new.
from: None,
from_label: Some(from_label),
to: None,
to_label: Some(to_label_str),
from: from_label.into(),
to: Some(to_label.into()),
to_npk: None,
to_vpk: None,
amount: 100,
to_identifier: Some(0),
from_key_path: None,
to_key_path: None,
});
runtime_wrapped.block_on(wallet::cli::execute_subcommand(ctx.wallet_mut(), command))?;
ctx.block_on_mut(|ctx| wallet::cli::execute_subcommand(ctx.wallet_mut(), command))?;
info!("Waiting for next block creation");
// NOTE(review): old side slept `from_millis`; new side `from_secs` — the new
// unit matches the constant's name.
runtime_wrapped.block_on(async {
tokio::time::sleep(std::time::Duration::from_millis(
TIME_TO_WAIT_FOR_BLOCK_SECONDS,
))
.await;
});
std::thread::sleep(std::time::Duration::from_secs(
TIME_TO_WAIT_FOR_BLOCK_SECONDS,
));
// NOTE(review): interleaved old/new balance queries below.
let acc_1_balance =
runtime_wrapped.block_on(sequencer_service_rpc::RpcClient::get_account_balance(
let acc_1_balance = ctx.block_on(|ctx| {
sequencer_service_rpc::RpcClient::get_account_balance(
ctx.sequencer_client(),
ctx.existing_public_accounts()[0],
))?;
let acc_2_balance =
runtime_wrapped.block_on(sequencer_service_rpc::RpcClient::get_account_balance(
)
})?;
let acc_2_balance = ctx.block_on(|ctx| {
sequencer_service_rpc::RpcClient::get_account_balance(
ctx.sequencer_client(),
ctx.existing_public_accounts()[1],
))?;
)
})?;
// Transfer of 100 must have moved: sender 10000 -> 9900, receiver 20000 -> 20100.
assert_eq!(acc_1_balance, 9900);
assert_eq!(acc_2_balance, 20100);
info!("Waiting for indexer to parse blocks");
runtime_wrapped.block_on(async {
tokio::time::sleep(std::time::Duration::from_millis(L2_TO_L1_TIMEOUT_MILLIS)).await;
});
std::thread::sleep(std::time::Duration::from_millis(L2_TO_L1_TIMEOUT_MILLIS));
// NOTE(review): old side fetched indexer state over RPC; new side uses the
// FFI `query_account` instead.
let acc1_ind_state = runtime_wrapped.block_on(
ctx.indexer_client()
.get_account(ctx.existing_public_accounts()[0].into()),
)?;
let acc1_seq_state =
runtime_wrapped.block_on(sequencer_service_rpc::RpcClient::get_account(
// Safety: ctx runtime is valid for the lifetime of the returned Runtime
let runtime = unsafe { Runtime::from_borrowed(ctx.runtime()) };
let acc1_ind_state_ffi = unsafe {
query_account(
&raw const runtime,
&raw const indexer_ffi,
(&ctx.ctx().existing_public_accounts()[0]).into(),
)
};
assert!(acc1_ind_state_ffi.error.is_ok());
// SAFETY(review): error checked above; assumes `value` is valid on success.
let acc1_ind_state_pre = unsafe { &*acc1_ind_state_ffi.value };
let acc1_ind_state: indexer_service_protocol::Account = acc1_ind_state_pre.into();
let acc1_seq_state = ctx.block_on(|ctx| {
sequencer_service_rpc::RpcClient::get_account(
ctx.sequencer_client(),
ctx.existing_public_accounts()[0],
))?;
)
})?;
assert_eq!(acc1_ind_state, acc1_seq_state.into());

Some files were not shown because too many files have changed in this diff. Show More