mirror of
https://github.com/logos-blockchain/logos-execution-zone.git
synced 2026-05-09 01:29:39 +00:00
merge: resolve Cargo.lock conflict with main
This commit is contained in:
commit
c327f592bf
57
Cargo.lock
generated
57
Cargo.lock
generated
@ -1111,7 +1111,7 @@ dependencies = [
|
|||||||
"log",
|
"log",
|
||||||
"num",
|
"num",
|
||||||
"pin-project-lite",
|
"pin-project-lite",
|
||||||
"rand 0.9.2",
|
"rand 0.9.3",
|
||||||
"rustls",
|
"rustls",
|
||||||
"rustls-native-certs",
|
"rustls-native-certs",
|
||||||
"rustls-pki-types",
|
"rustls-pki-types",
|
||||||
@ -1960,7 +1960,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
|||||||
checksum = "7ab67060fc6b8ef687992d439ca0fa36e7ed17e9a0b16b25b601e8757df720de"
|
checksum = "7ab67060fc6b8ef687992d439ca0fa36e7ed17e9a0b16b25b601e8757df720de"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"data-encoding",
|
"data-encoding",
|
||||||
"syn 1.0.109",
|
"syn 2.0.117",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@ -2109,7 +2109,7 @@ dependencies = [
|
|||||||
"libc",
|
"libc",
|
||||||
"option-ext",
|
"option-ext",
|
||||||
"redox_users",
|
"redox_users",
|
||||||
"windows-sys 0.59.0",
|
"windows-sys 0.61.2",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@ -2410,7 +2410,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
|||||||
checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb"
|
checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"libc",
|
"libc",
|
||||||
"windows-sys 0.52.0",
|
"windows-sys 0.61.2",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@ -3480,6 +3480,16 @@ dependencies = [
|
|||||||
"url",
|
"url",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "indexer_ffi"
|
||||||
|
version = "0.1.0"
|
||||||
|
dependencies = [
|
||||||
|
"cbindgen",
|
||||||
|
"indexer_service",
|
||||||
|
"log",
|
||||||
|
"tokio",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "indexer_service"
|
name = "indexer_service"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
@ -3591,6 +3601,7 @@ dependencies = [
|
|||||||
"env_logger",
|
"env_logger",
|
||||||
"futures",
|
"futures",
|
||||||
"hex",
|
"hex",
|
||||||
|
"indexer_ffi",
|
||||||
"indexer_service",
|
"indexer_service",
|
||||||
"indexer_service_rpc",
|
"indexer_service_rpc",
|
||||||
"key_protocol",
|
"key_protocol",
|
||||||
@ -3840,7 +3851,7 @@ dependencies = [
|
|||||||
"jsonrpsee-types",
|
"jsonrpsee-types",
|
||||||
"parking_lot",
|
"parking_lot",
|
||||||
"pin-project",
|
"pin-project",
|
||||||
"rand 0.9.2",
|
"rand 0.9.3",
|
||||||
"rustc-hash",
|
"rustc-hash",
|
||||||
"serde",
|
"serde",
|
||||||
"serde_json",
|
"serde_json",
|
||||||
@ -4058,7 +4069,7 @@ dependencies = [
|
|||||||
"oco_ref",
|
"oco_ref",
|
||||||
"or_poisoned",
|
"or_poisoned",
|
||||||
"paste",
|
"paste",
|
||||||
"rand 0.9.2",
|
"rand 0.9.3",
|
||||||
"reactive_graph",
|
"reactive_graph",
|
||||||
"rustc-hash",
|
"rustc-hash",
|
||||||
"rustc_version",
|
"rustc_version",
|
||||||
@ -5391,7 +5402,7 @@ version = "0.50.3"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5"
|
checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"windows-sys 0.59.0",
|
"windows-sys 0.61.2",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@ -5720,7 +5731,7 @@ dependencies = [
|
|||||||
"futures-util",
|
"futures-util",
|
||||||
"opentelemetry",
|
"opentelemetry",
|
||||||
"percent-encoding",
|
"percent-encoding",
|
||||||
"rand 0.9.2",
|
"rand 0.9.3",
|
||||||
"thiserror 2.0.18",
|
"thiserror 2.0.18",
|
||||||
"tokio",
|
"tokio",
|
||||||
"tokio-stream",
|
"tokio-stream",
|
||||||
@ -6104,7 +6115,7 @@ checksum = "37566cb3fdacef14c0737f9546df7cfeadbfbc9fef10991038bf5015d0c80532"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"bitflags 2.11.0",
|
"bitflags 2.11.0",
|
||||||
"num-traits",
|
"num-traits",
|
||||||
"rand 0.9.2",
|
"rand 0.9.3",
|
||||||
"rand_chacha 0.9.0",
|
"rand_chacha 0.9.0",
|
||||||
"rand_xorshift",
|
"rand_xorshift",
|
||||||
"unarray",
|
"unarray",
|
||||||
@ -6217,7 +6228,7 @@ dependencies = [
|
|||||||
"bytes",
|
"bytes",
|
||||||
"getrandom 0.3.4",
|
"getrandom 0.3.4",
|
||||||
"lru-slab",
|
"lru-slab",
|
||||||
"rand 0.9.2",
|
"rand 0.9.3",
|
||||||
"ring",
|
"ring",
|
||||||
"rustc-hash",
|
"rustc-hash",
|
||||||
"rustls",
|
"rustls",
|
||||||
@ -6305,9 +6316,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "rand"
|
name = "rand"
|
||||||
version = "0.9.2"
|
version = "0.9.3"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1"
|
checksum = "7ec095654a25171c2124e9e3393a930bddbffdc939556c914957a4c3e0a87166"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"rand_chacha 0.9.0",
|
"rand_chacha 0.9.0",
|
||||||
"rand_core 0.9.5",
|
"rand_core 0.9.5",
|
||||||
@ -6626,7 +6637,7 @@ dependencies = [
|
|||||||
"elf",
|
"elf",
|
||||||
"lazy_static",
|
"lazy_static",
|
||||||
"postcard",
|
"postcard",
|
||||||
"rand 0.9.2",
|
"rand 0.9.3",
|
||||||
"risc0-zkp",
|
"risc0-zkp",
|
||||||
"risc0-zkvm-platform",
|
"risc0-zkvm-platform",
|
||||||
"ruint",
|
"ruint",
|
||||||
@ -6722,7 +6733,7 @@ dependencies = [
|
|||||||
"hex",
|
"hex",
|
||||||
"lazy-regex",
|
"lazy-regex",
|
||||||
"metal",
|
"metal",
|
||||||
"rand 0.9.2",
|
"rand 0.9.3",
|
||||||
"rayon",
|
"rayon",
|
||||||
"risc0-circuit-recursion-sys",
|
"risc0-circuit-recursion-sys",
|
||||||
"risc0-core",
|
"risc0-core",
|
||||||
@ -6766,7 +6777,7 @@ dependencies = [
|
|||||||
"num-traits",
|
"num-traits",
|
||||||
"paste",
|
"paste",
|
||||||
"postcard",
|
"postcard",
|
||||||
"rand 0.9.2",
|
"rand 0.9.3",
|
||||||
"rayon",
|
"rayon",
|
||||||
"ringbuffer",
|
"ringbuffer",
|
||||||
"risc0-binfmt",
|
"risc0-binfmt",
|
||||||
@ -6873,7 +6884,7 @@ dependencies = [
|
|||||||
"ndarray",
|
"ndarray",
|
||||||
"parking_lot",
|
"parking_lot",
|
||||||
"paste",
|
"paste",
|
||||||
"rand 0.9.2",
|
"rand 0.9.3",
|
||||||
"rand_core 0.9.5",
|
"rand_core 0.9.5",
|
||||||
"rayon",
|
"rayon",
|
||||||
"risc0-core",
|
"risc0-core",
|
||||||
@ -6911,7 +6922,7 @@ dependencies = [
|
|||||||
"num-traits",
|
"num-traits",
|
||||||
"object",
|
"object",
|
||||||
"prost 0.13.5",
|
"prost 0.13.5",
|
||||||
"rand 0.9.2",
|
"rand 0.9.3",
|
||||||
"rayon",
|
"rayon",
|
||||||
"risc0-binfmt",
|
"risc0-binfmt",
|
||||||
"risc0-build",
|
"risc0-build",
|
||||||
@ -6999,7 +7010,7 @@ dependencies = [
|
|||||||
"futures",
|
"futures",
|
||||||
"light-poseidon",
|
"light-poseidon",
|
||||||
"quote",
|
"quote",
|
||||||
"rand 0.9.2",
|
"rand 0.9.3",
|
||||||
"syn 1.0.109",
|
"syn 1.0.109",
|
||||||
"thiserror 2.0.18",
|
"thiserror 2.0.18",
|
||||||
"tiny-keccak",
|
"tiny-keccak",
|
||||||
@ -7050,7 +7061,7 @@ dependencies = [
|
|||||||
"borsh",
|
"borsh",
|
||||||
"proptest",
|
"proptest",
|
||||||
"rand 0.8.5",
|
"rand 0.8.5",
|
||||||
"rand 0.9.2",
|
"rand 0.9.3",
|
||||||
"ruint-macro",
|
"ruint-macro",
|
||||||
"serde_core",
|
"serde_core",
|
||||||
"valuable",
|
"valuable",
|
||||||
@ -7094,7 +7105,7 @@ dependencies = [
|
|||||||
"errno",
|
"errno",
|
||||||
"libc",
|
"libc",
|
||||||
"linux-raw-sys",
|
"linux-raw-sys",
|
||||||
"windows-sys 0.52.0",
|
"windows-sys 0.61.2",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@ -8027,7 +8038,7 @@ dependencies = [
|
|||||||
"getrandom 0.4.2",
|
"getrandom 0.4.2",
|
||||||
"once_cell",
|
"once_cell",
|
||||||
"rustix",
|
"rustix",
|
||||||
"windows-sys 0.52.0",
|
"windows-sys 0.61.2",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@ -8742,7 +8753,7 @@ dependencies = [
|
|||||||
"http",
|
"http",
|
||||||
"httparse",
|
"httparse",
|
||||||
"log",
|
"log",
|
||||||
"rand 0.9.2",
|
"rand 0.9.3",
|
||||||
"sha1",
|
"sha1",
|
||||||
"thiserror 2.0.18",
|
"thiserror 2.0.18",
|
||||||
"utf-8",
|
"utf-8",
|
||||||
@ -9310,7 +9321,7 @@ version = "0.1.11"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22"
|
checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"windows-sys 0.52.0",
|
"windows-sys 0.61.2",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
|
|||||||
@ -38,6 +38,7 @@ members = [
|
|||||||
"examples/program_deployment/methods/guest",
|
"examples/program_deployment/methods/guest",
|
||||||
"bedrock_client",
|
"bedrock_client",
|
||||||
"testnet_initial_state",
|
"testnet_initial_state",
|
||||||
|
"indexer_ffi",
|
||||||
]
|
]
|
||||||
|
|
||||||
[workspace.dependencies]
|
[workspace.dependencies]
|
||||||
@ -57,6 +58,7 @@ indexer_service_protocol = { path = "indexer/service/protocol" }
|
|||||||
indexer_service_rpc = { path = "indexer/service/rpc" }
|
indexer_service_rpc = { path = "indexer/service/rpc" }
|
||||||
wallet = { path = "wallet" }
|
wallet = { path = "wallet" }
|
||||||
wallet-ffi = { path = "wallet-ffi", default-features = false }
|
wallet-ffi = { path = "wallet-ffi", default-features = false }
|
||||||
|
indexer_ffi = { path = "indexer_ffi" }
|
||||||
clock_core = { path = "programs/clock/core" }
|
clock_core = { path = "programs/clock/core" }
|
||||||
token_core = { path = "programs/token/core" }
|
token_core = { path = "programs/token/core" }
|
||||||
token_program = { path = "programs/token" }
|
token_program = { path = "programs/token" }
|
||||||
|
|||||||
@ -3,7 +3,7 @@ use nssa::AccountId;
|
|||||||
use crate::{
|
use crate::{
|
||||||
HashType,
|
HashType,
|
||||||
block::{Block, HashableBlockData},
|
block::{Block, HashableBlockData},
|
||||||
transaction::NSSATransaction,
|
transaction::{NSSATransaction, clock_invocation},
|
||||||
};
|
};
|
||||||
|
|
||||||
// Helpers
|
// Helpers
|
||||||
@ -15,7 +15,7 @@ pub fn sequencer_sign_key_for_testing() -> nssa::PrivateKey {
|
|||||||
|
|
||||||
// Dummy producers
|
// Dummy producers
|
||||||
|
|
||||||
/// Produce dummy block with.
|
/// Produce dummy block with provided transactions + clock transaction an the end.
|
||||||
///
|
///
|
||||||
/// `id` - block id, provide zero for genesis.
|
/// `id` - block id, provide zero for genesis.
|
||||||
///
|
///
|
||||||
@ -26,8 +26,12 @@ pub fn sequencer_sign_key_for_testing() -> nssa::PrivateKey {
|
|||||||
pub fn produce_dummy_block(
|
pub fn produce_dummy_block(
|
||||||
id: u64,
|
id: u64,
|
||||||
prev_hash: Option<HashType>,
|
prev_hash: Option<HashType>,
|
||||||
transactions: Vec<NSSATransaction>,
|
mut transactions: Vec<NSSATransaction>,
|
||||||
) -> Block {
|
) -> Block {
|
||||||
|
transactions.push(NSSATransaction::Public(clock_invocation(
|
||||||
|
id.saturating_mul(100),
|
||||||
|
)));
|
||||||
|
|
||||||
let block_data = HashableBlockData {
|
let block_data = HashableBlockData {
|
||||||
block_id: id,
|
block_id: id,
|
||||||
prev_block_hash: prev_hash.unwrap_or_default(),
|
prev_block_hash: prev_hash.unwrap_or_default(),
|
||||||
|
|||||||
24
flake.lock
generated
24
flake.lock
generated
@ -2,11 +2,11 @@
|
|||||||
"nodes": {
|
"nodes": {
|
||||||
"crane": {
|
"crane": {
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1769737823,
|
"lastModified": 1776396856,
|
||||||
"narHash": "sha256-DrBaNpZ+sJ4stXm+0nBX7zqZT9t9P22zbk6m5YhQxS4=",
|
"narHash": "sha256-aRJpIJUlZLaf06ekPvqjuU46zvO9K90IxJGpbqodkPs=",
|
||||||
"owner": "ipetkov",
|
"owner": "ipetkov",
|
||||||
"repo": "crane",
|
"repo": "crane",
|
||||||
"rev": "b2f45c3830aa96b7456a4c4bc327d04d7a43e1ba",
|
"rev": "28462d6d55c33206ffa5a56c7907ca3125ed788f",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
@ -20,11 +20,11 @@
|
|||||||
"nixpkgs": "nixpkgs"
|
"nixpkgs": "nixpkgs"
|
||||||
},
|
},
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1770979891,
|
"lastModified": 1775835011,
|
||||||
"narHash": "sha256-cvkVnE7btuFLzv70ORAZve9K1Huiplq0iECgXSXb0ZY=",
|
"narHash": "sha256-SQDLyyRUa5J9QHjNiHbeZw4rQOZnTEo61TcaUpjtLBs=",
|
||||||
"owner": "logos-blockchain",
|
"owner": "logos-blockchain",
|
||||||
"repo": "logos-blockchain-circuits",
|
"repo": "logos-blockchain-circuits",
|
||||||
"rev": "ec7d298e5a3a0507bb8570df86cdf78dc452d024",
|
"rev": "d6cf41f66500d4afc157b4f43de0f0d5bfa01443",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
@ -51,11 +51,11 @@
|
|||||||
},
|
},
|
||||||
"nixpkgs_2": {
|
"nixpkgs_2": {
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1770019141,
|
"lastModified": 1776169885,
|
||||||
"narHash": "sha256-VKS4ZLNx4PNrABoB0L8KUpc1fE7CLpQXQs985tGfaCU=",
|
"narHash": "sha256-l/iNYDZ4bGOAFQY2q8y5OAfBBtrDAaPuRQqWaFHVRXM=",
|
||||||
"owner": "NixOS",
|
"owner": "NixOS",
|
||||||
"repo": "nixpkgs",
|
"repo": "nixpkgs",
|
||||||
"rev": "cb369ef2efd432b3cdf8622b0ffc0a97a02f3137",
|
"rev": "4bd9165a9165d7b5e33ae57f3eecbcb28fb231c9",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
@ -80,11 +80,11 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1770088046,
|
"lastModified": 1776395632,
|
||||||
"narHash": "sha256-4hfYDnUTvL1qSSZEA4CEThxfz+KlwSFQ30Z9jgDguO0=",
|
"narHash": "sha256-Mi1uF5f2FsdBIvy+v7MtsqxD3Xjhd0ARJdwoqqqPtJo=",
|
||||||
"owner": "oxalica",
|
"owner": "oxalica",
|
||||||
"repo": "rust-overlay",
|
"repo": "rust-overlay",
|
||||||
"rev": "71f9daa4e05e49c434d08627e755495ae222bc34",
|
"rev": "8087ff1f47fff983a1fba70fa88b759f2fd8ae97",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
|||||||
22
flake.nix
22
flake.nix
@ -130,9 +130,26 @@
|
|||||||
'';
|
'';
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
|
|
||||||
|
indexerFfiPackage = craneLib.buildPackage (
|
||||||
|
commonArgs
|
||||||
|
// {
|
||||||
|
pname = "logos-execution-zone-indexer-ffi";
|
||||||
|
version = "0.1.0";
|
||||||
|
cargoExtraArgs = "-p indexer_ffi";
|
||||||
|
postInstall = ''
|
||||||
|
mkdir -p $out/include
|
||||||
|
cp indexer_ffi/indexer_ffi.h $out/include/
|
||||||
|
''
|
||||||
|
+ pkgs.lib.optionalString pkgs.stdenv.isDarwin ''
|
||||||
|
install_name_tool -id @rpath/libindexer_ffi.dylib $out/lib/libindexer_ffi.dylib
|
||||||
|
'';
|
||||||
|
}
|
||||||
|
);
|
||||||
in
|
in
|
||||||
{
|
{
|
||||||
wallet = walletFfiPackage;
|
wallet = walletFfiPackage;
|
||||||
|
indexer = indexerFfiPackage;
|
||||||
default = walletFfiPackage;
|
default = walletFfiPackage;
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
@ -144,9 +161,14 @@
|
|||||||
walletFfiShell = pkgs.mkShell {
|
walletFfiShell = pkgs.mkShell {
|
||||||
inputsFrom = [ walletFfiPackage ];
|
inputsFrom = [ walletFfiPackage ];
|
||||||
};
|
};
|
||||||
|
indexerFfiPackage = self.packages.${system}.indexer;
|
||||||
|
indexerFfiShell = pkgs.mkShell {
|
||||||
|
inputsFrom = [ indexerFfiPackage ];
|
||||||
|
};
|
||||||
in
|
in
|
||||||
{
|
{
|
||||||
wallet = walletFfiShell;
|
wallet = walletFfiShell;
|
||||||
|
indexer = indexerFfiShell;
|
||||||
default = walletFfiShell;
|
default = walletFfiShell;
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
|
|||||||
@ -243,14 +243,9 @@ mod tests {
|
|||||||
&sign_key,
|
&sign_key,
|
||||||
);
|
);
|
||||||
let block_id = u64::try_from(i).unwrap();
|
let block_id = u64::try_from(i).unwrap();
|
||||||
let block_timestamp = block_id.saturating_mul(100);
|
|
||||||
let clock_tx = NSSATransaction::Public(clock_invocation(block_timestamp));
|
|
||||||
|
|
||||||
let next_block = common::test_utils::produce_dummy_block(
|
let next_block =
|
||||||
block_id,
|
common::test_utils::produce_dummy_block(block_id, Some(prev_hash), vec![tx]);
|
||||||
Some(prev_hash),
|
|
||||||
vec![tx, clock_tx],
|
|
||||||
);
|
|
||||||
prev_hash = next_block.header.hash;
|
prev_hash = next_block.header.hash;
|
||||||
|
|
||||||
storage
|
storage
|
||||||
|
|||||||
@ -143,23 +143,27 @@ impl IndexerCore {
|
|||||||
l2_blocks_parsed_ids.sort_unstable();
|
l2_blocks_parsed_ids.sort_unstable();
|
||||||
info!("Parsed {} L2 blocks with ids {:?}", l2_block_vec.len(), l2_blocks_parsed_ids);
|
info!("Parsed {} L2 blocks with ids {:?}", l2_block_vec.len(), l2_blocks_parsed_ids);
|
||||||
|
|
||||||
for l2_block in l2_block_vec {
|
for l2_block in l2_block_vec {
|
||||||
// TODO: proper fix is to make the sequencer's genesis include a
|
// TODO: proper fix is to make the sequencer's genesis include a
|
||||||
// trailing `clock_invocation(0)` (and have the indexer's
|
// trailing `clock_invocation(0)` (and have the indexer's
|
||||||
// `open_db_with_genesis` not pre-apply state transitions) so the
|
// `open_db_with_genesis` not pre-apply state transitions) so the
|
||||||
// inscribed genesis can flow through `put_block` like any other
|
// inscribed genesis can flow through `put_block` like any other
|
||||||
// block. For now we skip re-applying it.
|
// block. For now we skip re-applying it.
|
||||||
//
|
//
|
||||||
// The channel-start (block_id == 1) is the sequencer's genesis
|
// The channel-start (block_id == 1) is the sequencer's genesis
|
||||||
// inscription that we re-discover during initial search. The
|
// inscription that we re-discover during initial search. The
|
||||||
// indexer already has its own locally-constructed genesis in
|
// indexer already has its own locally-constructed genesis in
|
||||||
// the store from `open_db_with_genesis`, so re-applying the
|
// the store from `open_db_with_genesis`, so re-applying the
|
||||||
// inscribed copy is both redundant and would fail the strict
|
// inscribed copy is both redundant and would fail the strict
|
||||||
// block validation in `put_block` (the inscribed genesis lacks
|
// block validation in `put_block` (the inscribed genesis lacks
|
||||||
// the trailing clock invocation).
|
// the trailing clock invocation).
|
||||||
if l2_block.header.block_id != 1 {
|
if l2_block.header.block_id != 1 {
|
||||||
self.store.put_block(l2_block.clone(), l1_header).await?;
|
self
|
||||||
}
|
.store
|
||||||
|
.put_block(l2_block.clone(), l1_header)
|
||||||
|
.await
|
||||||
|
.inspect_err(|err| error!("Failed to put block with err {err:?}"))?;
|
||||||
|
}
|
||||||
|
|
||||||
yield Ok(l2_block);
|
yield Ok(l2_block);
|
||||||
}
|
}
|
||||||
|
|||||||
25
indexer_ffi/Cargo.toml
Normal file
25
indexer_ffi/Cargo.toml
Normal file
@ -0,0 +1,25 @@
|
|||||||
|
[package]
|
||||||
|
edition = "2024"
|
||||||
|
license = { workspace = true }
|
||||||
|
name = "indexer_ffi"
|
||||||
|
version = "0.1.0"
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
indexer_service.workspace = true
|
||||||
|
log = { workspace = true }
|
||||||
|
tokio = { features = ["rt-multi-thread"], workspace = true }
|
||||||
|
|
||||||
|
[build-dependencies]
|
||||||
|
cbindgen = "0.29"
|
||||||
|
|
||||||
|
[lib]
|
||||||
|
crate-type = ["rlib", "cdylib", "staticlib"]
|
||||||
|
name = "indexer_ffi"
|
||||||
|
|
||||||
|
[lints]
|
||||||
|
workspace = true
|
||||||
|
|
||||||
|
[package.metadata.cargo-machete]
|
||||||
|
ignored = [
|
||||||
|
"cbindgen",
|
||||||
|
] # machete does not recognize this for build dep and complains.
|
||||||
12
indexer_ffi/build.rs
Normal file
12
indexer_ffi/build.rs
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
use std::env;
|
||||||
|
|
||||||
|
fn main() {
|
||||||
|
let crate_dir = env::var("CARGO_MANIFEST_DIR").unwrap();
|
||||||
|
println!("cargo:rerun-if-changed=src/");
|
||||||
|
cbindgen::Builder::new()
|
||||||
|
.with_crate(crate_dir)
|
||||||
|
.with_language(cbindgen::Language::C)
|
||||||
|
.generate()
|
||||||
|
.expect("Unable to generate bindings")
|
||||||
|
.write_to_file("indexer_ffi.h");
|
||||||
|
}
|
||||||
2
indexer_ffi/cbindgen.toml
Normal file
2
indexer_ffi/cbindgen.toml
Normal file
@ -0,0 +1,2 @@
|
|||||||
|
language = "C" # For increased compatibility
|
||||||
|
no_includes = true
|
||||||
76
indexer_ffi/indexer_ffi.h
Normal file
76
indexer_ffi/indexer_ffi.h
Normal file
@ -0,0 +1,76 @@
|
|||||||
|
#include <stdarg.h>
|
||||||
|
#include <stdbool.h>
|
||||||
|
#include <stdint.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
|
||||||
|
typedef enum OperationStatus {
|
||||||
|
Ok = 0,
|
||||||
|
NullPointer = 1,
|
||||||
|
InitializationError = 2,
|
||||||
|
} OperationStatus;
|
||||||
|
|
||||||
|
typedef struct IndexerServiceFFI {
|
||||||
|
void *indexer_handle;
|
||||||
|
void *runtime;
|
||||||
|
} IndexerServiceFFI;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Simple wrapper around a pointer to a value or an error.
|
||||||
|
*
|
||||||
|
* Pointer is not guaranteed. You should check the error field before
|
||||||
|
* dereferencing the pointer.
|
||||||
|
*/
|
||||||
|
typedef struct PointerResult_IndexerServiceFFI__OperationStatus {
|
||||||
|
struct IndexerServiceFFI *value;
|
||||||
|
enum OperationStatus error;
|
||||||
|
} PointerResult_IndexerServiceFFI__OperationStatus;
|
||||||
|
|
||||||
|
typedef struct PointerResult_IndexerServiceFFI__OperationStatus InitializedIndexerServiceFFIResult;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates and starts an indexer based on the provided
|
||||||
|
* configuration file path.
|
||||||
|
*
|
||||||
|
* # Arguments
|
||||||
|
*
|
||||||
|
* - `config_path`: A pointer to a string representing the path to the configuration file.
|
||||||
|
* - `port`: Number representing a port, on which indexers RPC will start.
|
||||||
|
*
|
||||||
|
* # Returns
|
||||||
|
*
|
||||||
|
* An `InitializedIndexerServiceFFIResult` containing either a pointer to the
|
||||||
|
* initialized `IndexerServiceFFI` or an error code.
|
||||||
|
*/
|
||||||
|
InitializedIndexerServiceFFIResult start_indexer(const char *config_path, uint16_t port);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Stops and frees the resources associated with the given indexer service.
|
||||||
|
*
|
||||||
|
* # Arguments
|
||||||
|
*
|
||||||
|
* - `indexer`: A pointer to the `IndexerServiceFFI` instance to be stopped.
|
||||||
|
*
|
||||||
|
* # Returns
|
||||||
|
*
|
||||||
|
* An `OperationStatus` indicating success or failure.
|
||||||
|
*
|
||||||
|
* # Safety
|
||||||
|
*
|
||||||
|
* The caller must ensure that:
|
||||||
|
* - `indexer` is a valid pointer to a `IndexerServiceFFI` instance
|
||||||
|
* - The `IndexerServiceFFI` instance was created by this library
|
||||||
|
* - The pointer will not be used after this function returns
|
||||||
|
*/
|
||||||
|
enum OperationStatus stop_indexer(struct IndexerServiceFFI *indexer);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* # Safety
|
||||||
|
* It's up to the caller to pass a proper pointer, if somehow from c/c++ side
|
||||||
|
* this is called with a type which doesn't come from a returned `CString` it
|
||||||
|
* will cause a segfault.
|
||||||
|
*/
|
||||||
|
void free_cstring(char *block);
|
||||||
|
|
||||||
|
bool is_ok(const enum OperationStatus *self);
|
||||||
|
|
||||||
|
bool is_error(const enum OperationStatus *self);
|
||||||
100
indexer_ffi/src/api/lifecycle.rs
Normal file
100
indexer_ffi/src/api/lifecycle.rs
Normal file
@ -0,0 +1,100 @@
|
|||||||
|
use std::{ffi::c_char, path::PathBuf};
|
||||||
|
|
||||||
|
use tokio::runtime::Runtime;
|
||||||
|
|
||||||
|
use crate::{IndexerServiceFFI, api::PointerResult, errors::OperationStatus};
|
||||||
|
|
||||||
|
pub type InitializedIndexerServiceFFIResult = PointerResult<IndexerServiceFFI, OperationStatus>;
|
||||||
|
|
||||||
|
/// Creates and starts an indexer based on the provided
|
||||||
|
/// configuration file path.
|
||||||
|
///
|
||||||
|
/// # Arguments
|
||||||
|
///
|
||||||
|
/// - `config_path`: A pointer to a string representing the path to the configuration file.
|
||||||
|
/// - `port`: Number representing a port, on which indexers RPC will start.
|
||||||
|
///
|
||||||
|
/// # Returns
|
||||||
|
///
|
||||||
|
/// An `InitializedIndexerServiceFFIResult` containing either a pointer to the
|
||||||
|
/// initialized `IndexerServiceFFI` or an error code.
|
||||||
|
#[unsafe(no_mangle)]
|
||||||
|
pub extern "C" fn start_indexer(
|
||||||
|
config_path: *const c_char,
|
||||||
|
port: u16,
|
||||||
|
) -> InitializedIndexerServiceFFIResult {
|
||||||
|
setup_indexer(config_path, port).map_or_else(
|
||||||
|
InitializedIndexerServiceFFIResult::from_error,
|
||||||
|
InitializedIndexerServiceFFIResult::from_value,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Initializes and starts an indexer based on the provided
|
||||||
|
/// configuration file path.
|
||||||
|
///
|
||||||
|
/// # Arguments
|
||||||
|
///
|
||||||
|
/// - `config_path`: A pointer to a string representing the path to the configuration file.
|
||||||
|
/// - `port`: Number representing a port, on which indexers RPC will start.
|
||||||
|
///
|
||||||
|
/// # Returns
|
||||||
|
///
|
||||||
|
/// A `Result` containing either the initialized `IndexerServiceFFI` or an
|
||||||
|
/// error code.
|
||||||
|
fn setup_indexer(
|
||||||
|
config_path: *const c_char,
|
||||||
|
port: u16,
|
||||||
|
) -> Result<IndexerServiceFFI, OperationStatus> {
|
||||||
|
let user_config_path = PathBuf::from(
|
||||||
|
unsafe { std::ffi::CStr::from_ptr(config_path) }
|
||||||
|
.to_str()
|
||||||
|
.map_err(|e| {
|
||||||
|
log::error!("Could not convert the config path to string: {e}");
|
||||||
|
OperationStatus::InitializationError
|
||||||
|
})?,
|
||||||
|
);
|
||||||
|
let config = indexer_service::IndexerConfig::from_path(&user_config_path).map_err(|e| {
|
||||||
|
log::error!("Failed to read config: {e}");
|
||||||
|
OperationStatus::InitializationError
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let rt = Runtime::new().unwrap();
|
||||||
|
|
||||||
|
let indexer_handle = rt
|
||||||
|
.block_on(indexer_service::run_server(config, port))
|
||||||
|
.map_err(|e| {
|
||||||
|
log::error!("Could not start indexer service: {e}");
|
||||||
|
OperationStatus::InitializationError
|
||||||
|
})?;
|
||||||
|
|
||||||
|
Ok(IndexerServiceFFI::new(indexer_handle, rt))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Stops and frees the resources associated with the given indexer service.
|
||||||
|
///
|
||||||
|
/// # Arguments
|
||||||
|
///
|
||||||
|
/// - `indexer`: A pointer to the `IndexerServiceFFI` instance to be stopped.
|
||||||
|
///
|
||||||
|
/// # Returns
|
||||||
|
///
|
||||||
|
/// An `OperationStatus` indicating success or failure.
|
||||||
|
///
|
||||||
|
/// # Safety
|
||||||
|
///
|
||||||
|
/// The caller must ensure that:
|
||||||
|
/// - `indexer` is a valid pointer to a `IndexerServiceFFI` instance
|
||||||
|
/// - The `IndexerServiceFFI` instance was created by this library
|
||||||
|
/// - The pointer will not be used after this function returns
|
||||||
|
#[unsafe(no_mangle)]
|
||||||
|
pub unsafe extern "C" fn stop_indexer(indexer: *mut IndexerServiceFFI) -> OperationStatus {
|
||||||
|
if indexer.is_null() {
|
||||||
|
log::error!("Attempted to stop a null indexer pointer. This is a bug. Aborting.");
|
||||||
|
return OperationStatus::NullPointer;
|
||||||
|
}
|
||||||
|
|
||||||
|
let indexer = unsafe { Box::from_raw(indexer) };
|
||||||
|
drop(indexer);
|
||||||
|
|
||||||
|
OperationStatus::Ok
|
||||||
|
}
|
||||||
14
indexer_ffi/src/api/memory.rs
Normal file
14
indexer_ffi/src/api/memory.rs
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
use std::ffi::{CString, c_char};
|
||||||
|
|
||||||
|
/// # Safety
|
||||||
|
/// It's up to the caller to pass a proper pointer, if somehow from c/c++ side
|
||||||
|
/// this is called with a type which doesn't come from a returned `CString` it
|
||||||
|
/// will cause a segfault.
|
||||||
|
#[unsafe(no_mangle)]
|
||||||
|
pub unsafe extern "C" fn free_cstring(block: *mut c_char) {
|
||||||
|
if block.is_null() {
|
||||||
|
log::error!("Trying to free a null pointer. Exiting");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
drop(unsafe { CString::from_raw(block) });
|
||||||
|
}
|
||||||
5
indexer_ffi/src/api/mod.rs
Normal file
5
indexer_ffi/src/api/mod.rs
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
pub use result::PointerResult;
|
||||||
|
|
||||||
|
pub mod lifecycle;
|
||||||
|
pub mod memory;
|
||||||
|
pub mod result;
|
||||||
29
indexer_ffi/src/api/result.rs
Normal file
29
indexer_ffi/src/api/result.rs
Normal file
@ -0,0 +1,29 @@
|
|||||||
|
/// Simple wrapper around a pointer to a value or an error.
|
||||||
|
///
|
||||||
|
/// Pointer is not guaranteed. You should check the error field before
|
||||||
|
/// dereferencing the pointer.
|
||||||
|
#[repr(C)]
|
||||||
|
pub struct PointerResult<Type, Error> {
|
||||||
|
pub value: *mut Type,
|
||||||
|
pub error: Error,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<Type, Error: Default> PointerResult<Type, Error> {
|
||||||
|
pub fn from_pointer(pointer: *mut Type) -> Self {
|
||||||
|
Self {
|
||||||
|
value: pointer,
|
||||||
|
error: Error::default(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn from_value(value: Type) -> Self {
|
||||||
|
Self::from_pointer(Box::into_raw(Box::new(value)))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub const fn from_error(error: Error) -> Self {
|
||||||
|
Self {
|
||||||
|
value: std::ptr::null_mut(),
|
||||||
|
error,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
22
indexer_ffi/src/errors.rs
Normal file
22
indexer_ffi/src/errors.rs
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
#[derive(Debug, Default, PartialEq, Eq)]
|
||||||
|
#[repr(C)]
|
||||||
|
pub enum OperationStatus {
|
||||||
|
#[default]
|
||||||
|
Ok = 0x0,
|
||||||
|
NullPointer = 0x1,
|
||||||
|
InitializationError = 0x2,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl OperationStatus {
|
||||||
|
#[must_use]
|
||||||
|
#[unsafe(no_mangle)]
|
||||||
|
pub extern "C" fn is_ok(&self) -> bool {
|
||||||
|
*self == Self::Ok
|
||||||
|
}
|
||||||
|
|
||||||
|
#[must_use]
|
||||||
|
#[unsafe(no_mangle)]
|
||||||
|
pub extern "C" fn is_error(&self) -> bool {
|
||||||
|
!self.is_ok()
|
||||||
|
}
|
||||||
|
}
|
||||||
86
indexer_ffi/src/indexer.rs
Normal file
86
indexer_ffi/src/indexer.rs
Normal file
@ -0,0 +1,86 @@
|
|||||||
|
use std::{ffi::c_void, net::SocketAddr};
|
||||||
|
|
||||||
|
use indexer_service::IndexerHandle;
|
||||||
|
use tokio::runtime::Runtime;
|
||||||
|
|
||||||
|
#[repr(C)]
|
||||||
|
pub struct IndexerServiceFFI {
|
||||||
|
indexer_handle: *mut c_void,
|
||||||
|
runtime: *mut c_void,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl IndexerServiceFFI {
|
||||||
|
pub fn new(indexer_handle: indexer_service::IndexerHandle, runtime: Runtime) -> Self {
|
||||||
|
Self {
|
||||||
|
// Box the complex types and convert to opaque pointers
|
||||||
|
indexer_handle: Box::into_raw(Box::new(indexer_handle)).cast::<c_void>(),
|
||||||
|
runtime: Box::into_raw(Box::new(runtime)).cast::<c_void>(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Helper to take ownership back.
|
||||||
|
///
|
||||||
|
/// # Safety
|
||||||
|
///
|
||||||
|
/// The caller must ensure that:
|
||||||
|
/// - `self` is a valid object(contains valid pointers in all fields)
|
||||||
|
#[must_use]
|
||||||
|
pub unsafe fn into_parts(self) -> (Box<IndexerHandle>, Box<Runtime>) {
|
||||||
|
let indexer_handle = unsafe { Box::from_raw(self.indexer_handle.cast::<IndexerHandle>()) };
|
||||||
|
let runtime = unsafe { Box::from_raw(self.runtime.cast::<Runtime>()) };
|
||||||
|
(indexer_handle, runtime)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Helper to get indexer handle addr.
|
||||||
|
///
|
||||||
|
/// # Safety
|
||||||
|
///
|
||||||
|
/// The caller must ensure that:
|
||||||
|
/// - `self` is a valid object(contains valid pointers in all fields)
|
||||||
|
#[must_use]
|
||||||
|
pub const unsafe fn addr(&self) -> SocketAddr {
|
||||||
|
let indexer_handle = unsafe {
|
||||||
|
self.indexer_handle
|
||||||
|
.cast::<IndexerHandle>()
|
||||||
|
.as_ref()
|
||||||
|
.expect("Indexer Handle must be non-null pointer")
|
||||||
|
};
|
||||||
|
|
||||||
|
indexer_handle.addr()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Helper to get indexer handle addr.
|
||||||
|
///
|
||||||
|
/// # Safety
|
||||||
|
///
|
||||||
|
/// The caller must ensure that:
|
||||||
|
/// - `self` is a valid object(contains valid pointers in all fields)
|
||||||
|
#[must_use]
|
||||||
|
pub const unsafe fn handle(&self) -> &IndexerHandle {
|
||||||
|
unsafe {
|
||||||
|
self.indexer_handle
|
||||||
|
.cast::<IndexerHandle>()
|
||||||
|
.as_ref()
|
||||||
|
.expect("Indexer Handle must be non-null pointer")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Implement Drop to prevent memory leaks
|
||||||
|
impl Drop for IndexerServiceFFI {
|
||||||
|
fn drop(&mut self) {
|
||||||
|
let Self {
|
||||||
|
indexer_handle,
|
||||||
|
runtime,
|
||||||
|
} = self;
|
||||||
|
|
||||||
|
if indexer_handle.is_null() {
|
||||||
|
log::error!("Attempted to drop a null indexer pointer. This is a bug");
|
||||||
|
}
|
||||||
|
if runtime.is_null() {
|
||||||
|
log::error!("Attempted to drop a null tokio runtime pointer. This is a bug");
|
||||||
|
}
|
||||||
|
drop(unsafe { Box::from_raw(indexer_handle.cast::<IndexerHandle>()) });
|
||||||
|
drop(unsafe { Box::from_raw(runtime.cast::<Runtime>()) });
|
||||||
|
}
|
||||||
|
}
|
||||||
8
indexer_ffi/src/lib.rs
Normal file
8
indexer_ffi/src/lib.rs
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
#![allow(clippy::undocumented_unsafe_blocks, reason = "It is an FFI")]
|
||||||
|
|
||||||
|
pub use errors::OperationStatus;
|
||||||
|
pub use indexer::IndexerServiceFFI;
|
||||||
|
|
||||||
|
pub mod api;
|
||||||
|
mod errors;
|
||||||
|
mod indexer;
|
||||||
@ -22,6 +22,7 @@ ata_core.workspace = true
|
|||||||
indexer_service_rpc.workspace = true
|
indexer_service_rpc.workspace = true
|
||||||
sequencer_service_rpc = { workspace = true, features = ["client"] }
|
sequencer_service_rpc = { workspace = true, features = ["client"] }
|
||||||
wallet-ffi.workspace = true
|
wallet-ffi.workspace = true
|
||||||
|
indexer_ffi.workspace = true
|
||||||
testnet_initial_state.workspace = true
|
testnet_initial_state.workspace = true
|
||||||
|
|
||||||
url.workspace = true
|
url.workspace = true
|
||||||
|
|||||||
@ -1,12 +1,12 @@
|
|||||||
//! This library contains common code for integration tests.
|
//! This library contains common code for integration tests.
|
||||||
|
|
||||||
use std::{net::SocketAddr, path::PathBuf, sync::LazyLock};
|
use std::sync::LazyLock;
|
||||||
|
|
||||||
use anyhow::{Context as _, Result, bail};
|
use anyhow::{Context as _, Result};
|
||||||
use common::{HashType, transaction::NSSATransaction};
|
use common::{HashType, transaction::NSSATransaction};
|
||||||
use futures::FutureExt as _;
|
use futures::FutureExt as _;
|
||||||
use indexer_service::IndexerHandle;
|
use indexer_service::IndexerHandle;
|
||||||
use log::{debug, error, warn};
|
use log::{debug, error};
|
||||||
use nssa::{AccountId, PrivacyPreservingTransaction};
|
use nssa::{AccountId, PrivacyPreservingTransaction};
|
||||||
use nssa_core::Commitment;
|
use nssa_core::Commitment;
|
||||||
use sequencer_core::indexer_client::{IndexerClient, IndexerClientTrait as _};
|
use sequencer_core::indexer_client::{IndexerClient, IndexerClientTrait as _};
|
||||||
@ -14,9 +14,13 @@ use sequencer_service::SequencerHandle;
|
|||||||
use sequencer_service_rpc::{RpcClient as _, SequencerClient, SequencerClientBuilder};
|
use sequencer_service_rpc::{RpcClient as _, SequencerClient, SequencerClientBuilder};
|
||||||
use tempfile::TempDir;
|
use tempfile::TempDir;
|
||||||
use testcontainers::compose::DockerCompose;
|
use testcontainers::compose::DockerCompose;
|
||||||
use wallet::{WalletCore, config::WalletConfigOverrides};
|
use wallet::WalletCore;
|
||||||
|
|
||||||
|
use crate::setup::{setup_bedrock_node, setup_indexer, setup_sequencer, setup_wallet};
|
||||||
|
|
||||||
pub mod config;
|
pub mod config;
|
||||||
|
pub mod setup;
|
||||||
|
pub mod test_context_ffi;
|
||||||
|
|
||||||
// TODO: Remove this and control time from tests
|
// TODO: Remove this and control time from tests
|
||||||
pub const TIME_TO_WAIT_FOR_BLOCK_SECONDS: u64 = 12;
|
pub const TIME_TO_WAIT_FOR_BLOCK_SECONDS: u64 = 12;
|
||||||
@ -67,13 +71,13 @@ impl TestContext {
|
|||||||
|
|
||||||
debug!("Test context setup");
|
debug!("Test context setup");
|
||||||
|
|
||||||
let (bedrock_compose, bedrock_addr) = Self::setup_bedrock_node().await?;
|
let (bedrock_compose, bedrock_addr) = setup_bedrock_node().await?;
|
||||||
|
|
||||||
let (indexer_handle, temp_indexer_dir) = Self::setup_indexer(bedrock_addr, &initial_data)
|
let (indexer_handle, temp_indexer_dir) = setup_indexer(bedrock_addr, &initial_data)
|
||||||
.await
|
.await
|
||||||
.context("Failed to setup Indexer")?;
|
.context("Failed to setup Indexer")?;
|
||||||
|
|
||||||
let (sequencer_handle, temp_sequencer_dir) = Self::setup_sequencer(
|
let (sequencer_handle, temp_sequencer_dir) = setup_sequencer(
|
||||||
sequencer_partial_config,
|
sequencer_partial_config,
|
||||||
bedrock_addr,
|
bedrock_addr,
|
||||||
indexer_handle.addr(),
|
indexer_handle.addr(),
|
||||||
@ -83,7 +87,7 @@ impl TestContext {
|
|||||||
.context("Failed to setup Sequencer")?;
|
.context("Failed to setup Sequencer")?;
|
||||||
|
|
||||||
let (wallet, temp_wallet_dir, wallet_password) =
|
let (wallet, temp_wallet_dir, wallet_password) =
|
||||||
Self::setup_wallet(sequencer_handle.addr(), &initial_data)
|
setup_wallet(sequencer_handle.addr(), &initial_data)
|
||||||
.await
|
.await
|
||||||
.context("Failed to setup wallet")?;
|
.context("Failed to setup wallet")?;
|
||||||
|
|
||||||
@ -112,165 +116,6 @@ impl TestContext {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn setup_bedrock_node() -> Result<(DockerCompose, SocketAddr)> {
|
|
||||||
let manifest_dir = env!("CARGO_MANIFEST_DIR");
|
|
||||||
let bedrock_compose_path =
|
|
||||||
PathBuf::from(manifest_dir).join("../bedrock/docker-compose.yml");
|
|
||||||
|
|
||||||
let mut compose = DockerCompose::with_auto_client(&[bedrock_compose_path])
|
|
||||||
.await
|
|
||||||
.context("Failed to setup docker compose for Bedrock")?
|
|
||||||
// Setting port to 0 to avoid conflicts between parallel tests, actual port will be retrieved after container is up
|
|
||||||
.with_env("PORT", "0");
|
|
||||||
|
|
||||||
#[expect(
|
|
||||||
clippy::items_after_statements,
|
|
||||||
reason = "This is more readable is this function used just after its definition"
|
|
||||||
)]
|
|
||||||
async fn up_and_retrieve_port(compose: &mut DockerCompose) -> Result<u16> {
|
|
||||||
compose
|
|
||||||
.up()
|
|
||||||
.await
|
|
||||||
.context("Failed to bring up Bedrock services")?;
|
|
||||||
let container = compose
|
|
||||||
.service(BEDROCK_SERVICE_WITH_OPEN_PORT)
|
|
||||||
.with_context(|| {
|
|
||||||
format!(
|
|
||||||
"Failed to get Bedrock service container `{BEDROCK_SERVICE_WITH_OPEN_PORT}`"
|
|
||||||
)
|
|
||||||
})?;
|
|
||||||
|
|
||||||
let ports = container.ports().await.with_context(|| {
|
|
||||||
format!(
|
|
||||||
"Failed to get ports for Bedrock service container `{}`",
|
|
||||||
container.id()
|
|
||||||
)
|
|
||||||
})?;
|
|
||||||
ports
|
|
||||||
.map_to_host_port_ipv4(BEDROCK_SERVICE_PORT)
|
|
||||||
.with_context(|| {
|
|
||||||
format!(
|
|
||||||
"Failed to retrieve host port of {BEDROCK_SERVICE_PORT} container \
|
|
||||||
port for container `{}`, existing ports: {ports:?}",
|
|
||||||
container.id()
|
|
||||||
)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut port = None;
|
|
||||||
let mut attempt = 0_u32;
|
|
||||||
let max_attempts = 5_u32;
|
|
||||||
while port.is_none() && attempt < max_attempts {
|
|
||||||
attempt = attempt
|
|
||||||
.checked_add(1)
|
|
||||||
.expect("We check that attempt < max_attempts, so this won't overflow");
|
|
||||||
match up_and_retrieve_port(&mut compose).await {
|
|
||||||
Ok(p) => {
|
|
||||||
port = Some(p);
|
|
||||||
}
|
|
||||||
Err(err) => {
|
|
||||||
warn!(
|
|
||||||
"Failed to bring up Bedrock services: {err:?}, attempt {attempt}/{max_attempts}"
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
let Some(port) = port else {
|
|
||||||
bail!("Failed to bring up Bedrock services after {max_attempts} attempts");
|
|
||||||
};
|
|
||||||
|
|
||||||
let addr = SocketAddr::from(([127, 0, 0, 1], port));
|
|
||||||
Ok((compose, addr))
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn setup_indexer(
|
|
||||||
bedrock_addr: SocketAddr,
|
|
||||||
initial_data: &config::InitialData,
|
|
||||||
) -> Result<(IndexerHandle, TempDir)> {
|
|
||||||
let temp_indexer_dir =
|
|
||||||
tempfile::tempdir().context("Failed to create temp dir for indexer home")?;
|
|
||||||
|
|
||||||
debug!(
|
|
||||||
"Using temp indexer home at {}",
|
|
||||||
temp_indexer_dir.path().display()
|
|
||||||
);
|
|
||||||
|
|
||||||
let indexer_config = config::indexer_config(
|
|
||||||
bedrock_addr,
|
|
||||||
temp_indexer_dir.path().to_owned(),
|
|
||||||
initial_data,
|
|
||||||
)
|
|
||||||
.context("Failed to create Indexer config")?;
|
|
||||||
|
|
||||||
indexer_service::run_server(indexer_config, 0)
|
|
||||||
.await
|
|
||||||
.context("Failed to run Indexer Service")
|
|
||||||
.map(|handle| (handle, temp_indexer_dir))
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn setup_sequencer(
|
|
||||||
partial: config::SequencerPartialConfig,
|
|
||||||
bedrock_addr: SocketAddr,
|
|
||||||
indexer_addr: SocketAddr,
|
|
||||||
initial_data: &config::InitialData,
|
|
||||||
) -> Result<(SequencerHandle, TempDir)> {
|
|
||||||
let temp_sequencer_dir =
|
|
||||||
tempfile::tempdir().context("Failed to create temp dir for sequencer home")?;
|
|
||||||
|
|
||||||
debug!(
|
|
||||||
"Using temp sequencer home at {}",
|
|
||||||
temp_sequencer_dir.path().display()
|
|
||||||
);
|
|
||||||
|
|
||||||
let config = config::sequencer_config(
|
|
||||||
partial,
|
|
||||||
temp_sequencer_dir.path().to_owned(),
|
|
||||||
bedrock_addr,
|
|
||||||
indexer_addr,
|
|
||||||
initial_data,
|
|
||||||
)
|
|
||||||
.context("Failed to create Sequencer config")?;
|
|
||||||
|
|
||||||
let sequencer_handle = sequencer_service::run(config, 0).await?;
|
|
||||||
|
|
||||||
Ok((sequencer_handle, temp_sequencer_dir))
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn setup_wallet(
|
|
||||||
sequencer_addr: SocketAddr,
|
|
||||||
initial_data: &config::InitialData,
|
|
||||||
) -> Result<(WalletCore, TempDir, String)> {
|
|
||||||
let config = config::wallet_config(sequencer_addr, initial_data)
|
|
||||||
.context("Failed to create Wallet config")?;
|
|
||||||
let config_serialized =
|
|
||||||
serde_json::to_string_pretty(&config).context("Failed to serialize Wallet config")?;
|
|
||||||
|
|
||||||
let temp_wallet_dir =
|
|
||||||
tempfile::tempdir().context("Failed to create temp dir for wallet home")?;
|
|
||||||
|
|
||||||
let config_path = temp_wallet_dir.path().join("wallet_config.json");
|
|
||||||
std::fs::write(&config_path, config_serialized)
|
|
||||||
.context("Failed to write wallet config in temp dir")?;
|
|
||||||
|
|
||||||
let storage_path = temp_wallet_dir.path().join("storage.json");
|
|
||||||
let config_overrides = WalletConfigOverrides::default();
|
|
||||||
|
|
||||||
let wallet_password = "test_pass".to_owned();
|
|
||||||
let (wallet, _mnemonic) = WalletCore::new_init_storage(
|
|
||||||
config_path,
|
|
||||||
storage_path,
|
|
||||||
Some(config_overrides),
|
|
||||||
&wallet_password,
|
|
||||||
)
|
|
||||||
.context("Failed to init wallet")?;
|
|
||||||
wallet
|
|
||||||
.store_persistent_data()
|
|
||||||
.await
|
|
||||||
.context("Failed to store wallet persistent data")?;
|
|
||||||
|
|
||||||
Ok((wallet, temp_wallet_dir, wallet_password))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Get reference to the wallet.
|
/// Get reference to the wallet.
|
||||||
#[must_use]
|
#[must_use]
|
||||||
pub const fn wallet(&self) -> &WalletCore {
|
pub const fn wallet(&self) -> &WalletCore {
|
||||||
|
|||||||
220
integration_tests/src/setup.rs
Normal file
220
integration_tests/src/setup.rs
Normal file
@ -0,0 +1,220 @@
|
|||||||
|
use std::{
|
||||||
|
ffi::{CString, c_char},
|
||||||
|
fs::File,
|
||||||
|
io::Write as _,
|
||||||
|
net::SocketAddr,
|
||||||
|
path::PathBuf,
|
||||||
|
};
|
||||||
|
|
||||||
|
use anyhow::{Context as _, Result, bail};
|
||||||
|
use indexer_ffi::{IndexerServiceFFI, api::lifecycle::InitializedIndexerServiceFFIResult};
|
||||||
|
use indexer_service::IndexerHandle;
|
||||||
|
use log::{debug, warn};
|
||||||
|
use sequencer_service::SequencerHandle;
|
||||||
|
use tempfile::TempDir;
|
||||||
|
use testcontainers::compose::DockerCompose;
|
||||||
|
use wallet::{WalletCore, config::WalletConfigOverrides};
|
||||||
|
|
||||||
|
use crate::{BEDROCK_SERVICE_PORT, BEDROCK_SERVICE_WITH_OPEN_PORT, config};
|
||||||
|
|
||||||
|
unsafe extern "C" {
|
||||||
|
fn start_indexer(config_path: *const c_char, port: u16) -> InitializedIndexerServiceFFIResult;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Bring up the Bedrock node via docker compose and return the compose
/// handle together with the host address of its exposed service port.
///
/// The compose stack is started with `PORT=0` so parallel test runs never
/// collide; the kernel-assigned port is read back from the container after
/// startup. Startup is retried up to `max_attempts` times because container
/// bring-up can fail transiently.
pub(crate) async fn setup_bedrock_node() -> Result<(DockerCompose, SocketAddr)> {
    let manifest_dir = env!("CARGO_MANIFEST_DIR");
    let bedrock_compose_path = PathBuf::from(manifest_dir).join("../bedrock/docker-compose.yml");

    let mut compose = DockerCompose::with_auto_client(&[bedrock_compose_path])
        .await
        .context("Failed to setup docker compose for Bedrock")?
        // Setting port to 0 to avoid conflicts between parallel tests, actual port will be retrieved after container is up
        .with_env("PORT", "0");

    #[expect(
        clippy::items_after_statements,
        reason = "This is more readable is this function used just after its definition"
    )]
    // Start the stack and resolve which host port was mapped to the
    // Bedrock service's fixed container port.
    async fn up_and_retrieve_port(compose: &mut DockerCompose) -> Result<u16> {
        compose
            .up()
            .await
            .context("Failed to bring up Bedrock services")?;
        let container = compose
            .service(BEDROCK_SERVICE_WITH_OPEN_PORT)
            .with_context(|| {
                format!(
                    "Failed to get Bedrock service container `{BEDROCK_SERVICE_WITH_OPEN_PORT}`"
                )
            })?;

        let ports = container.ports().await.with_context(|| {
            format!(
                "Failed to get ports for Bedrock service container `{}`",
                container.id()
            )
        })?;
        ports
            .map_to_host_port_ipv4(BEDROCK_SERVICE_PORT)
            .with_context(|| {
                format!(
                    "Failed to retrieve host port of {BEDROCK_SERVICE_PORT} container \
                     port for container `{}`, existing ports: {ports:?}",
                    container.id()
                )
            })
    }

    // Retry loop: container startup can flake; give it a few attempts
    // before giving up.
    let mut port = None;
    let mut attempt = 0_u32;
    let max_attempts = 5_u32;
    while port.is_none() && attempt < max_attempts {
        attempt = attempt
            .checked_add(1)
            .expect("We check that attempt < max_attempts, so this won't overflow");
        match up_and_retrieve_port(&mut compose).await {
            Ok(p) => {
                port = Some(p);
            }
            Err(err) => {
                warn!(
                    "Failed to bring up Bedrock services: {err:?}, attempt {attempt}/{max_attempts}"
                );
            }
        }
    }
    let Some(port) = port else {
        bail!("Failed to bring up Bedrock services after {max_attempts} attempts");
    };

    // The compose port mapping is bound on the loopback interface.
    let addr = SocketAddr::from(([127, 0, 0, 1], port));
    Ok((compose, addr))
}
|
||||||
|
|
||||||
|
pub(crate) async fn setup_indexer(
|
||||||
|
bedrock_addr: SocketAddr,
|
||||||
|
initial_data: &config::InitialData,
|
||||||
|
) -> Result<(IndexerHandle, TempDir)> {
|
||||||
|
let temp_indexer_dir =
|
||||||
|
tempfile::tempdir().context("Failed to create temp dir for indexer home")?;
|
||||||
|
|
||||||
|
debug!(
|
||||||
|
"Using temp indexer home at {}",
|
||||||
|
temp_indexer_dir.path().display()
|
||||||
|
);
|
||||||
|
|
||||||
|
let indexer_config = config::indexer_config(
|
||||||
|
bedrock_addr,
|
||||||
|
temp_indexer_dir.path().to_owned(),
|
||||||
|
initial_data,
|
||||||
|
)
|
||||||
|
.context("Failed to create Indexer config")?;
|
||||||
|
|
||||||
|
indexer_service::run_server(indexer_config, 0)
|
||||||
|
.await
|
||||||
|
.context("Failed to run Indexer Service")
|
||||||
|
.map(|handle| (handle, temp_indexer_dir))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) async fn setup_sequencer(
|
||||||
|
partial: config::SequencerPartialConfig,
|
||||||
|
bedrock_addr: SocketAddr,
|
||||||
|
indexer_addr: SocketAddr,
|
||||||
|
initial_data: &config::InitialData,
|
||||||
|
) -> Result<(SequencerHandle, TempDir)> {
|
||||||
|
let temp_sequencer_dir =
|
||||||
|
tempfile::tempdir().context("Failed to create temp dir for sequencer home")?;
|
||||||
|
|
||||||
|
debug!(
|
||||||
|
"Using temp sequencer home at {}",
|
||||||
|
temp_sequencer_dir.path().display()
|
||||||
|
);
|
||||||
|
|
||||||
|
let config = config::sequencer_config(
|
||||||
|
partial,
|
||||||
|
temp_sequencer_dir.path().to_owned(),
|
||||||
|
bedrock_addr,
|
||||||
|
indexer_addr,
|
||||||
|
initial_data,
|
||||||
|
)
|
||||||
|
.context("Failed to create Sequencer config")?;
|
||||||
|
|
||||||
|
let sequencer_handle = sequencer_service::run(config, 0).await?;
|
||||||
|
|
||||||
|
Ok((sequencer_handle, temp_sequencer_dir))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create a wallet configured against the given sequencer, backed by a
/// fresh temporary home directory.
///
/// Returns the wallet, the temp dir that holds its config and storage
/// (must outlive the wallet), and the password used to encrypt storage.
pub(crate) async fn setup_wallet(
    sequencer_addr: SocketAddr,
    initial_data: &config::InitialData,
) -> Result<(WalletCore, TempDir, String)> {
    let config = config::wallet_config(sequencer_addr, initial_data)
        .context("Failed to create Wallet config")?;
    let config_serialized =
        serde_json::to_string_pretty(&config).context("Failed to serialize Wallet config")?;

    let temp_wallet_dir =
        tempfile::tempdir().context("Failed to create temp dir for wallet home")?;

    // The wallet reads its config from disk, so write the serialized config
    // into the temp home first.
    let config_path = temp_wallet_dir.path().join("wallet_config.json");
    std::fs::write(&config_path, config_serialized)
        .context("Failed to write wallet config in temp dir")?;

    let storage_path = temp_wallet_dir.path().join("storage.json");
    let config_overrides = WalletConfigOverrides::default();

    // Fixed throwaway password — these wallets only ever hold test data.
    let wallet_password = "test_pass".to_owned();
    let (wallet, _mnemonic) = WalletCore::new_init_storage(
        config_path,
        storage_path,
        Some(config_overrides),
        &wallet_password,
    )
    .context("Failed to init wallet")?;
    // Persist the freshly initialized storage so later reloads succeed.
    wallet
        .store_persistent_data()
        .await
        .context("Failed to store wallet persistent data")?;

    Ok((wallet, temp_wallet_dir, wallet_password))
}
|
||||||
|
|
||||||
|
pub(crate) fn setup_indexer_ffi(
|
||||||
|
bedrock_addr: SocketAddr,
|
||||||
|
initial_data: &config::InitialData,
|
||||||
|
) -> Result<(IndexerServiceFFI, TempDir)> {
|
||||||
|
let temp_indexer_dir =
|
||||||
|
tempfile::tempdir().context("Failed to create temp dir for indexer home")?;
|
||||||
|
|
||||||
|
debug!(
|
||||||
|
"Using temp indexer home at {}",
|
||||||
|
temp_indexer_dir.path().display()
|
||||||
|
);
|
||||||
|
|
||||||
|
let indexer_config = config::indexer_config(
|
||||||
|
bedrock_addr,
|
||||||
|
temp_indexer_dir.path().to_owned(),
|
||||||
|
initial_data,
|
||||||
|
)
|
||||||
|
.context("Failed to create Indexer config")?;
|
||||||
|
|
||||||
|
let config_json = serde_json::to_vec(&indexer_config)?;
|
||||||
|
let config_path = temp_indexer_dir.path().join("indexer_config.json");
|
||||||
|
let mut file = File::create(config_path.as_path())?;
|
||||||
|
file.write_all(&config_json)?;
|
||||||
|
file.flush()?;
|
||||||
|
|
||||||
|
let res =
|
||||||
|
// SAFETY: lib function ensures validity of value.
|
||||||
|
unsafe { start_indexer(CString::new(config_path.to_str().unwrap())?.as_ptr(), 0) };
|
||||||
|
|
||||||
|
if res.error.is_error() {
|
||||||
|
anyhow::bail!("Indexer FFI error {:?}", res.error);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok((
|
||||||
|
// SAFETY: lib function ensures validity of value.
|
||||||
|
unsafe { std::ptr::read(res.value) },
|
||||||
|
temp_indexer_dir,
|
||||||
|
))
|
||||||
|
}
|
||||||
296
integration_tests/src/test_context_ffi.rs
Normal file
296
integration_tests/src/test_context_ffi.rs
Normal file
@ -0,0 +1,296 @@
|
|||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use anyhow::{Context as _, Result};
|
||||||
|
use futures::FutureExt as _;
|
||||||
|
use indexer_ffi::IndexerServiceFFI;
|
||||||
|
use indexer_service_rpc::RpcClient as _;
|
||||||
|
use log::{debug, error};
|
||||||
|
use nssa::AccountId;
|
||||||
|
use sequencer_core::indexer_client::{IndexerClient, IndexerClientTrait as _};
|
||||||
|
use sequencer_service::SequencerHandle;
|
||||||
|
use sequencer_service_rpc::{RpcClient as _, SequencerClient, SequencerClientBuilder};
|
||||||
|
use tempfile::TempDir;
|
||||||
|
use testcontainers::compose::DockerCompose;
|
||||||
|
use wallet::WalletCore;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
BEDROCK_SERVICE_WITH_OPEN_PORT, LOGGER, TestContextBuilder, config,
|
||||||
|
setup::{setup_bedrock_node, setup_indexer_ffi, setup_sequencer, setup_wallet},
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Test context which sets up a sequencer, indexer through ffi and a wallet for integration tests.
///
/// It's memory and logically safe to create multiple instances of this struct in parallel tests,
/// as each instance uses its own temporary directories for sequencer and wallet data.
// NOTE: Order of fields is important for proper drop order.
pub struct TestContextFFI {
    // RPC client pointed at the spawned sequencer.
    sequencer_client: SequencerClient,
    // RPC client pointed at the FFI-spawned indexer.
    indexer_client: IndexerClient,
    wallet: WalletCore,
    // Password used to encrypt the test wallet's storage.
    wallet_password: String,
    /// Optional to move out value in Drop.
    sequencer_handle: Option<SequencerHandle>,
    bedrock_compose: DockerCompose,
    // Temp dirs are kept only to pin their lifetime to the context;
    // dropping them deletes the on-disk state.
    _temp_indexer_dir: TempDir,
    _temp_sequencer_dir: TempDir,
    _temp_wallet_dir: TempDir,
}
|
||||||
|
|
||||||
|
#[expect(
|
||||||
|
clippy::multiple_inherent_impl,
|
||||||
|
reason = "It is more natural to have this implementation here"
|
||||||
|
)]
|
||||||
|
impl TestContextBuilder {
|
||||||
|
pub fn build_ffi(
|
||||||
|
self,
|
||||||
|
runtime: &Arc<tokio::runtime::Runtime>,
|
||||||
|
) -> Result<(TestContextFFI, IndexerServiceFFI)> {
|
||||||
|
TestContextFFI::new_configured(
|
||||||
|
self.sequencer_partial_config.unwrap_or_default(),
|
||||||
|
&self.initial_data.unwrap_or_else(|| {
|
||||||
|
config::InitialData::with_two_public_and_two_private_initialized_accounts()
|
||||||
|
}),
|
||||||
|
runtime,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TestContextFFI {
    /// Create new test context.
    pub fn new(runtime: &Arc<tokio::runtime::Runtime>) -> Result<(Self, IndexerServiceFFI)> {
        Self::builder().build_ffi(runtime)
    }

    /// Start building a test context with custom configuration.
    #[must_use]
    pub const fn builder() -> TestContextBuilder {
        TestContextBuilder::new()
    }

    // Full bring-up: Bedrock (docker) -> indexer (FFI) -> sequencer ->
    // wallet -> RPC clients. Order matters; each step depends on the
    // address of the previous one.
    fn new_configured(
        sequencer_partial_config: config::SequencerPartialConfig,
        initial_data: &config::InitialData,
        runtime: &Arc<tokio::runtime::Runtime>,
    ) -> Result<(Self, IndexerServiceFFI)> {
        // Ensure logger is initialized only once
        *LOGGER;

        debug!("Test context setup");

        let (bedrock_compose, bedrock_addr) = runtime.block_on(setup_bedrock_node())?;

        let (indexer_ffi, temp_indexer_dir) =
            setup_indexer_ffi(bedrock_addr, initial_data).context("Failed to setup Indexer")?;

        let (sequencer_handle, temp_sequencer_dir) = runtime
            .block_on(setup_sequencer(
                sequencer_partial_config,
                bedrock_addr,
                // SAFETY: addr is valid if indexer_ffi is valid.
                unsafe { indexer_ffi.addr() },
                initial_data,
            ))
            .context("Failed to setup Sequencer")?;

        let (wallet, temp_wallet_dir, wallet_password) = runtime
            .block_on(setup_wallet(sequencer_handle.addr(), initial_data))
            .context("Failed to setup wallet")?;

        // Sequencer speaks HTTP; indexer subscriptions go over WebSocket.
        let sequencer_url = config::addr_to_url(config::UrlProtocol::Http, sequencer_handle.addr())
            .context("Failed to convert sequencer addr to URL")?;
        let indexer_url = config::addr_to_url(
            config::UrlProtocol::Ws,
            // SAFETY: addr is valid if indexer_ffi is valid.
            unsafe { indexer_ffi.addr() },
        )
        .context("Failed to convert indexer addr to URL")?;
        let sequencer_client = SequencerClientBuilder::default()
            .build(sequencer_url)
            .context("Failed to create sequencer client")?;
        let indexer_client = runtime
            .block_on(IndexerClient::new(&indexer_url))
            .context("Failed to create indexer client")?;

        // `indexer_ffi` is returned separately so the caller controls when
        // the FFI-owned indexer is torn down relative to the context.
        Ok((
            Self {
                sequencer_client,
                indexer_client,
                wallet,
                wallet_password,
                bedrock_compose,
                sequencer_handle: Some(sequencer_handle),
                _temp_indexer_dir: temp_indexer_dir,
                _temp_sequencer_dir: temp_sequencer_dir,
                _temp_wallet_dir: temp_wallet_dir,
            },
            indexer_ffi,
        ))
    }

    /// Get reference to the wallet.
    #[must_use]
    pub const fn wallet(&self) -> &WalletCore {
        &self.wallet
    }

    /// Get the password protecting the test wallet's storage.
    #[must_use]
    pub fn wallet_password(&self) -> &str {
        &self.wallet_password
    }

    /// Get mutable reference to the wallet.
    pub const fn wallet_mut(&mut self) -> &mut WalletCore {
        &mut self.wallet
    }

    /// Get reference to the sequencer client.
    #[must_use]
    pub const fn sequencer_client(&self) -> &SequencerClient {
        &self.sequencer_client
    }

    /// Get reference to the indexer client.
    #[must_use]
    pub const fn indexer_client(&self) -> &IndexerClient {
        &self.indexer_client
    }

    /// Get existing public account IDs in the wallet.
    #[must_use]
    pub fn existing_public_accounts(&self) -> Vec<AccountId> {
        self.wallet
            .storage()
            .user_data
            .public_account_ids()
            .collect()
    }

    /// Get existing private account IDs in the wallet.
    #[must_use]
    pub fn existing_private_accounts(&self) -> Vec<AccountId> {
        self.wallet
            .storage()
            .user_data
            .private_account_ids()
            .collect()
    }

    /// Query the sequencer for the ID of its latest block.
    pub fn get_last_block_sequencer(&self, runtime: &Arc<tokio::runtime::Runtime>) -> Result<u64> {
        Ok(runtime.block_on(self.sequencer_client.get_last_block_id())?)
    }

    /// Query the indexer for the ID of its latest finalized block.
    pub fn get_last_block_indexer(&self, runtime: &Arc<tokio::runtime::Runtime>) -> Result<u64> {
        Ok(runtime.block_on(self.indexer_client.get_last_finalized_block_id())?)
    }
}
|
||||||
|
|
||||||
|
impl Drop for TestContextFFI {
    // On teardown, report (but do not fail on) services that died early:
    // a sequencer that already stopped or a Bedrock container that is no
    // longer running usually explains a test failure.
    fn drop(&mut self) {
        let Self {
            sequencer_handle,
            bedrock_compose,
            _temp_indexer_dir: _,
            _temp_sequencer_dir: _,
            _temp_wallet_dir: _,
            sequencer_client: _,
            indexer_client: _,
            wallet: _,
            wallet_password: _,
        } = self;

        let sequencer_handle = sequencer_handle
            .take()
            .expect("Sequencer handle should be present in TestContext drop");
        if !sequencer_handle.is_healthy() {
            // An unhealthy handle must have already failed, so `failed()`
            // should resolve immediately without awaiting.
            let Err(err) = sequencer_handle
                .failed()
                .now_or_never()
                .expect("Sequencer handle should not be running");
            error!(
                "Sequencer handle has unexpectedly stopped before TestContext drop with error: {err:#}"
            );
        }

        // Shell out to `docker inspect` to verify the Bedrock container is
        // still alive; a dead container is logged for post-mortem debugging.
        let container = bedrock_compose
            .service(BEDROCK_SERVICE_WITH_OPEN_PORT)
            .unwrap_or_else(|| {
                panic!("Failed to get Bedrock service container `{BEDROCK_SERVICE_WITH_OPEN_PORT}`")
            });
        let output = std::process::Command::new("docker")
            .args(["inspect", "-f", "{{.State.Running}}", container.id()])
            .output()
            .expect("Failed to execute docker inspect command to check if Bedrock container is still running");
        let stdout = String::from_utf8(output.stdout)
            .expect("Failed to parse docker inspect output as String");
        if stdout.trim() != "true" {
            error!(
                "Bedrock container `{}` is not running during TestContext drop, docker inspect output: {stdout}",
                container.id()
            );
        }
    }
}
|
||||||
|
|
||||||
|
/// A test context with ffi to be used in normal #[test] tests.
pub struct BlockingTestContextFFI {
    // Option so Drop can move the context out and destroy it inside the
    // runtime (async cleanup needs an active runtime).
    ctx: Option<TestContextFFI>,
    // Runtime used by the blocking wrappers; shared so callers can clone it.
    runtime: Arc<tokio::runtime::Runtime>,
    // FFI-owned indexer; kept here so its health can be checked in Drop.
    indexer_ffi: IndexerServiceFFI,
}
|
||||||
|
|
||||||
|
impl BlockingTestContextFFI {
|
||||||
|
pub fn new() -> Result<Self> {
|
||||||
|
let runtime = tokio::runtime::Runtime::new().unwrap();
|
||||||
|
let runtime_wrapped = Arc::new(runtime);
|
||||||
|
let (ctx, indexer_ffi) = TestContextFFI::new(&runtime_wrapped)?;
|
||||||
|
Ok(Self {
|
||||||
|
ctx: Some(ctx),
|
||||||
|
runtime: runtime_wrapped,
|
||||||
|
indexer_ffi,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
#[must_use]
|
||||||
|
pub const fn ctx(&self) -> &TestContextFFI {
|
||||||
|
self.ctx.as_ref().expect("TestContext is set")
|
||||||
|
}
|
||||||
|
|
||||||
|
#[must_use]
|
||||||
|
pub const fn ctx_mut(&mut self) -> &mut TestContextFFI {
|
||||||
|
self.ctx.as_mut().expect("TestContext is set")
|
||||||
|
}
|
||||||
|
|
||||||
|
#[must_use]
|
||||||
|
pub const fn runtime(&self) -> &Arc<tokio::runtime::Runtime> {
|
||||||
|
&self.runtime
|
||||||
|
}
|
||||||
|
|
||||||
|
#[must_use]
|
||||||
|
pub fn runtime_clone(&self) -> Arc<tokio::runtime::Runtime> {
|
||||||
|
Arc::<tokio::runtime::Runtime>::clone(&self.runtime)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Drop for BlockingTestContextFFI {
    fn drop(&mut self) {
        let Self {
            ctx,
            runtime,
            indexer_ffi,
        } = self;

        // Ensure async cleanup of TestContext by blocking on its drop in the runtime.
        runtime.block_on(async {
            if let Some(ctx) = ctx.take() {
                drop(ctx);
            }
        });

        // After the context is gone, check whether the FFI indexer is still
        // healthy; an early death usually explains a test failure.
        let indexer_handle =
            // SAFETY: lib function ensures validity of value.
            unsafe { indexer_ffi.handle() };

        if !indexer_handle.is_healthy() {
            error!("Indexer handle has unexpectedly stopped before TestContext drop");
        }
    }
}
|
||||||
@ -14,7 +14,6 @@ use integration_tests::{
|
|||||||
};
|
};
|
||||||
use log::info;
|
use log::info;
|
||||||
use nssa::AccountId;
|
use nssa::AccountId;
|
||||||
use tokio::test;
|
|
||||||
use wallet::cli::{Command, programs::native_token_transfer::AuthTransferSubcommand};
|
use wallet::cli::{Command, programs::native_token_transfer::AuthTransferSubcommand};
|
||||||
|
|
||||||
/// Maximum time to wait for the indexer to catch up to the sequencer.
|
/// Maximum time to wait for the indexer to catch up to the sequencer.
|
||||||
@ -53,7 +52,7 @@ async fn wait_for_indexer_to_catch_up(ctx: &TestContext) -> u64 {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[tokio::test]
|
||||||
async fn indexer_test_run() -> Result<()> {
|
async fn indexer_test_run() -> Result<()> {
|
||||||
let ctx = TestContext::new().await?;
|
let ctx = TestContext::new().await?;
|
||||||
|
|
||||||
@ -70,7 +69,7 @@ async fn indexer_test_run() -> Result<()> {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[tokio::test]
|
||||||
async fn indexer_block_batching() -> Result<()> {
|
async fn indexer_block_batching() -> Result<()> {
|
||||||
let ctx = TestContext::new().await?;
|
let ctx = TestContext::new().await?;
|
||||||
|
|
||||||
@ -101,7 +100,7 @@ async fn indexer_block_batching() -> Result<()> {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[tokio::test]
|
||||||
async fn indexer_state_consistency() -> Result<()> {
|
async fn indexer_state_consistency() -> Result<()> {
|
||||||
let mut ctx = TestContext::new().await?;
|
let mut ctx = TestContext::new().await?;
|
||||||
|
|
||||||
@ -204,7 +203,7 @@ async fn indexer_state_consistency() -> Result<()> {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[tokio::test]
|
||||||
async fn indexer_state_consistency_with_labels() -> Result<()> {
|
async fn indexer_state_consistency_with_labels() -> Result<()> {
|
||||||
let mut ctx = TestContext::new().await?;
|
let mut ctx = TestContext::new().await?;
|
||||||
|
|
||||||
|
|||||||
284
integration_tests/tests/indexer_ffi.rs
Normal file
284
integration_tests/tests/indexer_ffi.rs
Normal file
@ -0,0 +1,284 @@
|
|||||||
|
#![expect(
|
||||||
|
clippy::shadow_unrelated,
|
||||||
|
clippy::tests_outside_test_module,
|
||||||
|
reason = "We don't care about these in tests"
|
||||||
|
)]
|
||||||
|
|
||||||
|
use anyhow::{Context as _, Result};
|
||||||
|
use indexer_service_rpc::RpcClient as _;
|
||||||
|
use integration_tests::{
|
||||||
|
TIME_TO_WAIT_FOR_BLOCK_SECONDS, format_private_account_id, format_public_account_id,
|
||||||
|
test_context_ffi::BlockingTestContextFFI, verify_commitment_is_in_state,
|
||||||
|
};
|
||||||
|
use log::info;
|
||||||
|
use nssa::AccountId;
|
||||||
|
use wallet::cli::{Command, programs::native_token_transfer::AuthTransferSubcommand};
|
||||||
|
|
||||||
|
/// Maximum time to wait for the indexer to catch up to the sequencer,
/// in milliseconds.
const L2_TO_L1_TIMEOUT_MILLIS: u64 = 180_000;
|
||||||
|
|
||||||
|
/// Smoke test: after waiting out the L2->L1 timeout, the indexer must have
/// processed more than one block.
#[test]
fn indexer_test_run_ffi() -> Result<()> {
    let harness = BlockingTestContextFFI::new()?;
    let runtime = harness.runtime();

    // RUN OBSERVATION
    let observation_window = std::time::Duration::from_millis(L2_TO_L1_TIMEOUT_MILLIS);
    runtime.block_on(tokio::time::sleep(observation_window));

    let last_block_indexer = harness.ctx().get_last_block_indexer(runtime)?;

    info!("Last block on ind now is {last_block_indexer}");

    assert!(last_block_indexer > 1);

    Ok(())
}
|
||||||
|
|
||||||
|
/// Verifies that the indexer serves a batch of blocks forming a consistent
/// hash chain (each block's `prev_block_hash` matches its predecessor).
#[test]
fn indexer_ffi_block_batching() -> Result<()> {
    let blocking_ctx = BlockingTestContextFFI::new()?;
    let runtime_wrapped = blocking_ctx.runtime();
    let ctx = blocking_ctx.ctx();

    // WAIT
    info!("Waiting for indexer to parse blocks");
    runtime_wrapped.block_on(async {
        tokio::time::sleep(std::time::Duration::from_millis(L2_TO_L1_TIMEOUT_MILLIS)).await;
    });

    // Propagate RPC failures via `?` with context instead of unwrapping:
    // this test already returns `Result`, so a panic here would hide the
    // underlying error from the test report.
    let last_block_indexer = runtime_wrapped
        .block_on(ctx.indexer_client().get_last_finalized_block_id())
        .context("Failed to get last finalized block id from indexer")?;

    info!("Last block on ind now is {last_block_indexer}");

    assert!(last_block_indexer > 1);

    // Getting wide batch to fit all blocks (from latest backwards)
    let mut block_batch = runtime_wrapped
        .block_on(ctx.indexer_client().get_blocks(None, 100))
        .context("Failed to fetch block batch from indexer")?;

    // Reverse to check chain consistency from oldest to newest
    block_batch.reverse();

    // Checking chain consistency: every block must link to its predecessor.
    let mut prev_block_hash = block_batch.first().context("Block batch is empty")?.header.hash;

    for block in &block_batch[1..] {
        assert_eq!(block.header.prev_block_hash, prev_block_hash);

        info!("Block {} chain-consistent", block.header.block_id);

        prev_block_hash = block.header.hash;
    }

    Ok(())
}
|
||||||
|
|
||||||
|
/// End-to-end consistency check: performs a public and a private transfer,
/// verifies sequencer-side balances and commitments, then asserts the
/// indexer's view of the public accounts matches the sequencer's.
#[test]
fn indexer_ffi_state_consistency() -> Result<()> {
    let mut blocking_ctx = BlockingTestContextFFI::new()?;
    let runtime_wrapped = blocking_ctx.runtime_clone();
    let ctx = blocking_ctx.ctx_mut();

    // Small helper: synchronously wait the given number of milliseconds on
    // the test runtime.
    let wait_millis =
        |ms: u64| runtime_wrapped.block_on(tokio::time::sleep(std::time::Duration::from_millis(ms)));

    // Public transfer between the two pre-existing public accounts.
    let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
        from: Some(format_public_account_id(ctx.existing_public_accounts()[0])),
        from_label: None,
        to: Some(format_public_account_id(ctx.existing_public_accounts()[1])),
        to_label: None,
        to_npk: None,
        to_vpk: None,
        amount: 100,
    });

    runtime_wrapped.block_on(wallet::cli::execute_subcommand(ctx.wallet_mut(), command))?;

    info!("Waiting for next block creation");
    // NOTE(review): the constant is named *_SECONDS but is passed to
    // `from_millis` — confirm the intended unit against its definition.
    wait_millis(TIME_TO_WAIT_FOR_BLOCK_SECONDS);

    info!("Checking correct balance move");
    let acc_1_balance =
        runtime_wrapped.block_on(sequencer_service_rpc::RpcClient::get_account_balance(
            ctx.sequencer_client(),
            ctx.existing_public_accounts()[0],
        ))?;
    let acc_2_balance =
        runtime_wrapped.block_on(sequencer_service_rpc::RpcClient::get_account_balance(
            ctx.sequencer_client(),
            ctx.existing_public_accounts()[1],
        ))?;

    info!("Balance of sender: {acc_1_balance:#?}");
    info!("Balance of receiver: {acc_2_balance:#?}");

    assert_eq!(acc_1_balance, 9900);
    assert_eq!(acc_2_balance, 20100);

    // Private transfer between the two pre-existing private accounts.
    let from: AccountId = ctx.existing_private_accounts()[0];
    let to: AccountId = ctx.existing_private_accounts()[1];

    let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
        from: Some(format_private_account_id(from)),
        from_label: None,
        to: Some(format_private_account_id(to)),
        to_label: None,
        to_npk: None,
        to_vpk: None,
        amount: 100,
    });

    runtime_wrapped.block_on(wallet::cli::execute_subcommand(ctx.wallet_mut(), command))?;

    info!("Waiting for next block creation");
    wait_millis(TIME_TO_WAIT_FOR_BLOCK_SECONDS);

    // Both new commitments must be present in the sequencer state.
    let new_commitment1 = ctx
        .wallet()
        .get_private_account_commitment(from)
        .context("Failed to get private account commitment for sender")?;
    let commitment_check1 = runtime_wrapped.block_on(verify_commitment_is_in_state(
        new_commitment1,
        ctx.sequencer_client(),
    ));
    assert!(commitment_check1);

    let new_commitment2 = ctx
        .wallet()
        .get_private_account_commitment(to)
        .context("Failed to get private account commitment for receiver")?;
    let commitment_check2 = runtime_wrapped.block_on(verify_commitment_is_in_state(
        new_commitment2,
        ctx.sequencer_client(),
    ));
    assert!(commitment_check2);

    info!("Successfully transferred privately to owned account");

    // WAIT
    info!("Waiting for indexer to parse blocks");
    wait_millis(L2_TO_L1_TIMEOUT_MILLIS);

    // Indexer view of the two public accounts.
    let acc1_ind_state = runtime_wrapped.block_on(
        ctx.indexer_client()
            .get_account(ctx.existing_public_accounts()[0].into()),
    )?;
    let acc2_ind_state = runtime_wrapped.block_on(
        ctx.indexer_client()
            .get_account(ctx.existing_public_accounts()[1].into()),
    )?;

    info!("Checking correct state transition");
    // Sequencer view of the same accounts — both views must agree.
    let acc1_seq_state =
        runtime_wrapped.block_on(sequencer_service_rpc::RpcClient::get_account(
            ctx.sequencer_client(),
            ctx.existing_public_accounts()[0],
        ))?;
    let acc2_seq_state =
        runtime_wrapped.block_on(sequencer_service_rpc::RpcClient::get_account(
            ctx.sequencer_client(),
            ctx.existing_public_accounts()[1],
        ))?;

    assert_eq!(acc1_ind_state, acc1_seq_state.into());
    assert_eq!(acc2_ind_state, acc2_seq_state.into());

    // ToDo: Check private state transition

    Ok(())
}
|
||||||
|
|
||||||
|
/// Same consistency check as `indexer_ffi_state_consistency`, but the
/// transfer is addressed via wallet labels rather than raw account IDs.
#[test]
fn indexer_ffi_state_consistency_with_labels() -> Result<()> {
    let mut blocking_ctx = BlockingTestContextFFI::new()?;
    let runtime_wrapped = blocking_ctx.runtime_clone();
    let ctx = blocking_ctx.ctx_mut();

    // Small helper: synchronously wait the given number of milliseconds on
    // the test runtime.
    let wait_millis =
        |ms: u64| runtime_wrapped.block_on(tokio::time::sleep(std::time::Duration::from_millis(ms)));

    // Assign labels to both accounts
    let from_label = "idx-sender-label".to_owned();
    let to_label_str = "idx-receiver-label".to_owned();

    let label_cmd = Command::Account(wallet::cli::account::AccountSubcommand::Label {
        account_id: Some(format_public_account_id(ctx.existing_public_accounts()[0])),
        account_label: None,
        label: from_label.clone(),
    });
    runtime_wrapped.block_on(wallet::cli::execute_subcommand(ctx.wallet_mut(), label_cmd))?;

    let label_cmd = Command::Account(wallet::cli::account::AccountSubcommand::Label {
        account_id: Some(format_public_account_id(ctx.existing_public_accounts()[1])),
        account_label: None,
        label: to_label_str.clone(),
    });
    runtime_wrapped.block_on(wallet::cli::execute_subcommand(ctx.wallet_mut(), label_cmd))?;

    // Send using labels instead of account IDs
    let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
        from: None,
        from_label: Some(from_label),
        to: None,
        to_label: Some(to_label_str),
        to_npk: None,
        to_vpk: None,
        amount: 100,
    });

    runtime_wrapped.block_on(wallet::cli::execute_subcommand(ctx.wallet_mut(), command))?;

    info!("Waiting for next block creation");
    // NOTE(review): the constant is named *_SECONDS but is passed to
    // `from_millis` — confirm the intended unit against its definition.
    wait_millis(TIME_TO_WAIT_FOR_BLOCK_SECONDS);

    let acc_1_balance =
        runtime_wrapped.block_on(sequencer_service_rpc::RpcClient::get_account_balance(
            ctx.sequencer_client(),
            ctx.existing_public_accounts()[0],
        ))?;
    let acc_2_balance =
        runtime_wrapped.block_on(sequencer_service_rpc::RpcClient::get_account_balance(
            ctx.sequencer_client(),
            ctx.existing_public_accounts()[1],
        ))?;

    assert_eq!(acc_1_balance, 9900);
    assert_eq!(acc_2_balance, 20100);

    info!("Waiting for indexer to parse blocks");
    wait_millis(L2_TO_L1_TIMEOUT_MILLIS);

    // Indexer and sequencer views of the sender account must agree.
    let acc1_ind_state = runtime_wrapped.block_on(
        ctx.indexer_client()
            .get_account(ctx.existing_public_accounts()[0].into()),
    )?;
    let acc1_seq_state =
        runtime_wrapped.block_on(sequencer_service_rpc::RpcClient::get_account(
            ctx.sequencer_client(),
            ctx.existing_public_accounts()[0],
        ))?;

    assert_eq!(acc1_ind_state, acc1_seq_state.into());

    info!("Indexer state is consistent after label-based transfer");

    Ok(())
}
|
||||||
@ -1,6 +1,9 @@
|
|||||||
use std::{path::Path, sync::Arc};
|
use std::{path::Path, sync::Arc};
|
||||||
|
|
||||||
use common::block::Block;
|
use common::{
|
||||||
|
block::Block,
|
||||||
|
transaction::{NSSATransaction, clock_invocation},
|
||||||
|
};
|
||||||
use nssa::V03State;
|
use nssa::V03State;
|
||||||
use rocksdb::{
|
use rocksdb::{
|
||||||
BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options,
|
BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options,
|
||||||
@ -169,22 +172,52 @@ impl RocksDBIO {
|
|||||||
for block in self.get_block_batch_seq(
|
for block in self.get_block_batch_seq(
|
||||||
start.checked_add(1).expect("Will be lesser that u64::MAX")..=block_id,
|
start.checked_add(1).expect("Will be lesser that u64::MAX")..=block_id,
|
||||||
)? {
|
)? {
|
||||||
for transaction in block.body.transactions {
|
let expected_clock =
|
||||||
transaction
|
NSSATransaction::Public(clock_invocation(block.header.timestamp));
|
||||||
.transaction_stateless_check()
|
|
||||||
.map_err(|err| {
|
if let Some((clock_tx, user_txs)) = block.body.transactions.split_last() {
|
||||||
DbError::db_interaction_error(format!(
|
if *clock_tx != expected_clock {
|
||||||
"transaction pre check failed with err {err:?}"
|
return Err(DbError::db_interaction_error(
|
||||||
))
|
"Last transaction in block must be the clock invocation for the block timestamp"
|
||||||
})?
|
.to_owned(),
|
||||||
.execute_check_on_state(
|
));
|
||||||
&mut breakpoint,
|
}
|
||||||
|
for transaction in user_txs {
|
||||||
|
transaction
|
||||||
|
.clone()
|
||||||
|
.transaction_stateless_check()
|
||||||
|
.map_err(|err| {
|
||||||
|
DbError::db_interaction_error(format!(
|
||||||
|
"transaction pre check failed with err {err:?}"
|
||||||
|
))
|
||||||
|
})?
|
||||||
|
.execute_check_on_state(
|
||||||
|
&mut breakpoint,
|
||||||
|
block.header.block_id,
|
||||||
|
block.header.timestamp,
|
||||||
|
)
|
||||||
|
.map_err(|err| {
|
||||||
|
DbError::db_interaction_error(format!(
|
||||||
|
"transaction execution failed with err {err:?}"
|
||||||
|
))
|
||||||
|
})?;
|
||||||
|
}
|
||||||
|
|
||||||
|
let NSSATransaction::Public(clock_public_tx) = clock_tx else {
|
||||||
|
return Err(DbError::db_interaction_error(
|
||||||
|
"Clock invocation must be a public transaction".to_owned(),
|
||||||
|
));
|
||||||
|
};
|
||||||
|
|
||||||
|
breakpoint
|
||||||
|
.transition_from_public_transaction(
|
||||||
|
clock_public_tx,
|
||||||
block.header.block_id,
|
block.header.block_id,
|
||||||
block.header.timestamp,
|
block.header.timestamp,
|
||||||
)
|
)
|
||||||
.map_err(|err| {
|
.map_err(|err| {
|
||||||
DbError::db_interaction_error(format!(
|
DbError::db_interaction_error(format!(
|
||||||
"transaction execution failed with err {err:?}"
|
"clock transaction execution failed with err {err:?}"
|
||||||
))
|
))
|
||||||
})?;
|
})?;
|
||||||
}
|
}
|
||||||
@ -213,6 +246,7 @@ fn closest_breakpoint_id(block_id: u64) -> u64 {
|
|||||||
#[expect(clippy::shadow_unrelated, reason = "Fine for tests")]
|
#[expect(clippy::shadow_unrelated, reason = "Fine for tests")]
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
|
use common::test_utils::produce_dummy_block;
|
||||||
use nssa::{AccountId, PublicKey};
|
use nssa::{AccountId, PublicKey};
|
||||||
use tempfile::tempdir;
|
use tempfile::tempdir;
|
||||||
|
|
||||||
@ -302,7 +336,7 @@ mod tests {
|
|||||||
|
|
||||||
let transfer_tx =
|
let transfer_tx =
|
||||||
common::test_utils::create_transaction_native_token_transfer(from, 0, to, 1, &sign_key);
|
common::test_utils::create_transaction_native_token_transfer(from, 0, to, 1, &sign_key);
|
||||||
let block = common::test_utils::produce_dummy_block(2, Some(prev_hash), vec![transfer_tx]);
|
let block = produce_dummy_block(2, Some(prev_hash), vec![transfer_tx]);
|
||||||
|
|
||||||
dbio.put_block(&block, [1; 32]).unwrap();
|
dbio.put_block(&block, [1; 32]).unwrap();
|
||||||
|
|
||||||
@ -369,11 +403,7 @@ mod tests {
|
|||||||
1,
|
1,
|
||||||
&sign_key,
|
&sign_key,
|
||||||
);
|
);
|
||||||
let block = common::test_utils::produce_dummy_block(
|
let block = produce_dummy_block((i + 1).into(), Some(prev_hash), vec![transfer_tx]);
|
||||||
(i + 1).into(),
|
|
||||||
Some(prev_hash),
|
|
||||||
vec![transfer_tx],
|
|
||||||
);
|
|
||||||
dbio.put_block(&block, [i; 32]).unwrap();
|
dbio.put_block(&block, [i; 32]).unwrap();
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -439,7 +469,7 @@ mod tests {
|
|||||||
let prev_hash = last_block.header.hash;
|
let prev_hash = last_block.header.hash;
|
||||||
let transfer_tx =
|
let transfer_tx =
|
||||||
common::test_utils::create_transaction_native_token_transfer(from, 0, to, 1, &sign_key);
|
common::test_utils::create_transaction_native_token_transfer(from, 0, to, 1, &sign_key);
|
||||||
let block = common::test_utils::produce_dummy_block(2, Some(prev_hash), vec![transfer_tx]);
|
let block = produce_dummy_block(2, Some(prev_hash), vec![transfer_tx]);
|
||||||
|
|
||||||
let control_hash1 = block.header.hash;
|
let control_hash1 = block.header.hash;
|
||||||
|
|
||||||
@ -451,7 +481,7 @@ mod tests {
|
|||||||
let prev_hash = last_block.header.hash;
|
let prev_hash = last_block.header.hash;
|
||||||
let transfer_tx =
|
let transfer_tx =
|
||||||
common::test_utils::create_transaction_native_token_transfer(from, 1, to, 1, &sign_key);
|
common::test_utils::create_transaction_native_token_transfer(from, 1, to, 1, &sign_key);
|
||||||
let block = common::test_utils::produce_dummy_block(3, Some(prev_hash), vec![transfer_tx]);
|
let block = produce_dummy_block(3, Some(prev_hash), vec![transfer_tx]);
|
||||||
|
|
||||||
let control_hash2 = block.header.hash;
|
let control_hash2 = block.header.hash;
|
||||||
|
|
||||||
@ -466,7 +496,7 @@ mod tests {
|
|||||||
|
|
||||||
let control_tx_hash1 = transfer_tx.hash();
|
let control_tx_hash1 = transfer_tx.hash();
|
||||||
|
|
||||||
let block = common::test_utils::produce_dummy_block(4, Some(prev_hash), vec![transfer_tx]);
|
let block = produce_dummy_block(4, Some(prev_hash), vec![transfer_tx]);
|
||||||
dbio.put_block(&block, [3; 32]).unwrap();
|
dbio.put_block(&block, [3; 32]).unwrap();
|
||||||
|
|
||||||
let last_id = dbio.get_meta_last_block_in_db().unwrap();
|
let last_id = dbio.get_meta_last_block_in_db().unwrap();
|
||||||
@ -478,7 +508,7 @@ mod tests {
|
|||||||
|
|
||||||
let control_tx_hash2 = transfer_tx.hash();
|
let control_tx_hash2 = transfer_tx.hash();
|
||||||
|
|
||||||
let block = common::test_utils::produce_dummy_block(5, Some(prev_hash), vec![transfer_tx]);
|
let block = produce_dummy_block(5, Some(prev_hash), vec![transfer_tx]);
|
||||||
dbio.put_block(&block, [4; 32]).unwrap();
|
dbio.put_block(&block, [4; 32]).unwrap();
|
||||||
|
|
||||||
let control_block_id1 = dbio.get_block_id_by_hash(control_hash1.0).unwrap().unwrap();
|
let control_block_id1 = dbio.get_block_id_by_hash(control_hash1.0).unwrap().unwrap();
|
||||||
@ -526,7 +556,7 @@ mod tests {
|
|||||||
let prev_hash = last_block.header.hash;
|
let prev_hash = last_block.header.hash;
|
||||||
let transfer_tx =
|
let transfer_tx =
|
||||||
common::test_utils::create_transaction_native_token_transfer(from, 0, to, 1, &sign_key);
|
common::test_utils::create_transaction_native_token_transfer(from, 0, to, 1, &sign_key);
|
||||||
let block = common::test_utils::produce_dummy_block(2, Some(prev_hash), vec![transfer_tx]);
|
let block = produce_dummy_block(2, Some(prev_hash), vec![transfer_tx]);
|
||||||
|
|
||||||
block_res.push(block.clone());
|
block_res.push(block.clone());
|
||||||
dbio.put_block(&block, [1; 32]).unwrap();
|
dbio.put_block(&block, [1; 32]).unwrap();
|
||||||
@ -537,7 +567,7 @@ mod tests {
|
|||||||
let prev_hash = last_block.header.hash;
|
let prev_hash = last_block.header.hash;
|
||||||
let transfer_tx =
|
let transfer_tx =
|
||||||
common::test_utils::create_transaction_native_token_transfer(from, 1, to, 1, &sign_key);
|
common::test_utils::create_transaction_native_token_transfer(from, 1, to, 1, &sign_key);
|
||||||
let block = common::test_utils::produce_dummy_block(3, Some(prev_hash), vec![transfer_tx]);
|
let block = produce_dummy_block(3, Some(prev_hash), vec![transfer_tx]);
|
||||||
|
|
||||||
block_res.push(block.clone());
|
block_res.push(block.clone());
|
||||||
dbio.put_block(&block, [2; 32]).unwrap();
|
dbio.put_block(&block, [2; 32]).unwrap();
|
||||||
@ -549,7 +579,7 @@ mod tests {
|
|||||||
let transfer_tx =
|
let transfer_tx =
|
||||||
common::test_utils::create_transaction_native_token_transfer(from, 2, to, 1, &sign_key);
|
common::test_utils::create_transaction_native_token_transfer(from, 2, to, 1, &sign_key);
|
||||||
|
|
||||||
let block = common::test_utils::produce_dummy_block(4, Some(prev_hash), vec![transfer_tx]);
|
let block = produce_dummy_block(4, Some(prev_hash), vec![transfer_tx]);
|
||||||
block_res.push(block.clone());
|
block_res.push(block.clone());
|
||||||
dbio.put_block(&block, [3; 32]).unwrap();
|
dbio.put_block(&block, [3; 32]).unwrap();
|
||||||
|
|
||||||
@ -560,7 +590,7 @@ mod tests {
|
|||||||
let transfer_tx =
|
let transfer_tx =
|
||||||
common::test_utils::create_transaction_native_token_transfer(from, 3, to, 1, &sign_key);
|
common::test_utils::create_transaction_native_token_transfer(from, 3, to, 1, &sign_key);
|
||||||
|
|
||||||
let block = common::test_utils::produce_dummy_block(5, Some(prev_hash), vec![transfer_tx]);
|
let block = produce_dummy_block(5, Some(prev_hash), vec![transfer_tx]);
|
||||||
block_res.push(block.clone());
|
block_res.push(block.clone());
|
||||||
dbio.put_block(&block, [4; 32]).unwrap();
|
dbio.put_block(&block, [4; 32]).unwrap();
|
||||||
|
|
||||||
@ -633,11 +663,7 @@ mod tests {
|
|||||||
tx_hash_res.push(transfer_tx1.hash().0);
|
tx_hash_res.push(transfer_tx1.hash().0);
|
||||||
tx_hash_res.push(transfer_tx2.hash().0);
|
tx_hash_res.push(transfer_tx2.hash().0);
|
||||||
|
|
||||||
let block = common::test_utils::produce_dummy_block(
|
let block = produce_dummy_block(2, Some(prev_hash), vec![transfer_tx1, transfer_tx2]);
|
||||||
2,
|
|
||||||
Some(prev_hash),
|
|
||||||
vec![transfer_tx1, transfer_tx2],
|
|
||||||
);
|
|
||||||
|
|
||||||
dbio.put_block(&block, [1; 32]).unwrap();
|
dbio.put_block(&block, [1; 32]).unwrap();
|
||||||
|
|
||||||
@ -652,11 +678,7 @@ mod tests {
|
|||||||
tx_hash_res.push(transfer_tx1.hash().0);
|
tx_hash_res.push(transfer_tx1.hash().0);
|
||||||
tx_hash_res.push(transfer_tx2.hash().0);
|
tx_hash_res.push(transfer_tx2.hash().0);
|
||||||
|
|
||||||
let block = common::test_utils::produce_dummy_block(
|
let block = produce_dummy_block(3, Some(prev_hash), vec![transfer_tx1, transfer_tx2]);
|
||||||
3,
|
|
||||||
Some(prev_hash),
|
|
||||||
vec![transfer_tx1, transfer_tx2],
|
|
||||||
);
|
|
||||||
|
|
||||||
dbio.put_block(&block, [2; 32]).unwrap();
|
dbio.put_block(&block, [2; 32]).unwrap();
|
||||||
|
|
||||||
@ -671,11 +693,7 @@ mod tests {
|
|||||||
tx_hash_res.push(transfer_tx1.hash().0);
|
tx_hash_res.push(transfer_tx1.hash().0);
|
||||||
tx_hash_res.push(transfer_tx2.hash().0);
|
tx_hash_res.push(transfer_tx2.hash().0);
|
||||||
|
|
||||||
let block = common::test_utils::produce_dummy_block(
|
let block = produce_dummy_block(4, Some(prev_hash), vec![transfer_tx1, transfer_tx2]);
|
||||||
4,
|
|
||||||
Some(prev_hash),
|
|
||||||
vec![transfer_tx1, transfer_tx2],
|
|
||||||
);
|
|
||||||
|
|
||||||
dbio.put_block(&block, [3; 32]).unwrap();
|
dbio.put_block(&block, [3; 32]).unwrap();
|
||||||
|
|
||||||
@ -687,7 +705,7 @@ mod tests {
|
|||||||
common::test_utils::create_transaction_native_token_transfer(from, 6, to, 1, &sign_key);
|
common::test_utils::create_transaction_native_token_transfer(from, 6, to, 1, &sign_key);
|
||||||
tx_hash_res.push(transfer_tx.hash().0);
|
tx_hash_res.push(transfer_tx.hash().0);
|
||||||
|
|
||||||
let block = common::test_utils::produce_dummy_block(5, Some(prev_hash), vec![transfer_tx]);
|
let block = produce_dummy_block(5, Some(prev_hash), vec![transfer_tx]);
|
||||||
|
|
||||||
dbio.put_block(&block, [4; 32]).unwrap();
|
dbio.put_block(&block, [4; 32]).unwrap();
|
||||||
|
|
||||||
|
|||||||
Loading…
x
Reference in New Issue
Block a user