diff --git a/Cargo.lock b/Cargo.lock index 85856c94..914a4dd1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1210,6 +1210,7 @@ dependencies = [ "anyhow", "common", "futures", + "humantime-serde", "log", "logos-blockchain-chain-broadcast-service", "logos-blockchain-chain-service", @@ -1488,6 +1489,15 @@ dependencies = [ "serde", ] +[[package]] +name = "bytesize" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6bd91ee7b2422bcb158d90ef4d14f75ef67f340943fc4149891dcce8f8b972a3" +dependencies = [ + "serde_core", +] + [[package]] name = "bytestring" version = "1.5.0" @@ -1743,6 +1753,7 @@ dependencies = [ "anyhow", "base64 0.22.1", "borsh", + "bytesize", "hex", "log", "logos-blockchain-common-http-client", @@ -3430,6 +3441,16 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" +[[package]] +name = "humantime-serde" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57a3db5ea5923d99402c94e9feb261dc5ee9b4efa158b0315f788cf549cc200c" +dependencies = [ + "humantime", + "serde", +] + [[package]] name = "hydration_context" version = "0.3.0" @@ -3725,6 +3746,7 @@ dependencies = [ "borsh", "common", "futures", + "humantime-serde", "log", "logos-blockchain-core", "nssa", @@ -3835,6 +3857,7 @@ dependencies = [ "anyhow", "base64 0.22.1", "borsh", + "bytesize", "common", "env_logger", "futures", @@ -5562,6 +5585,7 @@ dependencies = [ "base58", "borsh", "bytemuck", + "bytesize", "chacha20", "k256", "risc0-zkvm", @@ -7430,9 +7454,11 @@ dependencies = [ "base58", "bedrock_client", "borsh", + "bytesize", "chrono", "common", "futures", + "humantime-serde", "jsonrpsee", "log", "logos-blockchain-core", @@ -7460,6 +7486,7 @@ dependencies = [ "base64 0.22.1", "bedrock_client", "borsh", + "bytesize", "common", "futures", "hex", @@ -8968,6 +8995,8 @@ dependencies = [ "env_logger", "futures", 
"hex", + "humantime", + "humantime-serde", "indicatif", "itertools 0.14.0", "key_protocol", diff --git a/Cargo.toml b/Cargo.toml index 520844b6..c868e0a9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,8 +25,6 @@ members = [ "indexer/service/protocol", "indexer/service/rpc", "explorer_service", - "programs/token/core", - "programs/token", "program_methods", "program_methods/guest", "test_program_methods", @@ -52,7 +50,7 @@ indexer_service = { path = "indexer/service" } indexer_service_protocol = { path = "indexer/service/protocol" } indexer_service_rpc = { path = "indexer/service/rpc" } wallet = { path = "wallet" } -wallet-ffi = { path = "wallet-ffi" } +wallet-ffi = { path = "wallet-ffi", default-features = false } token_core = { path = "programs/token/core" } token_program = { path = "programs/token" } amm_core = { path = "programs/amm/core" } @@ -89,6 +87,9 @@ thiserror = "2.0.12" sha2 = "0.10.8" hex = "0.4.3" bytemuck = "1.24.0" +bytesize = { version = "2.3.1", features = ["serde"] } +humantime-serde = "1.1" +humantime = "2.1" aes-gcm = "0.10.3" toml = "0.7.4" bincode = "1.3.3" diff --git a/README.md b/README.md index b94a68d4..ee2d8097 100644 --- a/README.md +++ b/README.md @@ -142,7 +142,7 @@ The sequencer and logos blockchain node can be run locally: - `./scripts/setup-logos-blockchain-circuits.sh` - `cargo build --all-features` - `./target/debug/logos-blockchain-node --deployment nodes/node/standalone-deployment-config.yaml nodes/node/standalone-node-config.yaml` - + 2. Alternatively (WARNING: This node is outdated) go to ``logos-blockchain/lssa/` repo and run the node from docker: - `cd bedrock` - Change line 14 of `docker-compose.yml` from `"0:18080/tcp"` into `"8080:18080/tcp"` @@ -162,6 +162,48 @@ After stopping services above you need to remove 3 folders to start cleanly: 3. In the `lssa` file `sequencer_runner/bedrock_signing_key` 4. 
In the `lssa` folder `indexer/service/rocksdb` +### Normal mode (`just` commands) +We provide a `Justfile` for developer and user needs; you can run the whole setup with it. The only difference will be that logos-blockchain (bedrock) will be started from docker. + +#### 1st Terminal + +```bash +just run-bedrock +``` + +#### 2nd Terminal + +```bash +just run-indexer +``` + +#### 3rd Terminal + +```bash +just run-sequencer +``` + +#### 4th Terminal + +```bash +just run-explorer +``` + +#### 5th Terminal + +You can run any command our wallet supports by passing it as an argument for `just run-wallet`, for example: + +```bash +just run-wallet check-health +``` + +This will use a wallet binary built from this repo and not the one installed in your system if you have some. Also another wallet home directory will be used. This is done so as not to mess with your local wallet and to easily clean generated files (see next section). + +#### Shutdown + +1. Press `ctrl-c` in every terminal +2. Run `just clean` to clean runtime data + ### Standalone mode The sequencer can be run in standalone mode with: ```bash diff --git a/artifacts/program_methods/amm.bin b/artifacts/program_methods/amm.bin index 55dfea0d..45e28420 100644 Binary files a/artifacts/program_methods/amm.bin and b/artifacts/program_methods/amm.bin differ diff --git a/artifacts/program_methods/authenticated_transfer.bin b/artifacts/program_methods/authenticated_transfer.bin index a99aea50..5ff56cf5 100644 Binary files a/artifacts/program_methods/authenticated_transfer.bin and b/artifacts/program_methods/authenticated_transfer.bin differ diff --git a/artifacts/program_methods/pinata.bin b/artifacts/program_methods/pinata.bin index b901243f..aef1a70c 100644 Binary files a/artifacts/program_methods/pinata.bin and b/artifacts/program_methods/pinata.bin differ diff --git a/artifacts/program_methods/pinata_token.bin b/artifacts/program_methods/pinata_token.bin index 63b17a35..47ada3c2 100644 Binary files 
a/artifacts/program_methods/pinata_token.bin and b/artifacts/program_methods/pinata_token.bin differ diff --git a/artifacts/program_methods/privacy_preserving_circuit.bin b/artifacts/program_methods/privacy_preserving_circuit.bin index 4bbfac21..3a0330e6 100644 Binary files a/artifacts/program_methods/privacy_preserving_circuit.bin and b/artifacts/program_methods/privacy_preserving_circuit.bin differ diff --git a/artifacts/program_methods/token.bin b/artifacts/program_methods/token.bin index 90d52232..dbbf3c07 100644 Binary files a/artifacts/program_methods/token.bin and b/artifacts/program_methods/token.bin differ diff --git a/artifacts/test_program_methods/burner.bin b/artifacts/test_program_methods/burner.bin index c143aa3c..a3a2839e 100644 Binary files a/artifacts/test_program_methods/burner.bin and b/artifacts/test_program_methods/burner.bin differ diff --git a/artifacts/test_program_methods/chain_caller.bin b/artifacts/test_program_methods/chain_caller.bin index 439cc765..b920b0e2 100644 Binary files a/artifacts/test_program_methods/chain_caller.bin and b/artifacts/test_program_methods/chain_caller.bin differ diff --git a/artifacts/test_program_methods/changer_claimer.bin b/artifacts/test_program_methods/changer_claimer.bin index 53ee1041..019b2df0 100644 Binary files a/artifacts/test_program_methods/changer_claimer.bin and b/artifacts/test_program_methods/changer_claimer.bin differ diff --git a/artifacts/test_program_methods/claimer.bin b/artifacts/test_program_methods/claimer.bin index 83c18dca..6aaa6bba 100644 Binary files a/artifacts/test_program_methods/claimer.bin and b/artifacts/test_program_methods/claimer.bin differ diff --git a/artifacts/test_program_methods/data_changer.bin b/artifacts/test_program_methods/data_changer.bin index 13936cbf..5712c28e 100644 Binary files a/artifacts/test_program_methods/data_changer.bin and b/artifacts/test_program_methods/data_changer.bin differ diff --git a/artifacts/test_program_methods/extra_output.bin 
b/artifacts/test_program_methods/extra_output.bin index 7479bad9..20e0e12a 100644 Binary files a/artifacts/test_program_methods/extra_output.bin and b/artifacts/test_program_methods/extra_output.bin differ diff --git a/artifacts/test_program_methods/malicious_authorization_changer.bin b/artifacts/test_program_methods/malicious_authorization_changer.bin index 8c51807c..8c434e84 100644 Binary files a/artifacts/test_program_methods/malicious_authorization_changer.bin and b/artifacts/test_program_methods/malicious_authorization_changer.bin differ diff --git a/artifacts/test_program_methods/minter.bin b/artifacts/test_program_methods/minter.bin index fcdf47de..648c9e2a 100644 Binary files a/artifacts/test_program_methods/minter.bin and b/artifacts/test_program_methods/minter.bin differ diff --git a/artifacts/test_program_methods/missing_output.bin b/artifacts/test_program_methods/missing_output.bin index 3d1f52a8..dcaf8cd0 100644 Binary files a/artifacts/test_program_methods/missing_output.bin and b/artifacts/test_program_methods/missing_output.bin differ diff --git a/artifacts/test_program_methods/modified_transfer.bin b/artifacts/test_program_methods/modified_transfer.bin index 8611238c..b06bcdd4 100644 Binary files a/artifacts/test_program_methods/modified_transfer.bin and b/artifacts/test_program_methods/modified_transfer.bin differ diff --git a/artifacts/test_program_methods/nonce_changer.bin b/artifacts/test_program_methods/nonce_changer.bin index f046b041..c9434606 100644 Binary files a/artifacts/test_program_methods/nonce_changer.bin and b/artifacts/test_program_methods/nonce_changer.bin differ diff --git a/artifacts/test_program_methods/noop.bin b/artifacts/test_program_methods/noop.bin index 6ef6047f..42cddfab 100644 Binary files a/artifacts/test_program_methods/noop.bin and b/artifacts/test_program_methods/noop.bin differ diff --git a/artifacts/test_program_methods/program_owner_changer.bin b/artifacts/test_program_methods/program_owner_changer.bin index 
5b45a151..66474082 100644 Binary files a/artifacts/test_program_methods/program_owner_changer.bin and b/artifacts/test_program_methods/program_owner_changer.bin differ diff --git a/artifacts/test_program_methods/simple_balance_transfer.bin b/artifacts/test_program_methods/simple_balance_transfer.bin index bd43e33a..f2383856 100644 Binary files a/artifacts/test_program_methods/simple_balance_transfer.bin and b/artifacts/test_program_methods/simple_balance_transfer.bin differ diff --git a/bedrock_client/Cargo.toml b/bedrock_client/Cargo.toml index 6f8b8a74..696174d8 100644 --- a/bedrock_client/Cargo.toml +++ b/bedrock_client/Cargo.toml @@ -13,6 +13,7 @@ tokio-retry.workspace = true futures.workspace = true log.workspace = true serde.workspace = true +humantime-serde.workspace = true logos-blockchain-common-http-client.workspace = true logos-blockchain-core.workspace = true logos-blockchain-chain-broadcast-service.workspace = true diff --git a/bedrock_client/src/lib.rs b/bedrock_client/src/lib.rs index 7aae7783..534a0cf6 100644 --- a/bedrock_client/src/lib.rs +++ b/bedrock_client/src/lib.rs @@ -3,6 +3,8 @@ use std::time::Duration; use anyhow::{Context as _, Result}; use common::config::BasicAuth; use futures::{Stream, TryFutureExt}; +#[expect(clippy::single_component_path_imports, reason = "Satisfy machete")] +use humantime_serde; use log::{info, warn}; pub use logos_blockchain_chain_broadcast_service::BlockInfo; use logos_blockchain_chain_service::CryptarchiaInfo; @@ -15,14 +17,15 @@ use tokio_retry::Retry; /// Fibonacci backoff retry strategy configuration #[derive(Debug, Copy, Clone, Serialize, Deserialize)] pub struct BackoffConfig { - pub start_delay_millis: u64, + #[serde(with = "humantime_serde")] + pub start_delay: Duration, pub max_retries: usize, } impl Default for BackoffConfig { fn default() -> Self { Self { - start_delay_millis: 100, + start_delay: Duration::from_millis(100), max_retries: 5, } } @@ -93,7 +96,9 @@ impl BedrockClient { } fn 
backoff_strategy(&self) -> impl Iterator { - tokio_retry::strategy::FibonacciBackoff::from_millis(self.backoff.start_delay_millis) - .take(self.backoff.max_retries) + tokio_retry::strategy::FibonacciBackoff::from_millis( + self.backoff.start_delay.as_millis() as u64 + ) + .take(self.backoff.max_retries) } } diff --git a/common/Cargo.toml b/common/Cargo.toml index 2ef707f3..f7658304 100644 --- a/common/Cargo.toml +++ b/common/Cargo.toml @@ -18,6 +18,7 @@ sha2.workspace = true log.workspace = true hex.workspace = true borsh.workspace = true +bytesize.workspace = true base64.workspace = true url.workspace = true logos-blockchain-common-http-client.workspace = true diff --git a/common/src/rpc_primitives/mod.rs b/common/src/rpc_primitives/mod.rs index ee64fb43..be5182e1 100644 --- a/common/src/rpc_primitives/mod.rs +++ b/common/src/rpc_primitives/mod.rs @@ -1,3 +1,4 @@ +use bytesize::ByteSize; use serde::{Deserialize, Serialize}; pub mod errors; @@ -8,13 +9,13 @@ pub mod requests; #[derive(Serialize, Deserialize, Clone, Debug)] pub struct RpcLimitsConfig { /// Maximum byte size of the json payload. 
- pub json_payload_max_size: usize, + pub json_payload_max_size: ByteSize, } impl Default for RpcLimitsConfig { fn default() -> Self { Self { - json_payload_max_size: 10 * 1024 * 1024, + json_payload_max_size: ByteSize::mib(10), } } } diff --git a/common/src/transaction.rs b/common/src/transaction.rs index 33617da7..a996250b 100644 --- a/common/src/transaction.rs +++ b/common/src/transaction.rs @@ -103,4 +103,6 @@ pub enum TransactionMalformationError { InvalidSignature, #[error("Failed to decode transaction with hash: {tx:?}")] FailedToDecode { tx: HashType }, + #[error("Transaction size {size} exceeds maximum allowed size of {max} bytes")] + TransactionTooLarge { size: usize, max: usize }, } diff --git a/completions/zsh/_wallet b/completions/zsh/_wallet index 9f40c4fb..e0c5f415 100644 --- a/completions/zsh/_wallet +++ b/completions/zsh/_wallet @@ -346,7 +346,7 @@ _wallet_config() { 'all' 'override_rust_log' 'sequencer_addr' - 'seq_poll_timeout_millis' + 'seq_poll_timeout' 'seq_tx_poll_max_blocks' 'seq_poll_max_retries' 'seq_block_poll_max_amount' diff --git a/configs/docker-all-in-one/indexer/indexer_config.json b/configs/docker-all-in-one/indexer/indexer_config.json index ae106b46..4c4fe085 100644 --- a/configs/docker-all-in-one/indexer/indexer_config.json +++ b/configs/docker-all-in-one/indexer/indexer_config.json @@ -1,9 +1,9 @@ { - "resubscribe_interval_millis": 1000, + "resubscribe_interval": "1s", "bedrock_client_config": { "addr": "http://logos-blockchain-node-0:18080", "backoff": { - "start_delay_millis": 100, + "start_delay": "100ms", "max_retries": 5 } }, diff --git a/configs/docker-all-in-one/sequencer/sequencer_config.json b/configs/docker-all-in-one/sequencer/sequencer_config.json index 579253dc..7217bf5a 100644 --- a/configs/docker-all-in-one/sequencer/sequencer_config.json +++ b/configs/docker-all-in-one/sequencer/sequencer_config.json @@ -4,13 +4,14 @@ "genesis_id": 1, "is_genesis_random": true, "max_num_tx_in_block": 20, + "max_block_size": "1 
MiB", "mempool_max_size": 10000, - "block_create_timeout_millis": 10000, - "retry_pending_blocks_timeout_millis": 7000, + "block_create_timeout": "10s", + "retry_pending_blocks_timeout": "7s", "port": 3040, "bedrock_config": { "backoff": { - "start_delay_millis": 100, + "start_delay": "100ms", "max_retries": 5 }, "channel_id": "0101010101010101010101010101010101010101010101010101010101010101", diff --git a/indexer/core/Cargo.toml b/indexer/core/Cargo.toml index 104d6d5a..792fb4b7 100644 --- a/indexer/core/Cargo.toml +++ b/indexer/core/Cargo.toml @@ -14,6 +14,7 @@ storage.workspace = true anyhow.workspace = true log.workspace = true serde.workspace = true +humantime-serde.workspace = true tokio.workspace = true borsh.workspace = true futures.workspace = true diff --git a/indexer/core/src/config.rs b/indexer/core/src/config.rs index e3cd4d04..95e6147c 100644 --- a/indexer/core/src/config.rs +++ b/indexer/core/src/config.rs @@ -2,6 +2,7 @@ use std::{ fs::File, io::BufReader, path::{Path, PathBuf}, + time::Duration, }; use anyhow::{Context as _, Result}; @@ -10,6 +11,7 @@ use common::{ block::{AccountInitialData, CommitmentsInitialData}, config::BasicAuth, }; +use humantime_serde; pub use logos_blockchain_core::mantle::ops::channel::ChannelId; use serde::{Deserialize, Serialize}; use url::Url; @@ -33,7 +35,8 @@ pub struct IndexerConfig { pub initial_commitments: Vec, /// Sequencers signing key pub signing_key: [u8; 32], - pub consensus_info_polling_interval_millis: u64, + #[serde(with = "humantime_serde")] + pub consensus_info_polling_interval: Duration, pub bedrock_client_config: ClientConfig, pub channel_id: ChannelId, } diff --git a/indexer/core/src/lib.rs b/indexer/core/src/lib.rs index 1e2986a3..b24cb1bd 100644 --- a/indexer/core/src/lib.rs +++ b/indexer/core/src/lib.rs @@ -174,13 +174,10 @@ impl IndexerCore { break Ok(next_lib); } else { info!( - "Wait {}ms to not spam the node", - self.config.consensus_info_polling_interval_millis + "Wait {:?} to not spam the 
node", + self.config.consensus_info_polling_interval ); - tokio::time::sleep(std::time::Duration::from_millis( - self.config.consensus_info_polling_interval_millis, - )) - .await; + tokio::time::sleep(self.config.consensus_info_polling_interval).await; } } } diff --git a/indexer/service/configs/indexer_config.json b/indexer/service/configs/indexer_config.json index e748d96a..7d7a317c 100644 --- a/indexer/service/configs/indexer_config.json +++ b/indexer/service/configs/indexer_config.json @@ -1,10 +1,10 @@ { "home": "./indexer/service", - "consensus_info_polling_interval_millis": 10000, + "consensus_info_polling_interval": "1s", "bedrock_client_config": { "addr": "http://localhost:8080", "backoff": { - "start_delay_millis": 100, + "start_delay": "100ms", "max_retries": 5 } }, diff --git a/integration_tests/Cargo.toml b/integration_tests/Cargo.toml index 101488e0..ac14d183 100644 --- a/integration_tests/Cargo.toml +++ b/integration_tests/Cargo.toml @@ -28,5 +28,6 @@ tokio = { workspace = true, features = ["rt-multi-thread", "macros"] } hex.workspace = true tempfile.workspace = true borsh.workspace = true +bytesize.workspace = true futures.workspace = true testcontainers = { version = "0.27.0", features = ["docker-compose"] } diff --git a/integration_tests/src/config.rs b/integration_tests/src/config.rs index 445929dd..8dd18a25 100644 --- a/integration_tests/src/config.rs +++ b/integration_tests/src/config.rs @@ -1,6 +1,7 @@ -use std::{net::SocketAddr, path::PathBuf}; +use std::{net::SocketAddr, path::PathBuf, time::Duration}; use anyhow::{Context, Result}; +use bytesize::ByteSize; use common::block::{AccountInitialData, CommitmentsInitialData}; use indexer_service::{BackoffConfig, ChannelId, ClientConfig, IndexerConfig}; use key_protocol::key_management::KeyChain; @@ -19,13 +20,13 @@ pub fn indexer_config( ) -> Result { Ok(IndexerConfig { home, - consensus_info_polling_interval_millis: 10000, + consensus_info_polling_interval: Duration::from_secs(1), 
bedrock_client_config: ClientConfig { addr: addr_to_url(UrlProtocol::Http, bedrock_addr) .context("Failed to convert bedrock addr to URL")?, auth: None, backoff: BackoffConfig { - start_delay_millis: 100, + start_delay: Duration::from_millis(100), max_retries: 10, }, }, @@ -39,16 +40,18 @@ pub fn indexer_config( /// Sequencer config options available for custom changes in integration tests. pub struct SequencerPartialConfig { pub max_num_tx_in_block: usize, + pub max_block_size: ByteSize, pub mempool_max_size: usize, - pub block_create_timeout_millis: u64, + pub block_create_timeout: Duration, } impl Default for SequencerPartialConfig { fn default() -> Self { Self { max_num_tx_in_block: 20, + max_block_size: ByteSize::mib(1), mempool_max_size: 10_000, - block_create_timeout_millis: 10_000, + block_create_timeout: Duration::from_secs(10), } } } @@ -62,8 +65,9 @@ pub fn sequencer_config( ) -> Result { let SequencerPartialConfig { max_num_tx_in_block, + max_block_size, mempool_max_size, - block_create_timeout_millis, + block_create_timeout, } = partial; Ok(SequencerConfig { @@ -72,16 +76,17 @@ pub fn sequencer_config( genesis_id: 1, is_genesis_random: true, max_num_tx_in_block, + max_block_size, mempool_max_size, - block_create_timeout_millis, - retry_pending_blocks_timeout_millis: 120_000, + block_create_timeout, + retry_pending_blocks_timeout: Duration::from_secs(120), port: 0, initial_accounts: initial_data.sequencer_initial_accounts(), initial_commitments: initial_data.sequencer_initial_commitments(), signing_key: [37; 32], bedrock_config: BedrockConfig { backoff: BackoffConfig { - start_delay_millis: 100, + start_delay: Duration::from_millis(100), max_retries: 5, }, channel_id: bedrock_channel_id(), @@ -102,7 +107,7 @@ pub fn wallet_config( override_rust_log: None, sequencer_addr: addr_to_url(UrlProtocol::Http, sequencer_addr) .context("Failed to convert sequencer addr to URL")?, - seq_poll_timeout_millis: 30_000, + seq_poll_timeout: Duration::from_secs(30), 
seq_tx_poll_max_blocks: 15, seq_poll_max_retries: 10, seq_block_poll_max_amount: 100, diff --git a/integration_tests/src/lib.rs b/integration_tests/src/lib.rs index abf72bfe..f0111735 100644 --- a/integration_tests/src/lib.rs +++ b/integration_tests/src/lib.rs @@ -21,6 +21,7 @@ pub mod config; // TODO: Remove this and control time from tests pub const TIME_TO_WAIT_FOR_BLOCK_SECONDS: u64 = 12; pub const NSSA_PROGRAM_FOR_TEST_DATA_CHANGER: &str = "data_changer.bin"; +pub const NSSA_PROGRAM_FOR_TEST_NOOP: &str = "noop.bin"; const BEDROCK_SERVICE_WITH_OPEN_PORT: &str = "logos-blockchain-node-0"; const BEDROCK_SERVICE_PORT: u16 = 18080; diff --git a/integration_tests/tests/block_size_limit.rs b/integration_tests/tests/block_size_limit.rs new file mode 100644 index 00000000..d8ee64dc --- /dev/null +++ b/integration_tests/tests/block_size_limit.rs @@ -0,0 +1,185 @@ +use std::time::Duration; + +use anyhow::Result; +use bytesize::ByteSize; +use common::{block::HashableBlockData, transaction::NSSATransaction}; +use integration_tests::{ + TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, config::SequencerPartialConfig, +}; +use nssa::program::Program; +use tokio::test; + +#[test] +async fn reject_oversized_transaction() -> Result<()> { + let ctx = TestContext::builder() + .with_sequencer_partial_config(SequencerPartialConfig { + max_num_tx_in_block: 100, + max_block_size: ByteSize::mib(1), + mempool_max_size: 1000, + block_create_timeout: Duration::from_secs(10), + }) + .build() + .await?; + + // Create a transaction that's definitely too large + // Block size is 1 MiB (1,048,576 bytes), minus ~200 bytes for header = ~1,048,376 bytes max tx + // Create a 1.1 MiB binary to ensure it exceeds the limit + let oversized_binary = vec![0u8; 1100 * 1024]; // 1.1 MiB binary + + let message = nssa::program_deployment_transaction::Message::new(oversized_binary); + let tx = nssa::ProgramDeploymentTransaction::new(message); + + // Try to submit the transaction and expect an error + let result 
= ctx.sequencer_client().send_tx_program(tx).await; + + assert!( + result.is_err(), + "Expected error when submitting oversized transaction" + ); + + let err = result.unwrap_err(); + let err_str = format!("{:?}", err); + + // Check if the error contains information about transaction being too large + assert!( + err_str.contains("TransactionTooLarge") || err_str.contains("too large"), + "Expected TransactionTooLarge error, got: {}", + err_str + ); + + Ok(()) +} + +#[test] +async fn accept_transaction_within_limit() -> Result<()> { + let ctx = TestContext::builder() + .with_sequencer_partial_config(SequencerPartialConfig { + max_num_tx_in_block: 100, + max_block_size: ByteSize::mib(1), + mempool_max_size: 1000, + block_create_timeout: Duration::from_secs(10), + }) + .build() + .await?; + + // Create a small program deployment that should fit + let small_binary = vec![0u8; 1024]; // 1 KiB binary + + let message = nssa::program_deployment_transaction::Message::new(small_binary); + let tx = nssa::ProgramDeploymentTransaction::new(message); + + // This should succeed + let result = ctx.sequencer_client().send_tx_program(tx).await; + + assert!( + result.is_ok(), + "Expected successful submission of small transaction, got error: {:?}", + result.as_ref().unwrap_err() + ); + + Ok(()) +} + +#[test] +async fn transaction_deferred_to_next_block_when_current_full() -> Result<()> { + let manifest_dir = env!("CARGO_MANIFEST_DIR"); + let artifacts_dir = + std::path::PathBuf::from(manifest_dir).join("../artifacts/test_program_methods"); + + let burner_bytecode = std::fs::read(artifacts_dir.join("burner.bin"))?; + let chain_caller_bytecode = std::fs::read(artifacts_dir.join("chain_caller.bin"))?; + + // Calculate block size to fit only one of the two transactions, leaving some room for headers + // (e.g., 10 KiB) + let max_program_size = burner_bytecode.len().max(chain_caller_bytecode.len()); + let block_size = ByteSize::b((max_program_size + 10 * 1024) as u64); + + let ctx = 
TestContext::builder() + .with_sequencer_partial_config(SequencerPartialConfig { + max_num_tx_in_block: 100, + max_block_size: block_size, + mempool_max_size: 1000, + block_create_timeout: Duration::from_secs(10), + }) + .build() + .await?; + + let burner_id = Program::new(burner_bytecode.clone())?.id(); + let chain_caller_id = Program::new(chain_caller_bytecode.clone())?.id(); + + let initial_block_height = ctx.sequencer_client().get_last_block().await?.last_block; + + // Submit both program deployments + ctx.sequencer_client() + .send_tx_program(nssa::ProgramDeploymentTransaction::new( + nssa::program_deployment_transaction::Message::new(burner_bytecode), + )) + .await?; + + ctx.sequencer_client() + .send_tx_program(nssa::ProgramDeploymentTransaction::new( + nssa::program_deployment_transaction::Message::new(chain_caller_bytecode), + )) + .await?; + + // Wait for first block + tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await; + + let block1_response = ctx + .sequencer_client() + .get_block(initial_block_height + 1) + .await?; + let block1: HashableBlockData = borsh::from_slice(&block1_response.block)?; + + // Check which program is in block 1 + let get_program_ids = |block: &HashableBlockData| -> Vec { + block + .transactions + .iter() + .filter_map(|tx| { + if let NSSATransaction::ProgramDeployment(deployment) = tx { + let bytecode = deployment.message.clone().into_bytecode(); + Program::new(bytecode).ok().map(|p| p.id()) + } else { + None + } + }) + .collect() + }; + + let block1_program_ids = get_program_ids(&block1); + + // First program should be in block 1, but not both due to block size limit + assert_eq!( + block1_program_ids.len(), + 1, + "Expected exactly one program deployment in block 1" + ); + assert_eq!( + block1_program_ids[0], burner_id, + "Expected burner program to be deployed in block 1" + ); + + // Wait for second block + tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await; + + let 
block2_response = ctx + .sequencer_client() + .get_block(initial_block_height + 2) + .await?; + let block2: HashableBlockData = borsh::from_slice(&block2_response.block)?; + let block2_program_ids = get_program_ids(&block2); + + // The other program should be in block 2 + assert_eq!( + block2_program_ids.len(), + 1, + "Expected exactly one program deployment in block 2" + ); + assert_eq!( + block2_program_ids[0], chain_caller_id, + "Expected chain_caller program to be deployed in block 2" + ); + + Ok(()) +} diff --git a/integration_tests/tests/config.rs b/integration_tests/tests/config.rs index ca800d0f..ed301616 100644 --- a/integration_tests/tests/config.rs +++ b/integration_tests/tests/config.rs @@ -8,22 +8,22 @@ use wallet::cli::{Command, config::ConfigSubcommand}; async fn modify_config_field() -> Result<()> { let mut ctx = TestContext::new().await?; - let old_seq_poll_timeout_millis = ctx.wallet().config().seq_poll_timeout_millis; + let old_seq_poll_timeout = ctx.wallet().config().seq_poll_timeout; // Change config field let command = Command::Config(ConfigSubcommand::Set { - key: "seq_poll_timeout_millis".to_string(), - value: "1000".to_string(), + key: "seq_poll_timeout".to_string(), + value: "1s".to_string(), }); wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?; - let new_seq_poll_timeout_millis = ctx.wallet().config().seq_poll_timeout_millis; - assert_eq!(new_seq_poll_timeout_millis, 1000); + let new_seq_poll_timeout = ctx.wallet().config().seq_poll_timeout; + assert_eq!(new_seq_poll_timeout, std::time::Duration::from_secs(1)); // Return how it was at the beginning let command = Command::Config(ConfigSubcommand::Set { - key: "seq_poll_timeout_millis".to_string(), - value: old_seq_poll_timeout_millis.to_string(), + key: "seq_poll_timeout".to_string(), + value: format!("{:?}", old_seq_poll_timeout), }); wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?; diff --git a/integration_tests/tests/tps.rs 
b/integration_tests/tests/tps.rs index 2c58721e..12669f90 100644 --- a/integration_tests/tests/tps.rs +++ b/integration_tests/tests/tps.rs @@ -1,6 +1,7 @@ use std::time::{Duration, Instant}; use anyhow::Result; +use bytesize::ByteSize; use integration_tests::{ TestContext, config::{InitialData, SequencerPartialConfig}, @@ -178,8 +179,9 @@ impl TpsTestManager { fn generate_sequencer_partial_config() -> SequencerPartialConfig { SequencerPartialConfig { max_num_tx_in_block: 300, + max_block_size: ByteSize::mb(500), mempool_max_size: 10_000, - block_create_timeout_millis: 12_000, + block_create_timeout: Duration::from_secs(12), } } } diff --git a/mempool/src/lib.rs b/mempool/src/lib.rs index ff6163ff..fae52b3e 100644 --- a/mempool/src/lib.rs +++ b/mempool/src/lib.rs @@ -2,13 +2,17 @@ use tokio::sync::mpsc::{Receiver, Sender}; pub struct MemPool { receiver: Receiver, + front_buffer: Vec, } impl MemPool { pub fn new(max_size: usize) -> (Self, MemPoolHandle) { let (sender, receiver) = tokio::sync::mpsc::channel(max_size); - let mem_pool = Self { receiver }; + let mem_pool = Self { + receiver, + front_buffer: Vec::new(), + }; let sender = MemPoolHandle::new(sender); (mem_pool, sender) } @@ -16,6 +20,13 @@ impl MemPool { pub fn pop(&mut self) -> Option { use tokio::sync::mpsc::error::TryRecvError; + // First check if there are any items in the front buffer (LIFO) + if let Some(item) = self.front_buffer.pop() { + return Some(item); + } + + // Otherwise, try to receive from the channel (FIFO) + match self.receiver.try_recv() { Ok(item) => Some(item), Err(TryRecvError::Empty) => None, @@ -24,6 +35,11 @@ impl MemPool { } } } + + /// Push an item to the front of the mempool (will be popped first) + pub fn push_front(&mut self, item: T) { + self.front_buffer.push(item); + } } pub struct MemPoolHandle { @@ -96,4 +112,24 @@ mod tests { assert_eq!(pool.pop(), Some(1)); assert_eq!(pool.pop(), Some(2)); } + + #[test] + async fn test_push_front() { + let (mut pool, handle) = 
MemPool::new(10); + + handle.push(1).await.unwrap(); + handle.push(2).await.unwrap(); + + // Push items to the front - these should be popped first + pool.push_front(10); + pool.push_front(20); + + // Items pushed to front are popped in LIFO order + assert_eq!(pool.pop(), Some(20)); + assert_eq!(pool.pop(), Some(10)); + // Original items are then popped in FIFO order + assert_eq!(pool.pop(), Some(1)); + assert_eq!(pool.pop(), Some(2)); + assert_eq!(pool.pop(), None); + } } diff --git a/nssa/core/Cargo.toml b/nssa/core/Cargo.toml index 93f2a4a8..3b6b430f 100644 --- a/nssa/core/Cargo.toml +++ b/nssa/core/Cargo.toml @@ -11,6 +11,7 @@ serde.workspace = true serde_with.workspace = true thiserror.workspace = true bytemuck.workspace = true +bytesize.workspace = true base58.workspace = true k256 = { workspace = true, optional = true } chacha20 = { version = "0.9", default-features = false } diff --git a/nssa/core/src/account/data.rs b/nssa/core/src/account/data.rs index 396bbe6e..91c58516 100644 --- a/nssa/core/src/account/data.rs +++ b/nssa/core/src/account/data.rs @@ -1,9 +1,10 @@ use std::ops::Deref; use borsh::{BorshDeserialize, BorshSerialize}; +use bytesize::ByteSize; use serde::{Deserialize, Serialize}; -pub const DATA_MAX_LENGTH_IN_BYTES: usize = 100 * 1024; // 100 KiB +pub const DATA_MAX_LENGTH: ByteSize = ByteSize::kib(100); #[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, BorshSerialize)] pub struct Data(Vec); @@ -22,7 +23,7 @@ impl Data { let mut u32_bytes = [0u8; 4]; cursor.read_exact(&mut u32_bytes)?; let data_length = u32::from_le_bytes(u32_bytes); - if data_length as usize > DATA_MAX_LENGTH_IN_BYTES { + if data_length as usize > DATA_MAX_LENGTH.as_u64() as usize { return Err( std::io::Error::new(std::io::ErrorKind::InvalidData, DataTooBigError).into(), ); @@ -35,7 +36,7 @@ impl Data { } #[derive(Debug, thiserror::Error, Clone, Copy, PartialEq, Eq)] -#[error("data length exceeds maximum allowed length of {DATA_MAX_LENGTH_IN_BYTES} bytes")] 
+#[error("data length exceeds maximum allowed length of {} bytes", DATA_MAX_LENGTH.as_u64())] pub struct DataTooBigError; impl From for Vec { @@ -48,7 +49,7 @@ impl TryFrom> for Data { type Error = DataTooBigError; fn try_from(value: Vec) -> Result { - if value.len() > DATA_MAX_LENGTH_IN_BYTES { + if value.len() > DATA_MAX_LENGTH.as_u64() as usize { Err(DataTooBigError) } else { Ok(Self(value)) @@ -78,7 +79,7 @@ impl<'de> Deserialize<'de> for Data { /// Data deserialization visitor. /// /// Compared to a simple deserialization into a `Vec`, this visitor enforces - /// early length check defined by [`DATA_MAX_LENGTH_IN_BYTES`]. + /// early length check defined by [`DATA_MAX_LENGTH`]. struct DataVisitor; impl<'de> serde::de::Visitor<'de> for DataVisitor { @@ -88,7 +89,7 @@ impl<'de> Deserialize<'de> for Data { write!( formatter, "a byte array with length not exceeding {} bytes", - DATA_MAX_LENGTH_IN_BYTES + DATA_MAX_LENGTH.as_u64() ) } @@ -96,11 +97,14 @@ impl<'de> Deserialize<'de> for Data { where A: serde::de::SeqAccess<'de>, { - let mut vec = - Vec::with_capacity(seq.size_hint().unwrap_or(0).min(DATA_MAX_LENGTH_IN_BYTES)); + let mut vec = Vec::with_capacity( + seq.size_hint() + .unwrap_or(0) + .min(DATA_MAX_LENGTH.as_u64() as usize), + ); while let Some(value) = seq.next_element()? 
{ - if vec.len() >= DATA_MAX_LENGTH_IN_BYTES { + if vec.len() >= DATA_MAX_LENGTH.as_u64() as usize { return Err(serde::de::Error::custom(DataTooBigError)); } vec.push(value); @@ -121,7 +125,7 @@ impl BorshDeserialize for Data { let len = u32::deserialize_reader(reader)?; match len { 0 => Ok(Self::default()), - len if len as usize > DATA_MAX_LENGTH_IN_BYTES => Err(std::io::Error::new( + len if len as usize > DATA_MAX_LENGTH.as_u64() as usize => Err(std::io::Error::new( std::io::ErrorKind::InvalidData, DataTooBigError, )), @@ -140,21 +144,21 @@ mod tests { #[test] fn test_data_max_length_allowed() { - let max_vec = vec![0u8; DATA_MAX_LENGTH_IN_BYTES]; + let max_vec = vec![0u8; DATA_MAX_LENGTH.as_u64() as usize]; let result = Data::try_from(max_vec); assert!(result.is_ok()); } #[test] fn test_data_too_big_error() { - let big_vec = vec![0u8; DATA_MAX_LENGTH_IN_BYTES + 1]; + let big_vec = vec![0u8; DATA_MAX_LENGTH.as_u64() as usize + 1]; let result = Data::try_from(big_vec); assert!(matches!(result, Err(DataTooBigError))); } #[test] fn test_borsh_deserialize_exceeding_limit_error() { - let too_big_data = vec![0u8; DATA_MAX_LENGTH_IN_BYTES + 1]; + let too_big_data = vec![0u8; DATA_MAX_LENGTH.as_u64() as usize + 1]; let mut serialized = Vec::new(); <_ as BorshSerialize>::serialize(&too_big_data, &mut serialized).unwrap(); @@ -164,7 +168,7 @@ mod tests { #[test] fn test_json_deserialize_exceeding_limit_error() { - let data = vec![0u8; DATA_MAX_LENGTH_IN_BYTES + 1]; + let data = vec![0u8; DATA_MAX_LENGTH.as_u64() as usize + 1]; let json = serde_json::to_string(&data).unwrap(); let result: Result = serde_json::from_str(&json); diff --git a/nssa/src/merkle_tree/mod.rs b/nssa/src/merkle_tree/mod.rs index b3637b13..9c981b62 100644 --- a/nssa/src/merkle_tree/mod.rs +++ b/nssa/src/merkle_tree/mod.rs @@ -21,7 +21,7 @@ fn hash_value(value: &Value) -> Node { } #[cfg_attr(test, derive(Debug, PartialEq, Eq))] -#[derive(BorshSerialize, BorshDeserialize)] +#[derive(Clone, 
BorshSerialize, BorshDeserialize)] pub struct MerkleTree { nodes: Vec, capacity: usize, diff --git a/nssa/src/state.rs b/nssa/src/state.rs index f5ec2b46..1e347552 100644 --- a/nssa/src/state.rs +++ b/nssa/src/state.rs @@ -16,7 +16,7 @@ use crate::{ pub const MAX_NUMBER_CHAINED_CALLS: usize = 10; -#[derive(BorshSerialize, BorshDeserialize)] +#[derive(Clone, BorshSerialize, BorshDeserialize)] #[cfg_attr(test, derive(Debug, PartialEq, Eq))] pub(crate) struct CommitmentSet { merkle_tree: MerkleTree, @@ -64,6 +64,7 @@ impl CommitmentSet { } #[cfg_attr(test, derive(Debug, PartialEq, Eq))] +#[derive(Clone)] struct NullifierSet(BTreeSet); impl NullifierSet { @@ -104,7 +105,7 @@ impl BorshDeserialize for NullifierSet { } } -#[derive(BorshSerialize, BorshDeserialize)] +#[derive(Clone, BorshSerialize, BorshDeserialize)] #[cfg_attr(test, derive(Debug, PartialEq, Eq))] pub struct V02State { public_state: HashMap, @@ -1331,7 +1332,8 @@ pub mod tests { AccountId::new([0; 32]), ); - let large_data: Vec = vec![0; nssa_core::account::data::DATA_MAX_LENGTH_IN_BYTES + 1]; + let large_data: Vec = + vec![0; nssa_core::account::data::DATA_MAX_LENGTH.as_u64() as usize + 1]; let result = execute_and_prove( vec![public_account], diff --git a/sequencer_core/Cargo.toml b/sequencer_core/Cargo.toml index 870e22df..e939c7ae 100644 --- a/sequencer_core/Cargo.toml +++ b/sequencer_core/Cargo.toml @@ -16,6 +16,7 @@ base58.workspace = true anyhow.workspace = true serde.workspace = true serde_json.workspace = true +humantime-serde.workspace = true tempfile.workspace = true chrono.workspace = true log.workspace = true @@ -24,6 +25,7 @@ logos-blockchain-key-management-system-service.workspace = true logos-blockchain-core.workspace = true rand.workspace = true borsh.workspace = true +bytesize.workspace = true url.workspace = true jsonrpsee = { workspace = true, features = ["ws-client"] } diff --git a/sequencer_core/src/config.rs b/sequencer_core/src/config.rs index 27175917..003b82e8 100644 --- 
a/sequencer_core/src/config.rs +++ b/sequencer_core/src/config.rs @@ -2,14 +2,17 @@ use std::{ fs::File, io::BufReader, path::{Path, PathBuf}, + time::Duration, }; use anyhow::Result; use bedrock_client::BackoffConfig; +use bytesize::ByteSize; use common::{ block::{AccountInitialData, CommitmentsInitialData}, config::BasicAuth, }; +use humantime_serde; use logos_blockchain_core::mantle::ops::channel::ChannelId; use serde::{Deserialize, Serialize}; use url::Url; @@ -27,12 +30,17 @@ pub struct SequencerConfig { pub is_genesis_random: bool, /// Maximum number of transactions in block pub max_num_tx_in_block: usize, + /// Maximum block size (includes header and transactions) + #[serde(default = "default_max_block_size")] + pub max_block_size: ByteSize, /// Mempool maximum size pub mempool_max_size: usize, /// Interval in which blocks produced - pub block_create_timeout_millis: u64, + #[serde(with = "humantime_serde")] + pub block_create_timeout: Duration, /// Interval in which pending blocks are retried - pub retry_pending_blocks_timeout_millis: u64, + #[serde(with = "humantime_serde")] + pub retry_pending_blocks_timeout: Duration, /// Port to listen pub port: u16, /// List of initial accounts data @@ -68,3 +76,7 @@ impl SequencerConfig { Ok(serde_json::from_reader(reader)?) 
} } + +fn default_max_block_size() -> ByteSize { + ByteSize::mib(1) +} diff --git a/sequencer_core/src/lib.rs b/sequencer_core/src/lib.rs index 525eb117..083728bf 100644 --- a/sequencer_core/src/lib.rs +++ b/sequencer_core/src/lib.rs @@ -1,4 +1,4 @@ -use std::{fmt::Display, path::Path, time::Instant}; +use std::{path::Path, time::Instant}; use anyhow::{Context as _, Result, anyhow}; use bedrock_client::SignedMantleTx; @@ -13,7 +13,6 @@ use config::SequencerConfig; use log::{error, info, warn}; use logos_blockchain_key_management_system_service::keys::{ED25519_SECRET_KEY_SIZE, Ed25519Key}; use mempool::{MemPool, MemPoolHandle}; -use serde::{Deserialize, Serialize}; use crate::{ block_settlement_client::{BlockSettlementClient, BlockSettlementClientTrait, MsgId}, @@ -44,20 +43,6 @@ pub struct SequencerCore< indexer_client: IC, } -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -pub enum TransactionMalformationError { - InvalidSignature, - FailedToDecode { tx: HashType }, -} - -impl Display for TransactionMalformationError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{self:#?}") - } -} - -impl std::error::Error for TransactionMalformationError {} - impl SequencerCore { /// Starts the sequencer using the provided configuration. 
/// If an existing database is found, the sequencer state is loaded from it and @@ -204,13 +189,49 @@ impl SequencerCore max_block_size { + // Block would exceed the size limit; defer this transaction to the next block + warn!( + "Transaction with hash {tx_hash} deferred to next block: \ + block size {block_size} bytes would exceed limit of {max_block_size} bytes", + ); + + self.mempool.push_front(tx); + break; + } + match self.execute_check_transaction_on_state(tx) { Ok(valid_tx) => { - info!("Validated transaction with hash {tx_hash}, including it in block",); valid_transactions.push(valid_tx); + info!("Validated transaction with hash {tx_hash}, including it in block"); + if valid_transactions.len() >= self.sequencer_config.max_num_tx_in_block { break; } @@ -224,13 +245,6 @@ impl SequencerCore Result { #[cfg(all(test, feature = "mock"))] mod tests { - use std::{pin::pin, str::FromStr as _}; + use std::{pin::pin, str::FromStr as _, time::Duration}; use base58::ToBase58; use bedrock_client::BackoffConfig; @@ -375,22 +389,23 @@ mod tests { genesis_id: 1, is_genesis_random: false, max_num_tx_in_block: 10, + max_block_size: bytesize::ByteSize::mib(1), mempool_max_size: 10000, - block_create_timeout_millis: 1000, + block_create_timeout: Duration::from_secs(1), port: 8080, initial_accounts, initial_commitments: vec![], signing_key: *sequencer_sign_key_for_testing().value(), bedrock_config: BedrockConfig { backoff: BackoffConfig { - start_delay_millis: 100, + start_delay: Duration::from_millis(100), max_retries: 5, }, channel_id: ChannelId::from([0; 32]), node_url: "http://not-used-in-unit-tests".parse().unwrap(), auth: None, }, - retry_pending_blocks_timeout_millis: 1000 * 60 * 4, + retry_pending_blocks_timeout: Duration::from_secs(60 * 4), indexer_rpc_url: "ws://localhost:8779".parse().unwrap(), } } diff --git a/sequencer_rpc/Cargo.toml b/sequencer_rpc/Cargo.toml index f19aee43..42aa978f 100644 --- a/sequencer_rpc/Cargo.toml +++ b/sequencer_rpc/Cargo.toml @@ -25,6 +25,7 @@ 
itertools.workspace = true actix-web.workspace = true tokio.workspace = true borsh.workspace = true +bytesize.workspace = true [dev-dependencies] sequencer_core = { workspace = true, features = ["mock"] } diff --git a/sequencer_rpc/src/lib.rs b/sequencer_rpc/src/lib.rs index ac92ff45..074ea284 100644 --- a/sequencer_rpc/src/lib.rs +++ b/sequencer_rpc/src/lib.rs @@ -28,6 +28,7 @@ pub struct JsonHandler< > { sequencer_state: Arc>>, mempool_handle: MemPoolHandle, + max_block_size: usize, } fn respond(val: T) -> Result { diff --git a/sequencer_rpc/src/net_utils.rs b/sequencer_rpc/src/net_utils.rs index ee9f6aa1..a15cabec 100644 --- a/sequencer_rpc/src/net_utils.rs +++ b/sequencer_rpc/src/net_utils.rs @@ -52,7 +52,7 @@ fn get_cors(cors_allowed_origins: &[String]) -> Cors { .max_age(3600) } -pub fn new_http_server( +pub async fn new_http_server( config: RpcConfig, seuquencer_core: Arc>, mempool_handle: MemPoolHandle, @@ -63,9 +63,16 @@ pub fn new_http_server( limits_config, } = config; info!(target:NETWORK, "Starting HTTP server at {addr}"); + let max_block_size = seuquencer_core + .lock() + .await + .sequencer_config() + .max_block_size + .as_u64() as usize; let handler = web::Data::new(JsonHandler { sequencer_state: seuquencer_core.clone(), mempool_handle, + max_block_size, }); // HTTP server @@ -73,7 +80,10 @@ pub fn new_http_server( App::new() .wrap(get_cors(&cors_allowed_origins)) .app_data(handler.clone()) - .app_data(web::JsonConfig::default().limit(limits_config.json_payload_max_size)) + .app_data( + web::JsonConfig::default() + .limit(limits_config.json_payload_max_size.as_u64() as usize), + ) .wrap(middleware::Logger::default()) .service(web::resource("/").route(web::post().to(rpc_handler::))) }) diff --git a/sequencer_rpc/src/process.rs b/sequencer_rpc/src/process.rs index f1173bce..b3dca691 100644 --- a/sequencer_rpc/src/process.rs +++ b/sequencer_rpc/src/process.rs @@ -20,7 +20,7 @@ use common::{ SendTxResponse, }, }, - transaction::NSSATransaction, + 
transaction::{NSSATransaction, TransactionMalformationError}, }; use itertools::Itertools as _; use log::warn; @@ -94,6 +94,23 @@ impl JsonHandler let tx = borsh::from_slice::(&send_tx_req.transaction).unwrap(); let tx_hash = tx.hash(); + // Check transaction size against block size limit + // Reserve ~200 bytes for block header overhead + const BLOCK_HEADER_OVERHEAD: usize = 200; + let tx_size = borsh::to_vec(&tx) + .map_err(|_| TransactionMalformationError::FailedToDecode { tx: tx_hash })? + .len(); + + let max_tx_size = self.max_block_size.saturating_sub(BLOCK_HEADER_OVERHEAD); + + if tx_size > max_tx_size { + return Err(TransactionMalformationError::TransactionTooLarge { + size: tx_size, + max: max_tx_size, + } + .into()); + } + let authenticated_tx = tx .transaction_stateless_check() .inspect_err(|err| warn!("Error at pre_check {err:#?}"))?; @@ -323,7 +340,7 @@ impl JsonHandler #[cfg(test)] mod tests { - use std::{str::FromStr as _, sync::Arc}; + use std::{str::FromStr as _, sync::Arc, time::Duration}; use base58::ToBase58; use base64::{Engine, engine::general_purpose}; @@ -377,16 +394,17 @@ mod tests { genesis_id: 1, is_genesis_random: false, max_num_tx_in_block: 10, + max_block_size: bytesize::ByteSize::mib(1), mempool_max_size: 1000, - block_create_timeout_millis: 1000, + block_create_timeout: Duration::from_secs(1), port: 8080, initial_accounts, initial_commitments: vec![], signing_key: *sequencer_sign_key_for_testing().value(), - retry_pending_blocks_timeout_millis: 1000 * 60 * 4, + retry_pending_blocks_timeout: Duration::from_secs(60 * 4), bedrock_config: BedrockConfig { backoff: BackoffConfig { - start_delay_millis: 100, + start_delay: Duration::from_millis(100), max_retries: 5, }, channel_id: [42; 32].into(), @@ -437,12 +455,14 @@ mod tests { .produce_new_block_with_mempool_transactions() .unwrap(); + let max_block_size = sequencer_core.sequencer_config().max_block_size.as_u64() as usize; let sequencer_core = Arc::new(Mutex::new(sequencer_core)); ( 
JsonHandlerWithMockClients { sequencer_state: sequencer_core, mempool_handle, + max_block_size, }, initial_accounts, tx, diff --git a/sequencer_rpc/src/types/err_rpc.rs b/sequencer_rpc/src/types/err_rpc.rs index f9f44051..92214c54 100644 --- a/sequencer_rpc/src/types/err_rpc.rs +++ b/sequencer_rpc/src/types/err_rpc.rs @@ -44,10 +44,7 @@ impl RpcErrKind for RpcErrInternal { impl RpcErrKind for TransactionMalformationError { fn into_rpc_err(self) -> RpcError { - RpcError::new_internal_error( - Some(serde_json::to_value(self).unwrap()), - "transaction not accepted", - ) + RpcError::invalid_params(Some(serde_json::to_value(self).unwrap())) } } diff --git a/sequencer_runner/configs/debug/sequencer_config.json b/sequencer_runner/configs/debug/sequencer_config.json index 3f8cce67..002228f4 100644 --- a/sequencer_runner/configs/debug/sequencer_config.json +++ b/sequencer_runner/configs/debug/sequencer_config.json @@ -4,13 +4,14 @@ "genesis_id": 1, "is_genesis_random": true, "max_num_tx_in_block": 20, + "max_block_size": "1 MiB", "mempool_max_size": 1000, - "block_create_timeout_millis": 15000, - "retry_pending_blocks_timeout_millis": 5000, + "block_create_timeout": "15s", + "retry_pending_blocks_timeout": "5s", "port": 3040, "bedrock_config": { "backoff": { - "start_delay_millis": 100, + "start_delay": "100ms", "max_retries": 5 }, "channel_id": "0101010101010101010101010101010101010101010101010101010101010101", diff --git a/sequencer_runner/configs/docker/sequencer_config.json b/sequencer_runner/configs/docker/sequencer_config.json index 4dd817c9..ce79f4e2 100644 --- a/sequencer_runner/configs/docker/sequencer_config.json +++ b/sequencer_runner/configs/docker/sequencer_config.json @@ -4,13 +4,14 @@ "genesis_id": 1, "is_genesis_random": true, "max_num_tx_in_block": 20, + "max_block_size": "1 MiB", "mempool_max_size": 10000, - "block_create_timeout_millis": 10000, + "block_create_timeout": "10s", "port": 3040, - "retry_pending_blocks_timeout_millis": 7000, + 
"retry_pending_blocks_timeout": "7s", "bedrock_config": { "backoff": { - "start_delay_millis": 100, + "start_delay": "100ms", "max_retries": 5 }, "channel_id": "0101010101010101010101010101010101010101010101010101010101010101", diff --git a/sequencer_runner/src/lib.rs b/sequencer_runner/src/lib.rs index c80ae989..7a02bfc3 100644 --- a/sequencer_runner/src/lib.rs +++ b/sequencer_runner/src/lib.rs @@ -99,9 +99,8 @@ impl Drop for SequencerHandle { } pub async fn startup_sequencer(app_config: SequencerConfig) -> Result { - let block_timeout = Duration::from_millis(app_config.block_create_timeout_millis); - let retry_pending_blocks_timeout = - Duration::from_millis(app_config.retry_pending_blocks_timeout_millis); + let block_timeout = app_config.block_create_timeout; + let retry_pending_blocks_timeout = app_config.retry_pending_blocks_timeout; let port = app_config.port; let (sequencer_core, mempool_handle) = SequencerCore::start_from_config(app_config).await; @@ -114,7 +113,8 @@ pub async fn startup_sequencer(app_config: SequencerConfig) -> Result { println!("{}", wallet_core.storage.wallet_config.sequencer_addr); } - "seq_poll_timeout_millis" => { - println!( - "{}", - wallet_core.storage.wallet_config.seq_poll_timeout_millis - ); + "seq_poll_timeout" => { + println!("{:?}", wallet_core.storage.wallet_config.seq_poll_timeout); } "seq_tx_poll_max_blocks" => { println!( @@ -97,9 +94,10 @@ impl WalletSubcommand for ConfigSubcommand { "sequencer_addr" => { wallet_core.storage.wallet_config.sequencer_addr = value.parse()?; } - "seq_poll_timeout_millis" => { - wallet_core.storage.wallet_config.seq_poll_timeout_millis = - value.parse()?; + "seq_poll_timeout" => { + wallet_core.storage.wallet_config.seq_poll_timeout = + humantime::parse_duration(&value) + .map_err(|e| anyhow::anyhow!("Invalid duration: {}", e))?; } "seq_tx_poll_max_blocks" => { wallet_core.storage.wallet_config.seq_tx_poll_max_blocks = value.parse()?; @@ -131,9 +129,9 @@ impl WalletSubcommand for 
ConfigSubcommand { "sequencer_addr" => { println!("HTTP V4 account_id of sequencer"); } - "seq_poll_timeout_millis" => { + "seq_poll_timeout" => { println!( - "Sequencer client retry variable: how much time to wait between retries in milliseconds(can be zero)" + "Sequencer client retry variable: how much time to wait between retries (human readable duration)" ); } "seq_tx_poll_max_blocks" => { diff --git a/wallet/src/cli/mod.rs b/wallet/src/cli/mod.rs index 30192e54..87c2bb31 100644 --- a/wallet/src/cli/mod.rs +++ b/wallet/src/cli/mod.rs @@ -173,7 +173,7 @@ pub async fn execute_subcommand( .sequencer_client .send_tx_program(transaction) .await - .context("Transaction submission error"); + .context("Transaction submission error")?; SubcommandReturnValue::Empty } @@ -191,10 +191,7 @@ pub async fn execute_continuous_run(wallet_core: &mut WalletCore) -> Result<()> .last_block; wallet_core.sync_to_block(latest_block_num).await?; - tokio::time::sleep(std::time::Duration::from_millis( - wallet_core.config().seq_poll_timeout_millis, - )) - .await; + tokio::time::sleep(wallet_core.config().seq_poll_timeout).await; } } diff --git a/wallet/src/config.rs b/wallet/src/config.rs index 08bb610d..3780a065 100644 --- a/wallet/src/config.rs +++ b/wallet/src/config.rs @@ -2,10 +2,12 @@ use std::{ collections::HashMap, io::{BufReader, Write as _}, path::Path, + time::Duration, }; use anyhow::{Context as _, Result}; use common::config::BasicAuth; +use humantime_serde; use key_protocol::key_management::{ KeyChain, key_tree::{ @@ -184,8 +186,9 @@ pub struct WalletConfig { pub override_rust_log: Option, /// Sequencer URL pub sequencer_addr: Url, - /// Sequencer polling duration for new blocks in milliseconds - pub seq_poll_timeout_millis: u64, + /// Sequencer polling duration for new blocks + #[serde(with = "humantime_serde")] + pub seq_poll_timeout: Duration, /// Sequencer polling max number of blocks to find transaction pub seq_tx_poll_max_blocks: usize, /// Sequencer polling max number 
error retries @@ -204,7 +207,7 @@ impl Default for WalletConfig { Self { override_rust_log: None, sequencer_addr: "http://127.0.0.1:3040".parse().unwrap(), - seq_poll_timeout_millis: 12000, + seq_poll_timeout: Duration::from_secs(12), seq_tx_poll_max_blocks: 5, seq_poll_max_retries: 5, seq_block_poll_max_amount: 100, @@ -539,7 +542,7 @@ impl WalletConfig { let WalletConfig { override_rust_log, sequencer_addr, - seq_poll_timeout_millis, + seq_poll_timeout, seq_tx_poll_max_blocks, seq_poll_max_retries, seq_block_poll_max_amount, @@ -550,7 +553,7 @@ impl WalletConfig { let WalletConfigOverrides { override_rust_log: o_override_rust_log, sequencer_addr: o_sequencer_addr, - seq_poll_timeout_millis: o_seq_poll_timeout_millis, + seq_poll_timeout: o_seq_poll_timeout, seq_tx_poll_max_blocks: o_seq_tx_poll_max_blocks, seq_poll_max_retries: o_seq_poll_max_retries, seq_block_poll_max_amount: o_seq_block_poll_max_amount, @@ -566,9 +569,9 @@ impl WalletConfig { warn!("Overriding wallet config 'sequencer_addr' to {v}"); *sequencer_addr = v; } - if let Some(v) = o_seq_poll_timeout_millis { - warn!("Overriding wallet config 'seq_poll_timeout_millis' to {v}"); - *seq_poll_timeout_millis = v; + if let Some(v) = o_seq_poll_timeout { + warn!("Overriding wallet config 'seq_poll_timeout' to {v:?}"); + *seq_poll_timeout = v; } if let Some(v) = o_seq_tx_poll_max_blocks { warn!("Overriding wallet config 'seq_tx_poll_max_blocks' to {v}"); diff --git a/wallet/src/lib.rs b/wallet/src/lib.rs index cb364fd3..0162dcb1 100644 --- a/wallet/src/lib.rs +++ b/wallet/src/lib.rs @@ -156,6 +156,8 @@ impl WalletCore { let mut storage_file = tokio::fs::File::create(&self.storage_path).await?; storage_file.write_all(&storage).await?; + // Ensure data is flushed to disk before returning to prevent race conditions + storage_file.sync_all().await?; println!("Stored persistent accounts at {:#?}", self.storage_path); @@ -168,6 +170,8 @@ impl WalletCore { let mut config_file = 
tokio::fs::File::create(&self.config_path).await?; config_file.write_all(&config).await?; + // Ensure data is flushed to disk before returning to prevent race conditions + config_file.sync_all().await?; info!("Stored data at {:#?}", self.config_path); diff --git a/wallet/src/poller.rs b/wallet/src/poller.rs index 73ec05c2..c037a36a 100644 --- a/wallet/src/poller.rs +++ b/wallet/src/poller.rs @@ -1,4 +1,4 @@ -use std::sync::Arc; +use std::{sync::Arc, time::Duration}; use anyhow::Result; use common::{HashType, block::HashableBlockData, sequencer_client::SequencerClient}; @@ -11,8 +11,7 @@ use crate::config::WalletConfig; pub struct TxPoller { polling_max_blocks_to_query: usize, polling_max_error_attempts: u64, - // TODO: This should be Duration - polling_delay_millis: u64, + polling_delay: Duration, block_poll_max_amount: u64, client: Arc, } @@ -20,7 +19,7 @@ pub struct TxPoller { impl TxPoller { pub fn new(config: WalletConfig, client: Arc) -> Self { Self { - polling_delay_millis: config.seq_poll_timeout_millis, + polling_delay: config.seq_poll_timeout, polling_max_blocks_to_query: config.seq_tx_poll_max_blocks, polling_max_error_attempts: config.seq_poll_max_retries, block_poll_max_amount: config.seq_block_poll_max_amount, @@ -62,7 +61,7 @@ impl TxPoller { return Ok(tx); } - tokio::time::sleep(std::time::Duration::from_millis(self.polling_delay_millis)).await; + tokio::time::sleep(self.polling_delay).await; } anyhow::bail!("Transaction not found in preconfigured amount of blocks");