feat: use human-readable byte sizes and durations

This commit is contained in:
Daniil Polyakov 2026-02-24 20:52:14 +03:00
parent 8b5524901c
commit 437e5addb4
35 changed files with 154 additions and 134 deletions

16
Cargo.lock generated
View File

@ -1210,6 +1210,7 @@ dependencies = [
"anyhow",
"common",
"futures",
"humantime-serde",
"log",
"logos-blockchain-chain-broadcast-service",
"logos-blockchain-chain-service",
@ -3440,6 +3441,16 @@ version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424"
[[package]]
name = "humantime-serde"
version = "1.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57a3db5ea5923d99402c94e9feb261dc5ee9b4efa158b0315f788cf549cc200c"
dependencies = [
"humantime",
"serde",
]
[[package]]
name = "hydration_context"
version = "0.3.0"
@ -3735,6 +3746,7 @@ dependencies = [
"borsh",
"common",
"futures",
"humantime-serde",
"log",
"logos-blockchain-core",
"nssa",
@ -5573,6 +5585,7 @@ dependencies = [
"base58",
"borsh",
"bytemuck",
"bytesize",
"chacha20",
"k256",
"risc0-zkvm",
@ -7445,6 +7458,7 @@ dependencies = [
"chrono",
"common",
"futures",
"humantime-serde",
"jsonrpsee",
"log",
"logos-blockchain-core",
@ -8981,6 +8995,8 @@ dependencies = [
"env_logger",
"futures",
"hex",
"humantime",
"humantime-serde",
"indicatif",
"itertools 0.14.0",
"key_protocol",

View File

@ -88,6 +88,8 @@ sha2 = "0.10.8"
hex = "0.4.3"
bytemuck = "1.24.0"
bytesize = { version = "2.3.1", features = ["serde"] }
humantime-serde = "1.1"
humantime = "2.1"
aes-gcm = "0.10.3"
toml = "0.7.4"
bincode = "1.3.3"

View File

@ -13,6 +13,7 @@ tokio-retry.workspace = true
futures.workspace = true
log.workspace = true
serde.workspace = true
humantime-serde.workspace = true
logos-blockchain-common-http-client.workspace = true
logos-blockchain-core.workspace = true
logos-blockchain-chain-broadcast-service.workspace = true

View File

@ -3,6 +3,8 @@ use std::time::Duration;
use anyhow::{Context as _, Result};
use common::config::BasicAuth;
use futures::{Stream, TryFutureExt};
#[expect(clippy::single_component_path_imports, reason = "Satisfy machete")]
use humantime_serde;
use log::{info, warn};
pub use logos_blockchain_chain_broadcast_service::BlockInfo;
use logos_blockchain_chain_service::CryptarchiaInfo;
@ -15,14 +17,15 @@ use tokio_retry::Retry;
/// Fibonacci backoff retry strategy configuration
#[derive(Debug, Copy, Clone, Serialize, Deserialize)]
pub struct BackoffConfig {
pub start_delay_millis: u64,
#[serde(with = "humantime_serde")]
pub start_delay: Duration,
pub max_retries: usize,
}
impl Default for BackoffConfig {
fn default() -> Self {
Self {
start_delay_millis: 100,
start_delay: Duration::from_millis(100),
max_retries: 5,
}
}
@ -93,7 +96,9 @@ impl BedrockClient {
}
fn backoff_strategy(&self) -> impl Iterator<Item = Duration> {
tokio_retry::strategy::FibonacciBackoff::from_millis(self.backoff.start_delay_millis)
.take(self.backoff.max_retries)
tokio_retry::strategy::FibonacciBackoff::from_millis(
self.backoff.start_delay.as_millis() as u64
)
.take(self.backoff.max_retries)
}
}

View File

@ -1,3 +1,4 @@
use bytesize::ByteSize;
use serde::{Deserialize, Serialize};
pub mod errors;
@ -8,13 +9,13 @@ pub mod requests;
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct RpcLimitsConfig {
/// Maximum byte size of the json payload.
pub json_payload_max_size: usize,
pub json_payload_max_size: ByteSize,
}
impl Default for RpcLimitsConfig {
fn default() -> Self {
Self {
json_payload_max_size: 10 * 1024 * 1024,
json_payload_max_size: ByteSize::mib(10),
}
}
}

View File

@ -103,4 +103,6 @@ pub enum TransactionMalformationError {
InvalidSignature,
#[error("Failed to decode transaction with hash: {tx:?}")]
FailedToDecode { tx: HashType },
#[error("Transaction size {size} exceeds maximum allowed size of {max} bytes")]
TransactionTooLarge { size: usize, max: usize },
}

View File

@ -346,7 +346,7 @@ _wallet_config() {
'all'
'override_rust_log'
'sequencer_addr'
'seq_poll_timeout_millis'
'seq_poll_timeout'
'seq_tx_poll_max_blocks'
'seq_poll_max_retries'
'seq_block_poll_max_amount'

View File

@ -1,9 +1,9 @@
{
"resubscribe_interval_millis": 1000,
"resubscribe_interval": "1s",
"bedrock_client_config": {
"addr": "http://logos-blockchain-node-0:18080",
"backoff": {
"start_delay_millis": 100,
"start_delay": "100ms",
"max_retries": 5
}
},

View File

@ -6,12 +6,12 @@
"max_num_tx_in_block": 20,
"max_block_size": "1 MiB",
"mempool_max_size": 10000,
"block_create_timeout_millis": 10000,
"retry_pending_blocks_timeout_millis": 7000,
"block_create_timeout": "10s",
"retry_pending_blocks_timeout": "7s",
"port": 3040,
"bedrock_config": {
"backoff": {
"start_delay_millis": 100,
"start_delay": "100ms",
"max_retries": 5
},
"channel_id": "0101010101010101010101010101010101010101010101010101010101010101",

View File

@ -14,6 +14,7 @@ storage.workspace = true
anyhow.workspace = true
log.workspace = true
serde.workspace = true
humantime-serde.workspace = true
tokio.workspace = true
borsh.workspace = true
futures.workspace = true

View File

@ -2,6 +2,7 @@ use std::{
fs::File,
io::BufReader,
path::{Path, PathBuf},
time::Duration,
};
use anyhow::{Context as _, Result};
@ -10,6 +11,7 @@ use common::{
block::{AccountInitialData, CommitmentsInitialData},
config::BasicAuth,
};
use humantime_serde;
pub use logos_blockchain_core::mantle::ops::channel::ChannelId;
use serde::{Deserialize, Serialize};
use url::Url;
@ -33,7 +35,8 @@ pub struct IndexerConfig {
pub initial_commitments: Vec<CommitmentsInitialData>,
/// Sequencers signing key
pub signing_key: [u8; 32],
pub consensus_info_polling_interval_millis: u64,
#[serde(with = "humantime_serde")]
pub consensus_info_polling_interval: Duration,
pub bedrock_client_config: ClientConfig,
pub channel_id: ChannelId,
}

View File

@ -174,13 +174,10 @@ impl IndexerCore {
break Ok(next_lib);
} else {
info!(
"Wait {}ms to not spam the node",
self.config.consensus_info_polling_interval_millis
"Wait {:?} to not spam the node",
self.config.consensus_info_polling_interval
);
tokio::time::sleep(std::time::Duration::from_millis(
self.config.consensus_info_polling_interval_millis,
))
.await;
tokio::time::sleep(self.config.consensus_info_polling_interval).await;
}
}
}

View File

@ -1,10 +1,10 @@
{
"home": "./indexer/service",
"consensus_info_polling_interval_millis": 10000,
"consensus_info_polling_interval": "10s",
"bedrock_client_config": {
"addr": "http://localhost:8080",
"backoff": {
"start_delay_millis": 100,
"start_delay": "100ms",
"max_retries": 5
}
},

View File

@ -1,4 +1,4 @@
use std::{net::SocketAddr, path::PathBuf};
use std::{net::SocketAddr, path::PathBuf, time::Duration};
use anyhow::{Context, Result};
use bytesize::ByteSize;
@ -20,13 +20,13 @@ pub fn indexer_config(
) -> Result<IndexerConfig> {
Ok(IndexerConfig {
home,
consensus_info_polling_interval_millis: 10000,
consensus_info_polling_interval: Duration::from_secs(10),
bedrock_client_config: ClientConfig {
addr: addr_to_url(UrlProtocol::Http, bedrock_addr)
.context("Failed to convert bedrock addr to URL")?,
auth: None,
backoff: BackoffConfig {
start_delay_millis: 100,
start_delay: Duration::from_millis(100),
max_retries: 10,
},
},
@ -42,7 +42,7 @@ pub struct SequencerPartialConfig {
pub max_num_tx_in_block: usize,
pub max_block_size: ByteSize,
pub mempool_max_size: usize,
pub block_create_timeout_millis: u64,
pub block_create_timeout: Duration,
}
impl Default for SequencerPartialConfig {
@ -51,7 +51,7 @@ impl Default for SequencerPartialConfig {
max_num_tx_in_block: 20,
max_block_size: ByteSize::mib(1),
mempool_max_size: 10_000,
block_create_timeout_millis: 10_000,
block_create_timeout: Duration::from_secs(10),
}
}
}
@ -67,7 +67,7 @@ pub fn sequencer_config(
max_num_tx_in_block,
max_block_size,
mempool_max_size,
block_create_timeout_millis,
block_create_timeout,
} = partial;
Ok(SequencerConfig {
@ -78,15 +78,15 @@ pub fn sequencer_config(
max_num_tx_in_block,
max_block_size,
mempool_max_size,
block_create_timeout_millis,
retry_pending_blocks_timeout_millis: 120_000,
block_create_timeout,
retry_pending_blocks_timeout: Duration::from_secs(120),
port: 0,
initial_accounts: initial_data.sequencer_initial_accounts(),
initial_commitments: initial_data.sequencer_initial_commitments(),
signing_key: [37; 32],
bedrock_config: BedrockConfig {
backoff: BackoffConfig {
start_delay_millis: 100,
start_delay: Duration::from_millis(100),
max_retries: 5,
},
channel_id: bedrock_channel_id(),
@ -107,7 +107,7 @@ pub fn wallet_config(
override_rust_log: None,
sequencer_addr: addr_to_url(UrlProtocol::Http, sequencer_addr)
.context("Failed to convert sequencer addr to URL")?,
seq_poll_timeout_millis: 30_000,
seq_poll_timeout: Duration::from_secs(30),
seq_tx_poll_max_blocks: 15,
seq_poll_max_retries: 10,
seq_block_poll_max_amount: 100,

View File

@ -16,7 +16,7 @@ async fn reject_oversized_transaction() -> Result<()> {
max_num_tx_in_block: 100,
max_block_size: ByteSize::mib(1),
mempool_max_size: 1000,
block_create_timeout_millis: 10_000,
block_create_timeout: Duration::from_secs(10),
})
.build()
.await?;
@ -57,7 +57,7 @@ async fn accept_transaction_within_limit() -> Result<()> {
max_num_tx_in_block: 100,
max_block_size: ByteSize::mib(1),
mempool_max_size: 1000,
block_create_timeout_millis: 10_000,
block_create_timeout: Duration::from_secs(10),
})
.build()
.await?;
@ -99,7 +99,7 @@ async fn transaction_deferred_to_next_block_when_current_full() -> Result<()> {
max_num_tx_in_block: 100,
max_block_size: block_size,
mempool_max_size: 1000,
block_create_timeout_millis: 10_000,
block_create_timeout: Duration::from_secs(10),
})
.build()
.await?;

View File

@ -8,22 +8,22 @@ use wallet::cli::{Command, config::ConfigSubcommand};
async fn modify_config_field() -> Result<()> {
let mut ctx = TestContext::new().await?;
let old_seq_poll_timeout_millis = ctx.wallet().config().seq_poll_timeout_millis;
let old_seq_poll_timeout = ctx.wallet().config().seq_poll_timeout;
// Change config field
let command = Command::Config(ConfigSubcommand::Set {
key: "seq_poll_timeout_millis".to_string(),
value: "1000".to_string(),
key: "seq_poll_timeout".to_string(),
value: "1s".to_string(),
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
let new_seq_poll_timeout_millis = ctx.wallet().config().seq_poll_timeout_millis;
assert_eq!(new_seq_poll_timeout_millis, 1000);
let new_seq_poll_timeout = ctx.wallet().config().seq_poll_timeout;
assert_eq!(new_seq_poll_timeout, std::time::Duration::from_secs(1));
// Return how it was at the beginning
let command = Command::Config(ConfigSubcommand::Set {
key: "seq_poll_timeout_millis".to_string(),
value: old_seq_poll_timeout_millis.to_string(),
key: "seq_poll_timeout".to_string(),
value: humantime::format_duration(old_seq_poll_timeout).to_string(),
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;

View File

@ -181,7 +181,7 @@ impl TpsTestManager {
max_num_tx_in_block: 300,
max_block_size: ByteSize::mb(500),
mempool_max_size: 10_000,
block_create_timeout_millis: 12_000,
block_create_timeout: Duration::from_secs(12),
}
}
}

View File

@ -11,6 +11,7 @@ serde.workspace = true
serde_with.workspace = true
thiserror.workspace = true
bytemuck.workspace = true
bytesize.workspace = true
base58.workspace = true
k256 = { workspace = true, optional = true }
chacha20 = { version = "0.9", default-features = false }

View File

@ -1,9 +1,10 @@
use std::ops::Deref;
use borsh::{BorshDeserialize, BorshSerialize};
use bytesize::ByteSize;
use serde::{Deserialize, Serialize};
pub const DATA_MAX_LENGTH_IN_BYTES: usize = 100 * 1024; // 100 KiB
pub const DATA_MAX_LENGTH: ByteSize = ByteSize::kib(100);
#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, BorshSerialize)]
pub struct Data(Vec<u8>);
@ -22,7 +23,7 @@ impl Data {
let mut u32_bytes = [0u8; 4];
cursor.read_exact(&mut u32_bytes)?;
let data_length = u32::from_le_bytes(u32_bytes);
if data_length as usize > DATA_MAX_LENGTH_IN_BYTES {
if data_length as usize > DATA_MAX_LENGTH.as_u64() as usize {
return Err(
std::io::Error::new(std::io::ErrorKind::InvalidData, DataTooBigError).into(),
);
@ -35,7 +36,7 @@ impl Data {
}
#[derive(Debug, thiserror::Error, Clone, Copy, PartialEq, Eq)]
#[error("data length exceeds maximum allowed length of {DATA_MAX_LENGTH_IN_BYTES} bytes")]
#[error("data length exceeds maximum allowed length of {} bytes", DATA_MAX_LENGTH.as_u64())]
pub struct DataTooBigError;
impl From<Data> for Vec<u8> {
@ -48,7 +49,7 @@ impl TryFrom<Vec<u8>> for Data {
type Error = DataTooBigError;
fn try_from(value: Vec<u8>) -> Result<Self, Self::Error> {
if value.len() > DATA_MAX_LENGTH_IN_BYTES {
if value.len() > DATA_MAX_LENGTH.as_u64() as usize {
Err(DataTooBigError)
} else {
Ok(Self(value))
@ -78,7 +79,7 @@ impl<'de> Deserialize<'de> for Data {
/// Data deserialization visitor.
///
/// Compared to a simple deserialization into a `Vec<u8>`, this visitor enforces
/// early length check defined by [`DATA_MAX_LENGTH_IN_BYTES`].
/// early length check defined by [`DATA_MAX_LENGTH`].
struct DataVisitor;
impl<'de> serde::de::Visitor<'de> for DataVisitor {
@ -88,7 +89,7 @@ impl<'de> Deserialize<'de> for Data {
write!(
formatter,
"a byte array with length not exceeding {} bytes",
DATA_MAX_LENGTH_IN_BYTES
DATA_MAX_LENGTH.as_u64()
)
}
@ -96,11 +97,14 @@ impl<'de> Deserialize<'de> for Data {
where
A: serde::de::SeqAccess<'de>,
{
let mut vec =
Vec::with_capacity(seq.size_hint().unwrap_or(0).min(DATA_MAX_LENGTH_IN_BYTES));
let mut vec = Vec::with_capacity(
seq.size_hint()
.unwrap_or(0)
.min(DATA_MAX_LENGTH.as_u64() as usize),
);
while let Some(value) = seq.next_element()? {
if vec.len() >= DATA_MAX_LENGTH_IN_BYTES {
if vec.len() >= DATA_MAX_LENGTH.as_u64() as usize {
return Err(serde::de::Error::custom(DataTooBigError));
}
vec.push(value);
@ -121,7 +125,7 @@ impl BorshDeserialize for Data {
let len = u32::deserialize_reader(reader)?;
match len {
0 => Ok(Self::default()),
len if len as usize > DATA_MAX_LENGTH_IN_BYTES => Err(std::io::Error::new(
len if len as usize > DATA_MAX_LENGTH.as_u64() as usize => Err(std::io::Error::new(
std::io::ErrorKind::InvalidData,
DataTooBigError,
)),
@ -140,21 +144,21 @@ mod tests {
#[test]
fn test_data_max_length_allowed() {
let max_vec = vec![0u8; DATA_MAX_LENGTH_IN_BYTES];
let max_vec = vec![0u8; DATA_MAX_LENGTH.as_u64() as usize];
let result = Data::try_from(max_vec);
assert!(result.is_ok());
}
#[test]
fn test_data_too_big_error() {
let big_vec = vec![0u8; DATA_MAX_LENGTH_IN_BYTES + 1];
let big_vec = vec![0u8; DATA_MAX_LENGTH.as_u64() as usize + 1];
let result = Data::try_from(big_vec);
assert!(matches!(result, Err(DataTooBigError)));
}
#[test]
fn test_borsh_deserialize_exceeding_limit_error() {
let too_big_data = vec![0u8; DATA_MAX_LENGTH_IN_BYTES + 1];
let too_big_data = vec![0u8; DATA_MAX_LENGTH.as_u64() as usize + 1];
let mut serialized = Vec::new();
<_ as BorshSerialize>::serialize(&too_big_data, &mut serialized).unwrap();
@ -164,7 +168,7 @@ mod tests {
#[test]
fn test_json_deserialize_exceeding_limit_error() {
let data = vec![0u8; DATA_MAX_LENGTH_IN_BYTES + 1];
let data = vec![0u8; DATA_MAX_LENGTH.as_u64() as usize + 1];
let json = serde_json::to_string(&data).unwrap();
let result: Result<Data, _> = serde_json::from_str(&json);

View File

@ -1332,7 +1332,8 @@ pub mod tests {
AccountId::new([0; 32]),
);
let large_data: Vec<u8> = vec![0; nssa_core::account::data::DATA_MAX_LENGTH_IN_BYTES + 1];
let large_data: Vec<u8> =
vec![0; nssa_core::account::data::DATA_MAX_LENGTH.as_u64() as usize + 1];
let result = execute_and_prove(
vec![public_account],

View File

@ -16,6 +16,7 @@ base58.workspace = true
anyhow.workspace = true
serde.workspace = true
serde_json.workspace = true
humantime-serde.workspace = true
tempfile.workspace = true
chrono.workspace = true
log.workspace = true

View File

@ -2,6 +2,7 @@ use std::{
fs::File,
io::BufReader,
path::{Path, PathBuf},
time::Duration,
};
use anyhow::Result;
@ -11,6 +12,7 @@ use common::{
block::{AccountInitialData, CommitmentsInitialData},
config::BasicAuth,
};
use humantime_serde;
use logos_blockchain_core::mantle::ops::channel::ChannelId;
use serde::{Deserialize, Serialize};
use url::Url;
@ -34,9 +36,11 @@ pub struct SequencerConfig {
/// Mempool maximum size
pub mempool_max_size: usize,
/// Interval in which blocks produced
pub block_create_timeout_millis: u64,
#[serde(with = "humantime_serde")]
pub block_create_timeout: Duration,
/// Interval in which pending blocks are retried
pub retry_pending_blocks_timeout_millis: u64,
#[serde(with = "humantime_serde")]
pub retry_pending_blocks_timeout: Duration,
/// Port to listen
pub port: u16,
/// List of initial accounts data

View File

@ -1,4 +1,4 @@
use std::{fmt::Display, path::Path, time::Instant};
use std::{path::Path, time::Instant};
use anyhow::{Context as _, Result, anyhow};
use bedrock_client::SignedMantleTx;
@ -13,7 +13,6 @@ use config::SequencerConfig;
use log::{error, info, warn};
use logos_blockchain_key_management_system_service::keys::{ED25519_SECRET_KEY_SIZE, Ed25519Key};
use mempool::{MemPool, MemPoolHandle};
use serde::{Deserialize, Serialize};
use crate::{
block_settlement_client::{BlockSettlementClient, BlockSettlementClientTrait, MsgId},
@ -44,21 +43,6 @@ pub struct SequencerCore<
indexer_client: IC,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum TransactionMalformationError {
InvalidSignature,
FailedToDecode { tx: HashType },
TransactionTooLarge { size: usize, max: usize },
}
impl Display for TransactionMalformationError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{self:#?}")
}
}
impl std::error::Error for TransactionMalformationError {}
impl<BC: BlockSettlementClientTrait, IC: IndexerClientTrait> SequencerCore<BC, IC> {
/// Starts the sequencer using the provided configuration.
/// If an existing database is found, the sequencer state is loaded from it and
@ -376,7 +360,7 @@ fn load_or_create_signing_key(path: &Path) -> Result<Ed25519Key> {
#[cfg(all(test, feature = "mock"))]
mod tests {
use std::{pin::pin, str::FromStr as _};
use std::{pin::pin, str::FromStr as _, time::Duration};
use base58::ToBase58;
use bedrock_client::BackoffConfig;
@ -407,21 +391,21 @@ mod tests {
max_num_tx_in_block: 10,
max_block_size: bytesize::ByteSize::mib(1),
mempool_max_size: 10000,
block_create_timeout_millis: 1000,
block_create_timeout: Duration::from_secs(1),
port: 8080,
initial_accounts,
initial_commitments: vec![],
signing_key: *sequencer_sign_key_for_testing().value(),
bedrock_config: BedrockConfig {
backoff: BackoffConfig {
start_delay_millis: 100,
start_delay: Duration::from_millis(100),
max_retries: 5,
},
channel_id: ChannelId::from([0; 32]),
node_url: "http://not-used-in-unit-tests".parse().unwrap(),
auth: None,
},
retry_pending_blocks_timeout_millis: 1000 * 60 * 4,
retry_pending_blocks_timeout: Duration::from_secs(60 * 4),
indexer_rpc_url: "ws://localhost:8779".parse().unwrap(),
}
}

View File

@ -80,7 +80,10 @@ pub async fn new_http_server(
App::new()
.wrap(get_cors(&cors_allowed_origins))
.app_data(handler.clone())
.app_data(web::JsonConfig::default().limit(limits_config.json_payload_max_size))
.app_data(
web::JsonConfig::default()
.limit(limits_config.json_payload_max_size.as_u64() as usize),
)
.wrap(middleware::Logger::default())
.service(web::resource("/").route(web::post().to(rpc_handler::<JsonHandler>)))
})

View File

@ -20,7 +20,7 @@ use common::{
SendTxResponse,
},
},
transaction::NSSATransaction,
transaction::{NSSATransaction, TransactionMalformationError},
};
use itertools::Itertools as _;
use log::warn;
@ -98,21 +98,17 @@ impl<BC: BlockSettlementClientTrait, IC: IndexerClientTrait> JsonHandler<BC, IC>
// Reserve ~200 bytes for block header overhead
const BLOCK_HEADER_OVERHEAD: usize = 200;
let tx_size = borsh::to_vec(&tx)
.map_err(
|_| sequencer_core::TransactionMalformationError::FailedToDecode { tx: tx_hash },
)?
.map_err(|_| TransactionMalformationError::FailedToDecode { tx: tx_hash })?
.len();
let max_tx_size = self.max_block_size.saturating_sub(BLOCK_HEADER_OVERHEAD);
if tx_size > max_tx_size {
return Err(
sequencer_core::TransactionMalformationError::TransactionTooLarge {
size: tx_size,
max: max_tx_size,
}
.into(),
);
return Err(TransactionMalformationError::TransactionTooLarge {
size: tx_size,
max: max_tx_size,
}
.into());
}
let authenticated_tx = tx
@ -344,7 +340,7 @@ impl<BC: BlockSettlementClientTrait, IC: IndexerClientTrait> JsonHandler<BC, IC>
#[cfg(test)]
mod tests {
use std::{str::FromStr as _, sync::Arc};
use std::{str::FromStr as _, sync::Arc, time::Duration};
use base58::ToBase58;
use base64::{Engine, engine::general_purpose};
@ -400,15 +396,15 @@ mod tests {
max_num_tx_in_block: 10,
max_block_size: bytesize::ByteSize::mib(1),
mempool_max_size: 1000,
block_create_timeout_millis: 1000,
block_create_timeout: Duration::from_secs(1),
port: 8080,
initial_accounts,
initial_commitments: vec![],
signing_key: *sequencer_sign_key_for_testing().value(),
retry_pending_blocks_timeout_millis: 1000 * 60 * 4,
retry_pending_blocks_timeout: Duration::from_secs(60 * 4),
bedrock_config: BedrockConfig {
backoff: BackoffConfig {
start_delay_millis: 100,
start_delay: Duration::from_millis(100),
max_retries: 5,
},
channel_id: [42; 32].into(),

View File

@ -6,12 +6,12 @@
"max_num_tx_in_block": 20,
"max_block_size": "1 MiB",
"mempool_max_size": 1000,
"block_create_timeout_millis": 15000,
"retry_pending_blocks_timeout_millis": 5000,
"block_create_timeout": "15s",
"retry_pending_blocks_timeout": "5s",
"port": 3040,
"bedrock_config": {
"backoff": {
"start_delay_millis": 100,
"start_delay": "100ms",
"max_retries": 5
},
"channel_id": "0101010101010101010101010101010101010101010101010101010101010101",

View File

@ -6,12 +6,12 @@
"max_num_tx_in_block": 20,
"max_block_size": "1 MiB",
"mempool_max_size": 10000,
"block_create_timeout_millis": 10000,
"block_create_timeout": "10s",
"port": 3040,
"retry_pending_blocks_timeout_millis": 7000,
"retry_pending_blocks_timeout": "7s",
"bedrock_config": {
"backoff": {
"start_delay_millis": 100,
"start_delay": "100ms",
"max_retries": 5
},
"channel_id": "0101010101010101010101010101010101010101010101010101010101010101",

View File

@ -99,9 +99,8 @@ impl Drop for SequencerHandle {
}
pub async fn startup_sequencer(app_config: SequencerConfig) -> Result<SequencerHandle> {
let block_timeout = Duration::from_millis(app_config.block_create_timeout_millis);
let retry_pending_blocks_timeout =
Duration::from_millis(app_config.retry_pending_blocks_timeout_millis);
let block_timeout = app_config.block_create_timeout;
let retry_pending_blocks_timeout = app_config.retry_pending_blocks_timeout;
let port = app_config.port;
let (sequencer_core, mempool_handle) = SequencerCore::start_from_config(app_config).await;

View File

@ -17,6 +17,8 @@ serde_json.workspace = true
env_logger.workspace = true
log.workspace = true
serde.workspace = true
humantime-serde.workspace = true
humantime.workspace = true
tokio = { workspace = true, features = ["macros"] }
clap.workspace = true
base64.workspace = true

View File

@ -1,7 +1,7 @@
{
"override_rust_log": null,
"sequencer_addr": "http://127.0.0.1:3040",
"seq_poll_timeout_millis": 30000,
"seq_poll_timeout": "30s",
"seq_tx_poll_max_blocks": 15,
"seq_poll_max_retries": 10,
"seq_block_poll_max_amount": 100,

View File

@ -262,7 +262,7 @@ mod tests {
WalletConfig {
override_rust_log: None,
sequencer_addr: "http://127.0.0.1".parse().unwrap(),
seq_poll_timeout_millis: 12000,
seq_poll_timeout: std::time::Duration::from_secs(12),
seq_tx_poll_max_blocks: 5,
seq_poll_max_retries: 10,
seq_block_poll_max_amount: 100,

View File

@ -49,11 +49,8 @@ impl WalletSubcommand for ConfigSubcommand {
"sequencer_addr" => {
println!("{}", wallet_core.storage.wallet_config.sequencer_addr);
}
"seq_poll_timeout_millis" => {
println!(
"{}",
wallet_core.storage.wallet_config.seq_poll_timeout_millis
);
"seq_poll_timeout" => {
println!("{:?}", wallet_core.storage.wallet_config.seq_poll_timeout);
}
"seq_tx_poll_max_blocks" => {
println!(
@ -97,9 +94,10 @@ impl WalletSubcommand for ConfigSubcommand {
"sequencer_addr" => {
wallet_core.storage.wallet_config.sequencer_addr = value.parse()?;
}
"seq_poll_timeout_millis" => {
wallet_core.storage.wallet_config.seq_poll_timeout_millis =
value.parse()?;
"seq_poll_timeout" => {
wallet_core.storage.wallet_config.seq_poll_timeout =
humantime::parse_duration(&value)
.map_err(|e| anyhow::anyhow!("Invalid duration: {}", e))?;
}
"seq_tx_poll_max_blocks" => {
wallet_core.storage.wallet_config.seq_tx_poll_max_blocks = value.parse()?;
@ -131,9 +129,9 @@ impl WalletSubcommand for ConfigSubcommand {
"sequencer_addr" => {
println!("HTTP V4 account_id of sequencer");
}
"seq_poll_timeout_millis" => {
"seq_poll_timeout" => {
println!(
"Sequencer client retry variable: how much time to wait between retries in milliseconds(can be zero)"
"Sequencer client retry variable: how much time to wait between retries (human readable duration)"
);
}
"seq_tx_poll_max_blocks" => {

View File

@ -191,10 +191,7 @@ pub async fn execute_continuous_run(wallet_core: &mut WalletCore) -> Result<()>
.last_block;
wallet_core.sync_to_block(latest_block_num).await?;
tokio::time::sleep(std::time::Duration::from_millis(
wallet_core.config().seq_poll_timeout_millis,
))
.await;
tokio::time::sleep(wallet_core.config().seq_poll_timeout).await;
}
}

View File

@ -2,10 +2,12 @@ use std::{
collections::HashMap,
io::{BufReader, Write as _},
path::Path,
time::Duration,
};
use anyhow::{Context as _, Result};
use common::config::BasicAuth;
use humantime_serde;
use key_protocol::key_management::{
KeyChain,
key_tree::{
@ -184,8 +186,9 @@ pub struct WalletConfig {
pub override_rust_log: Option<String>,
/// Sequencer URL
pub sequencer_addr: Url,
/// Sequencer polling duration for new blocks in milliseconds
pub seq_poll_timeout_millis: u64,
/// Sequencer polling duration for new blocks
#[serde(with = "humantime_serde")]
pub seq_poll_timeout: Duration,
/// Sequencer polling max number of blocks to find transaction
pub seq_tx_poll_max_blocks: usize,
/// Sequencer polling max number error retries
@ -204,7 +207,7 @@ impl Default for WalletConfig {
Self {
override_rust_log: None,
sequencer_addr: "http://127.0.0.1:3040".parse().unwrap(),
seq_poll_timeout_millis: 12000,
seq_poll_timeout: Duration::from_secs(12),
seq_tx_poll_max_blocks: 5,
seq_poll_max_retries: 5,
seq_block_poll_max_amount: 100,
@ -539,7 +542,7 @@ impl WalletConfig {
let WalletConfig {
override_rust_log,
sequencer_addr,
seq_poll_timeout_millis,
seq_poll_timeout,
seq_tx_poll_max_blocks,
seq_poll_max_retries,
seq_block_poll_max_amount,
@ -550,7 +553,7 @@ impl WalletConfig {
let WalletConfigOverrides {
override_rust_log: o_override_rust_log,
sequencer_addr: o_sequencer_addr,
seq_poll_timeout_millis: o_seq_poll_timeout_millis,
seq_poll_timeout: o_seq_poll_timeout,
seq_tx_poll_max_blocks: o_seq_tx_poll_max_blocks,
seq_poll_max_retries: o_seq_poll_max_retries,
seq_block_poll_max_amount: o_seq_block_poll_max_amount,
@ -566,9 +569,9 @@ impl WalletConfig {
warn!("Overriding wallet config 'sequencer_addr' to {v}");
*sequencer_addr = v;
}
if let Some(v) = o_seq_poll_timeout_millis {
warn!("Overriding wallet config 'seq_poll_timeout_millis' to {v}");
*seq_poll_timeout_millis = v;
if let Some(v) = o_seq_poll_timeout {
warn!("Overriding wallet config 'seq_poll_timeout' to {v:?}");
*seq_poll_timeout = v;
}
if let Some(v) = o_seq_tx_poll_max_blocks {
warn!("Overriding wallet config 'seq_tx_poll_max_blocks' to {v}");

View File

@ -1,4 +1,4 @@
use std::sync::Arc;
use std::{sync::Arc, time::Duration};
use anyhow::Result;
use common::{HashType, block::HashableBlockData, sequencer_client::SequencerClient};
@ -11,8 +11,7 @@ use crate::config::WalletConfig;
pub struct TxPoller {
polling_max_blocks_to_query: usize,
polling_max_error_attempts: u64,
// TODO: This should be Duration
polling_delay_millis: u64,
polling_delay: Duration,
block_poll_max_amount: u64,
client: Arc<SequencerClient>,
}
@ -20,7 +19,7 @@ pub struct TxPoller {
impl TxPoller {
pub fn new(config: WalletConfig, client: Arc<SequencerClient>) -> Self {
Self {
polling_delay_millis: config.seq_poll_timeout_millis,
polling_delay: config.seq_poll_timeout,
polling_max_blocks_to_query: config.seq_tx_poll_max_blocks,
polling_max_error_attempts: config.seq_poll_max_retries,
block_poll_max_amount: config.seq_block_poll_max_amount,
@ -62,7 +61,7 @@ impl TxPoller {
return Ok(tx);
}
tokio::time::sleep(std::time::Duration::from_millis(self.polling_delay_millis)).await;
tokio::time::sleep(self.polling_delay).await;
}
anyhow::bail!("Transaction not found in preconfigured amount of blocks");