feat: implement all-in-one docker compose

This commit is contained in:
Daniil Polyakov 2026-02-10 00:19:37 +03:00
parent 90c70a7f76
commit f248cf2ae0
24 changed files with 334 additions and 45 deletions

View File

@ -35,7 +35,7 @@ To the best of our knowledge, this approach is unique to Nescience. Other progra
- Alice submits a transaction to execute the token program `Transfer` function on-chain, specifying Charlie's public account as recipient.
- The execution is handled on-chain without ZKPs involved.
- Alice's and Charlie's accounts are modified according to the transaction.
#### Key points:
- The same token program is used in all executions.
- The difference lies in execution mode: public executions update visible accounts on-chain, while private executions rely on ZKPs.
@ -143,6 +143,20 @@ If everything went well you should see an output similar to this:
[2025-11-13T19:50:39Z INFO sequencer_runner] Waiting for new transactions
```
# Running with Docker
You can run the whole setup with Docker:
```bash
docker compose up
```
With that you can send transactions from a local wallet to the Sequencer running inside Docker using `wallet/configs/debug`, as well as explore blocks by opening `http://localhost:8080`.
## Caution for local image builds
If you're going to build the sequencer image locally, you should adjust the default Docker settings and set `defaultKeepStorage` to at least `25GB` so that layers can be kept properly cached.
# Try the Wallet CLI
## Install
@ -165,9 +179,9 @@ This tutorial walks you through creating accounts and executing NSSA programs in
> The NSSA state is split into two separate but interconnected components: the public state and the private state.
> The public state is an on-chain, publicly visible record of accounts indexed by their Account IDs
> The private state mirrors this, but the actual account values are stored locally by each account owner. On-chain, only a hidden commitment to each private account state is recorded. This allows the chain to enforce freshness (i.e., prevent the reuse of stale private states) while preserving privacy and unlinkability across executions and private accounts.
>
>
> Every piece of state in NSSA is stored in an account (public or private). Accounts are either uninitialized or are owned by a program, and programs can only modify the accounts they own.
>
>
> In NSSA, accounts can only be modified through program execution. A program is the sole mechanism that can change an account's value.
> Programs run publicly when all involved accounts are public, and privately when at least one private account participates.
@ -429,7 +443,7 @@ This mechanism enables a common use case: transferring funds from any account (p
#### Sending tokens from the public account to a private account owned by someone else
For this tutorial, we'll simulate that scenario by creating a new private account that we own, but we'll treat it as if it belonged to someone else.
For this tutorial, we'll simulate that scenario by creating a new private account that we own, but we'll treat it as if it belonged to someone else.
Let's create a new (uninitialized) private account like before:

View File

@ -1,4 +1,4 @@
# Bedrock Configuration Files for Integration Tests
# Bedrock Configuration Files for All-in-One run and Integration Tests
## How to update

View File

@ -4,11 +4,9 @@ timeout: 10
# Tracing
tracing_settings:
logger: None
logger: Stdout
tracing: None
filter: !EnvFilter
filters:
logos-blockchain: debug
filter: None
metrics: None
console: None
level: INFO
level: DEBUG

View File

@ -3,7 +3,7 @@ use std::time::Duration;
use anyhow::{Context as _, Result};
use common::config::BasicAuth;
use futures::{Stream, TryFutureExt};
use log::warn;
use log::{info, warn};
pub use logos_blockchain_chain_broadcast_service::BlockInfo;
pub use logos_blockchain_common_http_client::{CommonHttpClient, Error};
pub use logos_blockchain_core::{block::Block, header::HeaderId, mantle::SignedMantleTx};
@ -39,6 +39,7 @@ pub struct BedrockClient {
impl BedrockClient {
pub fn new(backoff: BackoffConfig, node_url: Url, auth: Option<BasicAuth>) -> Result<Self> {
info!("Creating Bedrock client with node URL {node_url}");
let client = Client::builder()
//Add more fields if needed
.timeout(std::time::Duration::from_secs(60))
@ -61,7 +62,7 @@ impl BedrockClient {
Retry::spawn(self.backoff_strategy(), || {
self.http_client
.post_transaction(self.node_url.clone(), tx.clone())
.inspect_err(|err| warn!("Transaction posting failed with err: {err:#?}"))
.inspect_err(|err| warn!("Transaction posting failed with error: {err:#}"))
})
.await
}
@ -77,7 +78,7 @@ impl BedrockClient {
Retry::spawn(self.backoff_strategy(), || {
self.http_client
.get_block_by_id(self.node_url.clone(), header_id)
.inspect_err(|err| warn!("Block fetching failed with err: {err:#?}"))
.inspect_err(|err| warn!("Block fetching failed with error: {err:#}"))
})
.await
}

View File

@ -0,0 +1,11 @@
{
"resubscribe_interval_millis": 1000,
"bedrock_client_config": {
"addr": "http://logos-blockchain-node-0:18080",
"backoff": {
"start_delay_millis": 100,
"max_retries": 5
}
},
"channel_id": "0101010101010101010101010101010101010101010101010101010101010101"
}

View File

@ -0,0 +1,168 @@
{
"home": "/var/lib/sequencer_runner",
"override_rust_log": null,
"genesis_id": 1,
"is_genesis_random": true,
"max_num_tx_in_block": 20,
"mempool_max_size": 10000,
"block_create_timeout_millis": 10000,
"retry_pending_blocks_timeout_millis": 7000,
"port": 3040,
"bedrock_config": {
"backoff": {
"start_delay_millis": 100,
"max_retries": 5
},
"channel_id": "0101010101010101010101010101010101010101010101010101010101010101",
"node_url": "http://logos-blockchain-node-0:18080"
},
"indexer_rpc_url": "ws://indexer_service:8779",
"initial_accounts": [
{
"account_id": "BLgCRDXYdQPMMWVHYRFGQZbgeHx9frkipa8GtpG2Syqy",
"balance": 10000
},
{
"account_id": "Gj1mJy5W7J5pfmLRujmQaLfLMWidNxQ6uwnhb666ZwHw",
"balance": 20000
}
],
"initial_commitments": [
{
"npk": [
63,
202,
178,
231,
183,
82,
237,
212,
216,
221,
215,
255,
153,
101,
177,
161,
254,
210,
128,
122,
54,
190,
230,
151,
183,
64,
225,
229,
113,
1,
228,
97
],
"account": {
"program_owner": [
0,
0,
0,
0,
0,
0,
0,
0
],
"balance": 10000,
"data": [],
"nonce": 0
}
},
{
"npk": [
192,
251,
166,
243,
167,
236,
84,
249,
35,
136,
130,
172,
219,
225,
161,
139,
229,
89,
243,
125,
194,
213,
209,
30,
23,
174,
100,
244,
124,
74,
140,
47
],
"account": {
"program_owner": [
0,
0,
0,
0,
0,
0,
0,
0
],
"balance": 20000,
"data": [],
"nonce": 0
}
}
],
"signing_key": [
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37
]
}

View File

@ -0,0 +1,27 @@
# This file is automatically applied on top of docker-compose.yml when running `docker compose` commands.
services:
logos-blockchain-node-0:
ports: !override
- "18080:18080/tcp"
environment:
- RUST_LOG=error
sequencer_runner:
depends_on:
- logos-blockchain-node-0
- indexer_service
volumes: !override
- ./configs/docker-all-in-one/sequencer:/etc/sequencer_runner
indexer_service:
depends_on:
- logos-blockchain-node-0
volumes:
- ./configs/docker-all-in-one/indexer/indexer_config.json:/etc/indexer_service/indexer_config.json
explorer_service:
depends_on:
- indexer_service
environment:
- INDEXER_RPC_URL=http://indexer_service:8779

13
docker-compose.yml Normal file
View File

@ -0,0 +1,13 @@
# All-in-one docker compose configuration.
# It runs all services from this repo and the bedrock nodes in a single docker network.
# This is useful for development and testing purposes.
include:
- path:
bedrock/docker-compose.yml
- path:
sequencer_runner/docker-compose.yml
- path:
indexer/service/docker-compose.yml
- path:
explorer_service/docker-compose.yml

View File

@ -12,6 +12,7 @@ pub struct BedrockClientConfig {
/// For individual RPC requests we use Fibonacci backoff retry strategy.
pub backoff: BackoffConfig,
pub addr: Url,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub auth: Option<BasicAuth>,
}

View File

@ -36,7 +36,9 @@ RUN strip /indexer_service/target/release/indexer_service
FROM debian:trixie-slim
# Create non-root user for security
RUN useradd -m -u 1000 -s /bin/bash indexer_service_user
RUN useradd -m -u 1000 -s /bin/bash indexer_service_user && \
mkdir -p /indexer_service /etc/indexer_service && \
chown -R indexer_service_user:indexer_service_user /indexer_service /etc/indexer_service
# Copy binary from builder
COPY --from=builder --chown=indexer_service_user:indexer_service_user /indexer_service/target/release/indexer_service /usr/local/bin/indexer_service
@ -61,4 +63,4 @@ ENV RUST_LOG=info
USER indexer_service_user
WORKDIR /indexer_service
CMD ["indexer_service"]
CMD ["indexer_service", "/etc/indexer_service/indexer_config.json"]

View File

@ -0,0 +1,11 @@
{
"resubscribe_interval_millis": 1000,
"bedrock_client_config": {
"addr": "http://localhost:18080",
"backoff": {
"start_delay_millis": 100,
"max_retries": 5
}
},
"channel_id": "0101010101010101010101010101010101010101010101010101010101010101"
}

View File

@ -2,8 +2,11 @@ services:
indexer_service:
image: lssa/indexer_service
build:
context: ..
dockerfile: indexer_service/Dockerfile
context: ../..
dockerfile: indexer/service/Dockerfile
container_name: indexer_service
ports:
- "8779:8779"
volumes:
# Mount configuration
- ./configs/indexer_config.json:/etc/indexer_service/indexer_config.json

View File

@ -8,7 +8,7 @@ use indexer_service_protocol::{Account, AccountId, Block, BlockId, HashType, Tra
use jsonrpsee::{
SubscriptionSink,
core::{Serialize, SubscriptionResult},
types::ErrorObjectOwned,
types::{ErrorCode, ErrorObject, ErrorObjectOwned},
};
use log::{debug, error, info, warn};
use tokio::sync::mpsc::UnboundedSender;
@ -54,23 +54,23 @@ impl indexer_service_rpc::RpcServer for IndexerService {
}
async fn get_block_by_id(&self, _block_id: BlockId) -> Result<Block, ErrorObjectOwned> {
todo!()
Err(not_yet_implemented_error())
}
async fn get_block_by_hash(&self, _block_hash: HashType) -> Result<Block, ErrorObjectOwned> {
todo!()
Err(not_yet_implemented_error())
}
async fn get_account(&self, _account_id: AccountId) -> Result<Account, ErrorObjectOwned> {
todo!()
Err(not_yet_implemented_error())
}
async fn get_transaction(&self, _tx_hash: HashType) -> Result<Transaction, ErrorObjectOwned> {
todo!()
Err(not_yet_implemented_error())
}
async fn get_blocks(&self, _offset: u32, _limit: u32) -> Result<Vec<Block>, ErrorObjectOwned> {
todo!()
Err(not_yet_implemented_error())
}
async fn get_transactions_by_account(
@ -79,7 +79,7 @@ impl indexer_service_rpc::RpcServer for IndexerService {
_limit: u32,
_offset: u32,
) -> Result<Vec<Transaction>, ErrorObjectOwned> {
todo!()
Err(not_yet_implemented_error())
}
}
@ -105,6 +105,7 @@ impl SubscriptionService {
// Respawn the subscription service loop if it has finished (either with error or panic)
if guard.handle.is_finished() {
drop(guard);
let new_parts = Self::spawn_respond_subscribers_loop(self.indexer.clone());
let old_handle_and_sender = self.parts.swap(Arc::new(new_parts));
let old_parts = Arc::into_inner(old_handle_and_sender)
@ -113,7 +114,7 @@ impl SubscriptionService {
match old_parts.handle.await {
Ok(Err(err)) => {
error!(
"Subscription service loop has unexpectedly finished with err: {err:#}"
"Subscription service loop has unexpectedly finished with error: {err:#}"
);
}
Err(err) => {
@ -217,3 +218,11 @@ impl<T> Drop for Subscription<T> {
);
}
}
fn not_yet_implemented_error() -> ErrorObjectOwned {
ErrorObject::owned(
ErrorCode::InternalError.code(),
"Not yet implemented",
Option::<String>::None,
)
}

View File

@ -97,7 +97,8 @@ impl TestContext {
async fn setup_bedrock_node() -> Result<(DockerCompose, SocketAddr)> {
let manifest_dir = env!("CARGO_MANIFEST_DIR");
let bedrock_compose_path = PathBuf::from(manifest_dir).join("bedrock/docker-compose.yml");
let bedrock_compose_path =
PathBuf::from(manifest_dir).join("../bedrock/docker-compose.yml");
let mut compose = DockerCompose::with_auto_client(&[bedrock_compose_path])
.await

View File

@ -1,3 +1,5 @@
# Should be kept in sync with Dockerfiles
[toolchain]
channel = "1.91.1"
profile = "default"

View File

@ -1,6 +1,7 @@
use std::{ops::Deref, sync::Arc};
use anyhow::{Context as _, Result};
use log::info;
pub use url::Url;
#[expect(async_fn_in_trait, reason = "We don't care about Send/Sync here")]
@ -13,6 +14,8 @@ pub struct IndexerClient(Arc<jsonrpsee::ws_client::WsClient>);
impl IndexerClientTrait for IndexerClient {
async fn new(indexer_url: &Url) -> Result<Self> {
info!("Connecting to Indexer at {indexer_url}");
let client = jsonrpsee::ws_client::WsClientBuilder::default()
.build(indexer_url)
.await

View File

@ -1,15 +1,34 @@
# Chef stage - uses pre-built cargo-chef image
FROM lukemathwalker/cargo-chef:latest-rust-1.91.1-slim-trixie AS chef
# Install build dependencies
# Install dependencies
RUN apt-get update && apt-get install -y \
build-essential \
pkg-config \
libssl-dev \
libclang-dev \
clang \
cmake \
ninja-build \
curl \
git \
&& rm -rf /var/lib/apt/lists/*
# Install r0vm (manual build as it's portable across different host platforms)
RUN git clone --depth 1 --branch release-3.0 https://github.com/risc0/risc0.git
RUN git clone --depth 1 --branch r0.1.91.1 https://github.com/risc0/rust.git
WORKDIR /risc0
RUN cargo install --path rzup
RUN rzup build --path /rust rust --verbose
RUN cargo install --path risc0/cargo-risczero
ENV PATH="/root/.cargo/bin:/root/.risc0/bin:${PATH}"
RUN cp "$(which r0vm)" /usr/local/bin/r0vm
RUN test -x /usr/local/bin/r0vm
RUN r0vm --version
# Install logos blockchain circuits
RUN curl -sSL https://raw.githubusercontent.com/logos-blockchain/logos-blockchain/main/scripts/setup-logos-blockchain-circuits.sh | bash
WORKDIR /sequencer_runner
# Planner stage - generates dependency recipe
@ -32,14 +51,6 @@ RUN cargo build --release --bin sequencer_runner
# Strip debug symbols to reduce binary size
RUN strip /sequencer_runner/target/release/sequencer_runner
# Install r0vm
RUN curl -L https://risczero.com/install | bash
ENV PATH="/root/.cargo/bin:/root/.risc0/bin:${PATH}"
RUN rzup install
RUN cp "$(which r0vm)" /usr/local/bin/r0vm
RUN test -x /usr/local/bin/r0vm
RUN r0vm --version
# Runtime stage - minimal image
FROM debian:trixie-slim
@ -59,6 +70,9 @@ COPY --from=builder --chown=sequencer_user:sequencer_user /sequencer_runner/targ
# Copy r0vm binary from builder
COPY --from=builder --chown=sequencer_user:sequencer_user /usr/local/bin/r0vm /usr/local/bin/r0vm
# Copy logos blockchain circuits from builder
COPY --from=builder --chown=sequencer_user:sequencer_user /root/.logos-blockchain-circuits /home/sequencer_user/.logos-blockchain-circuits
# Copy entrypoint script
COPY sequencer_runner/docker-entrypoint.sh /docker-entrypoint.sh
RUN chmod +x /docker-entrypoint.sh

View File

@ -8,6 +8,15 @@
"block_create_timeout_millis": 5000,
"retry_pending_blocks_timeout_millis": 7000,
"port": 3040,
"bedrock_config": {
"backoff": {
"start_delay_millis": 100,
"max_retries": 5
},
"channel_id": "0101010101010101010101010101010101010101010101010101010101010101",
"node_url": "http://localhost:18080"
},
"indexer_rpc_url": "ws://localhost:8779",
"initial_accounts": [
{
"account_id": "BLgCRDXYdQPMMWVHYRFGQZbgeHx9frkipa8GtpG2Syqy",
@ -155,13 +164,5 @@
37,
37,
37
],
"bedrock_config": {
"channel_id": "0101010101010101010101010101010101010101010101010101010101010101",
"node_url": "http://localhost:8080",
"auth": {
"username": "user"
}
},
"indexer_rpc_url": "ws://localhost:8779"
]
}

View File

@ -7,6 +7,16 @@
"mempool_max_size": 10000,
"block_create_timeout_millis": 10000,
"port": 3040,
"retry_pending_blocks_timeout_millis": 7000,
"bedrock_config": {
"backoff": {
"start_delay_millis": 100,
"max_retries": 5
},
"channel_id": "0101010101010101010101010101010101010101010101010101010101010101",
"node_url": "http://localhost:18080"
},
"indexer_rpc_url": "ws://localhost:8779",
"initial_accounts": [
{
"account_id": "BLgCRDXYdQPMMWVHYRFGQZbgeHx9frkipa8GtpG2Syqy",

View File

@ -244,7 +244,7 @@ pub async fn main_runner() -> Result<()> {
info!("Sequencer running. Monitoring concurrent tasks...");
let Err(err) = sequencer_handle.run_forever().await;
error!("Sequencer failed: {err:?}");
error!("Sequencer failed: {err:#}");
info!("Shutting down sequencer...");