Compare commits

..

No commits in common. "master" and "v0.2.3" have entirely different histories.

81 changed files with 1110 additions and 4192 deletions

BIN
.DS_Store vendored Normal file

Binary file not shown.

View File

@ -1,58 +0,0 @@
name: ci / nix
permissions:
contents: read
pull-requests: read
checks: write
on:
pull_request:
branches: [master]
jobs:
build:
strategy:
fail-fast: false
matrix:
system:
- aarch64-darwin
- x86_64-linux
nixpkg:
- libsds
- libsds-android-arm64
- libsds-android-amd64
- libsds-android-x86
- libsds-android-arm
include:
- system: aarch64-darwin
runs_on: [self-hosted, macOS, ARM64]
- system: x86_64-linux
runs_on: [self-hosted, Linux, X64]
# Nimble segfaults on MacOS hosts.
exclude:
- system: aarch64-darwin
nixpkg: libsds-android-arm64
- system: aarch64-darwin
nixpkg: libsds-android-amd64
- system: aarch64-darwin
nixpkg: libsds-android-x86
- system: aarch64-darwin
nixpkg: libsds-android-arm
name: '${{ matrix.system }} / ${{ matrix.nixpkg }}'
runs-on: ${{ matrix.runs_on }}
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- name: 'Run Nix build for ${{ matrix.nixpkg }}'
shell: bash
run: |
nix build -L '.#${{ matrix.nixpkg }}' \
--print-out-paths --accept-flake-config
- name: 'Show result contents'
shell: bash
run: find result/ -type f

View File

@ -1,197 +0,0 @@
name: ci / nimble
permissions:
contents: read
pull-requests: read
checks: write
on:
pull_request:
branches: [master]
push:
branches: [master]
workflow_dispatch:
jobs:
test:
name: 'test / ${{ matrix.os }}'
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, macos-latest]
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: jiro4989/setup-nim-action@v2
with:
nim-version: '2.2.4'
- name: Install dependencies
run: nimble setup -l
- name: Run test suite
run: nimble test
build-linux:
name: 'linux / ${{ matrix.task }}'
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
task:
- libsdsDynamicLinux
- libsdsStaticLinux
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: jiro4989/setup-nim-action@v2
with:
nim-version: '2.2.4'
- name: Install dependencies
run: nimble setup -l
- name: Build ${{ matrix.task }}
run: nimble ${{ matrix.task }}
- uses: actions/upload-artifact@v4
with:
name: ${{ matrix.task }}
path: build/
build-mac:
name: 'macos / ${{ matrix.task }}'
runs-on: macos-latest
strategy:
fail-fast: false
matrix:
task:
- libsdsDynamicMac
- libsdsStaticMac
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: jiro4989/setup-nim-action@v2
with:
nim-version: '2.2.4'
- name: Install dependencies
run: nimble setup -l
- name: Build ${{ matrix.task }}
run: nimble ${{ matrix.task }}
- uses: actions/upload-artifact@v4
with:
name: ${{ matrix.task }}
path: build/
build-windows:
name: 'windows / ${{ matrix.task }}'
runs-on: windows-latest
strategy:
fail-fast: false
matrix:
task:
- libsdsDynamicWindows
- libsdsStaticWindows
steps:
- name: Enable Git long paths
run: git config --global core.longpaths true
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: jiro4989/setup-nim-action@v2
with:
nim-version: '2.2.4'
# nim-lsquic (transitive dep via libp2p) contains boringssl submodule with
# deeply-nested test paths. Nimble's temp dir path + those file names exceed
# 260 chars even with core.longpaths=true because Nim's runtime doesn't use
# the \\?\ extended-path prefix. Redirect TEMP/TMP to a short root dir so
# nimble's working directory stays well under the limit.
- name: Shorten TEMP path for nimble
shell: pwsh
run: |
New-Item -ItemType Directory -Force -Path C:\T | Out-Null
"TEMP=C:\T" >> $env:GITHUB_ENV
"TMP=C:\T" >> $env:GITHUB_ENV
- name: Install dependencies
run: nimble setup -l
- name: Build ${{ matrix.task }}
run: nimble ${{ matrix.task }}
- uses: actions/upload-artifact@v4
with:
name: ${{ matrix.task }}
path: build/
build-ios:
name: 'macos / libsdsIOS'
runs-on: macos-latest
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: maxim-lobanov/setup-xcode@v1
with:
xcode-version: '16.2'
- uses: jiro4989/setup-nim-action@v2
with:
nim-version: '2.2.4'
- name: Install dependencies
run: nimble setup -l
- name: Build libsdsIOS
run: nimble libsdsIOS
- uses: actions/upload-artifact@v4
with:
name: libsdsIOS
path: build/
build-android:
name: 'linux / ${{ matrix.task }}'
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
task: [libsdsAndroidArm64, libsdsAndroidAmd64, libsdsAndroidX86]
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: jiro4989/setup-nim-action@v2
with:
nim-version: '2.2.4'
- uses: nttld/setup-ndk@v1
id: setup-ndk
with:
ndk-version: r27c
- name: Install dependencies
run: nimble setup -l
- name: Build ${{ matrix.task }}
env:
ANDROID_NDK_ROOT: ${{ steps.setup-ndk.outputs.ndk-path }}
run: nimble ${{ matrix.task }}
- uses: actions/upload-artifact@v4
with:
name: ${{ matrix.task }}
path: build/

25
.github/workflows/nix-builds.yml vendored Normal file
View File

@ -0,0 +1,25 @@
---
name: ci / nix-builds
on:
pull_request:
branches: [master]
jobs:
build:
name: Build Nix Flake packages
runs-on: [self-hosted, Linux]
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- name: Build library
shell: bash
run: |
nix build '.?submodules=1#libsds' \
--print-out-paths --accept-flake-config
- name: Build Android library
shell: bash
run: |
nix build '.?submodules=1#libsds-android-arm64' \
--print-out-paths --accept-flake-config

6
.gitignore vendored
View File

@ -10,8 +10,8 @@ result
sds.nims sds.nims
/.update.timestamp /.update.timestamp
# Nimbus Build System
nimbus-build-system.paths
# Nimble packages # Nimble packages
/vendor/.nimble /vendor/.nimble
nimble.develop
nimble.paths
nimbledeps

55
.gitmodules vendored Normal file
View File

@ -0,0 +1,55 @@
[submodule "vendor/nimbus-build-system"]
path = vendor/nimbus-build-system
url = https://github.com/status-im/nimbus-build-system.git
ignore = untracked
branch = master
[submodule "vendor/nim-chronos"]
path = vendor/nim-chronos
url = https://github.com/status-im/nim-chronos.git
ignore = untracked
branch = master
[submodule "vendor/nim-results"]
path = vendor/nim-results
url = https://github.com/arnetheduck/nim-results.git
ignore = untracked
branch = master
[submodule "vendor/nim-stew"]
path = vendor/nim-stew
url = https://github.com/status-im/nim-stew.git
ignore = untracked
branch = master
[submodule "vendor/nim-chronicles"]
path = vendor/nim-chronicles
url = https://github.com/status-im/nim-chronicles.git
ignore = untracked
branch = master
[submodule "vendor/nim-faststreams"]
path = vendor/nim-faststreams
url = https://github.com/status-im/nim-faststreams.git
ignore = untracked
branch = master
[submodule "vendor/nim-json-serialization"]
path = vendor/nim-json-serialization
url = https://github.com/status-im/nim-json-serialization.git
ignore = untracked
branch = master
[submodule "vendor/nim-serialization"]
path = vendor/nim-serialization
url = https://github.com/status-im/nim-serialization.git
ignore = untracked
branch = master
[submodule "vendor/nim-taskpools"]
path = vendor/nim-taskpools
url = https://github.com/status-im/nim-taskpools.git
ignore = untracked
branch = master
[submodule "vendor/nim-confutils"]
path = vendor/nim-confutils
url = https://github.com/status-im/nim-confutils.git
ignore = untracked
branch = master
[submodule "vendor/nim-libp2p"]
path = vendor/nim-libp2p
url = https://github.com/vacp2p/nim-libp2p.git
ignore = untracked
branch = master

164
CLAUDE.md
View File

@ -1,164 +0,0 @@
# nim-sds — Claude Code Guide
## Project Overview
**nim-sds** is a Nim implementation of the **Scalable Data Sync (SDS)** protocol ([spec](https://lip.logos.co/ift-ts/raw/sds.html), IFT LIP-109). SDS achieves end-to-end reliability when consolidating distributed logs in a decentralized manner — participants broadcast messages over a P2P transport, maintain per-channel append-only logs, and use causal ordering to reach consistent state across all nodes.
The library exposes its functionality via a C-compatible FFI so it can be embedded in applications on any platform. Go bindings are maintained in a separate repo: [logos-messaging/sds-go-bindings](https://github.com/logos-messaging/sds-go-bindings).
---
## Protocol Concepts
Understanding these is essential before modifying any core code.
### Message format
Each SDS message carries (`sds/message.nim`, `sds/protobuf.nim`):
| Field | Purpose |
|---|---|
| `message_id` | Globally unique, immutable identifier |
| `sender_id` | Originating participant |
| `channel_id` | Communication channel |
| `lamport_timestamp` | Logical clock for ordering |
| `causal_history` | IDs of the 23 most recent messages the sender has seen (dependencies) |
| `bloom_filter` | Compact summary of all message IDs the sender has received |
| `content` | Application payload |
### Sending a message (`sds/sds_utils.nim``wrapOutgoingMessage`)
1. Increment the per-channel Lamport timestamp to `max(current_time_ms, timestamp + 1)`.
2. Attach causal history from the local log tail.
3. Embed the current bloom filter snapshot.
### Receiving a message (`sds/sds_utils.nim``unwrapReceivedMessage`)
1. Deduplicate by `message_id`.
2. Check causal dependencies — if any predecessor is missing, buffer the message.
3. When all dependencies are met, deliver: insert into the ordered local log (Lamport timestamp, tie-break by ascending `message_id`).
4. Record `message_id` in the bloom filter.
### Periodic sync
A node periodically broadcasts a message with empty content carrying an updated Lamport timestamp and bloom filter. These sync messages are not persisted and are excluded from causal chains. They help peers detect gaps in their logs.
### SDS-R (Repair extension)
Defined in the spec but **not yet implemented** in this library.
### Bloom filter (`sds/bloom.nim`, `sds/rolling_bloom_filter.nim`)
Used to compactly summarise which messages a node has received, so peers can identify gaps without exchanging full ID lists. The rolling variant automatically resets when capacity is exceeded.
---
## Repository Layout
```
sds/ # Core protocol (pure Nim, no FFI)
message.nim # SdsMessage, HistoryEntry, config constants
sds_utils.nim # ReliabilityManager — send/receive/buffer logic
protobuf.nim # Protobuf encode/decode for SdsMessage
protobufutil.nim # Low-level protobuf helpers
bloom.nim # Bloom filter implementation
rolling_bloom_filter.nim # Adaptive rolling bloom filter
library/ # C FFI wrapper around the core
libsds.nim # Exported C-compatible entry points
libsds.h # C header
ffi_types.nim # C-compatible types and return codes
alloc.nim # Memory allocation helpers
sds_thread/ # Per-context Chronos async worker thread
events/ # JSON serialisation for event callbacks
tests/
test_bloom.nim # Bloom filter unit tests
test_reliability.nim # Protocol-level unit tests
sds.nim # Root module — re-exports public API
sds.nimble # Package manifest + build tasks
flake.nix / Makefile # Reproducible cross-platform build system
```
---
## Key Types
| Type | File | Role |
|---|---|---|
| `ReliabilityManager` | `sds_utils.nim` | Per-channel protocol state: Lamport clock, bloom filter, log, buffers |
| `ReliabilityConfig` | `sds_utils.nim` | Tunable parameters (bloom capacity, history length, resend interval) |
| `SdsMessage` | `message.nim` | Wire message |
| `HistoryEntry` | `message.nim` | `message_id` + optional retrieval hint |
| `UnacknowledgedMessage` | `message.nim` | Outgoing message with resend counter |
| `IncomingMessage` | `message.nim` | Buffered message waiting on missing dependencies |
---
## FFI API (`library/libsds.nim`)
The C API wraps `ReliabilityManager` behind an opaque `SdsContext` handle:
| Export | Maps to |
|---|---|
| `SdsNewReliabilityManager` | Create context |
| `SdsWrapOutgoingMessage` | `wrapOutgoingMessage` |
| `SdsUnwrapReceivedMessage` | `unwrapReceivedMessage` |
| `SdsMarkDependenciesMet` | Notify buffered-message dependencies satisfied |
| `SdsSetEventCallback` | Register event handler (JSON payloads) |
| `SdsSetRetrievalHintProvider` | Register hint-provider callback |
| `SdsStartPeriodicTasks` | Start periodic sync loop |
| `SdsCleanupReliabilityManager` | Free context |
| `SdsResetReliabilityManager` | Reset state without freeing |
Each `SdsContext` runs a dedicated Chronos async loop on a worker thread; application threads communicate with it via SPSC channels.
---
## Running Tests
```bash
nimble test
```
Nix can also provide the environment if a local Nim install is not available:
```bash
nix develop --command nimble test
```
---
## Code Conventions
- **Types**: PascalCase (`ReliabilityManager`, `SdsMessage`)
- **Variables/procs**: camelCase
- **Public exports**: trailing `*`
- **Errors**: `Result[T, ReliabilityError]` — use `.valueOr`, `.isOk()`, `.isErr()`
- **Locks**: `withLock` macro (RAII); all exported procs are `{.gcsafe.}`
- **Constants**: `Default` prefix (e.g., `DefaultBloomFilterCapacity`)
- **Backward compat**: `sds/protobuf.nim` supports old and new causal history formats — do not remove the legacy decode path
---
## Building
**Nimble** is the primary build tool. Desktop library targets:
```bash
nimble libsdsDynamicMac # macOS .dylib
nimble libsdsDynamicLinux # Linux .so
nimble libsdsStaticMac # macOS .a
nimble libsdsStaticLinux # Linux .a
```
**Nix** (`flake.nix`) and **Make** (`Makefile`) are optional conveniences that wrap Nimble for reproducible and cross-platform (including Android/iOS) builds.
## Dependency Management
Nimble dependencies are locked in `nimble.lock`.
```bash
nimble setup -l # local setup
nimble lock # update lock after changing sds.nimble
```
If using Nix, also recalculate the fixed-output hash in `nix/deps.nix` after updating `nimble.lock` (run `nix build`, copy the expected hash from the error, paste into `outputHash`).

View File

@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright © 2025-2026 Logos
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the “Software”), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

180
Makefile Normal file
View File

@ -0,0 +1,180 @@
.PHONY: libsds deps
export BUILD_SYSTEM_DIR := vendor/nimbus-build-system
LINK_PCRE := 0
# we don't want an error here, so we can handle things later, in the ".DEFAULT" target
-include $(BUILD_SYSTEM_DIR)/makefiles/variables.mk
nimbus-build-system-nimble-dir:
NIMBLE_DIR="$(CURDIR)/$(NIMBLE_DIR)" \
PWD_CMD="$(PWD)" \
$(CURDIR)/scripts/generate_nimble_links.sh
ifeq ($(NIM_PARAMS),)
# "variables.mk" was not included, so we update the submodules.
GIT_SUBMODULE_UPDATE := git submodule update --init --recursive
.DEFAULT:
+@ echo -e "Git submodules not found. Running '$(GIT_SUBMODULE_UPDATE)'.\n"; \
$(GIT_SUBMODULE_UPDATE); \
echo
# Now that the included *.mk files appeared, and are newer than this file, Make will restart itself:
# https://www.gnu.org/software/make/manual/make.html#Remaking-Makefiles
#
# After restarting, it will execute its original goal, so we don't have to start a child Make here
# with "$(MAKE) $(MAKECMDGOALS)". Isn't hidden control flow great?
else # "variables.mk" was included. Business as usual until the end of this file.
# default target, because it's the first one that doesn't start with '.'
all: | libsds
sds.nims:
ln -s sds.nimble $@
update: | update-common
rm -rf sds.nims && \
$(MAKE) sds.nims $(HANDLE_OUTPUT)
clean:
rm -rf build
deps: | deps-common sds.nims
# must be included after the default target
-include $(BUILD_SYSTEM_DIR)/makefiles/targets.mk
## Git version
GIT_VERSION ?= $(shell git describe --abbrev=6 --always --tags)
## Compilation parameters. If defined in the CLI the assignments won't be executed
NIM_PARAMS := $(NIM_PARAMS) -d:git_version=\"$(GIT_VERSION)\"
ifeq ($(DEBUG), 0)
NIM_PARAMS := $(NIM_PARAMS) -d:release
else
NIM_PARAMS := $(NIM_PARAMS) -d:debug
endif
STATIC ?= 0
detected_OS ?= Linux
ifeq ($(OS),Windows_NT)
detected_OS := Windows
else
detected_OS := $(shell uname -s)
endif
BUILD_COMMAND ?= libsdsDynamic
ifeq ($(STATIC), 1)
BUILD_COMMAND = libsdsStatic
endif
ifeq ($(detected_OS),Windows)
BUILD_COMMAND := $(BUILD_COMMAND)Windows
else ifeq ($(detected_OS),Darwin)
BUILD_COMMAND := $(BUILD_COMMAND)Mac
export IOS_SDK_PATH := $(shell xcrun --sdk iphoneos --show-sdk-path)
else ifeq ($(detected_OS),Linux)
BUILD_COMMAND := $(BUILD_COMMAND)Linux
endif
libsds: | deps
$(ENV_SCRIPT) nim $(BUILD_COMMAND) $(NIM_PARAMS) sds.nims
#####################
## Mobile Bindings ##
#####################
.PHONY: libsds-android \
libsds-android-precheck \
libsds-android-arm64 \
libsds-android-amd64 \
libsds-android-x86 \
libsds-android-arm \
build-libsds-for-android-arch
ANDROID_TARGET ?= 30
ifeq ($(detected_OS),Darwin)
ANDROID_TOOLCHAIN_DIR := $(ANDROID_NDK_ROOT)/toolchains/llvm/prebuilt/darwin-x86_64
else
ANDROID_TOOLCHAIN_DIR := $(ANDROID_NDK_ROOT)/toolchains/llvm/prebuilt/linux-x86_64
endif
# Fixes "clang: not found" errors
PATH := $(ANDROID_TOOLCHAIN_DIR)/bin:$(PATH)
libsds-android-precheck:
ifndef ANDROID_NDK_ROOT
$(error ANDROID_NDK_ROOT is not set)
endif
build-libsds-for-android-arch: NIM_PARAMS := $(NIM_PARAMS) --passC="--sysroot=$(ANDROID_TOOLCHAIN_DIR)/sysroot"
build-libsds-for-android-arch: NIM_PARAMS := $(NIM_PARAMS) --passL="--sysroot=$(ANDROID_TOOLCHAIN_DIR)/sysroot"
build-libsds-for-android-arch: NIM_PARAMS := $(NIM_PARAMS) --passC="--target=$(ANDROID_ARCH)$(ANDROID_TARGET)"
build-libsds-for-android-arch: NIM_PARAMS := $(NIM_PARAMS) --passL="--target=$(ANDROID_ARCH)$(ANDROID_TARGET)"
build-libsds-for-android-arch: NIM_PARAMS := $(NIM_PARAMS) --passC="-I$(ANDROID_TOOLCHAIN_DIR)/sysroot/usr/include"
build-libsds-for-android-arch: NIM_PARAMS := $(NIM_PARAMS) --passC="-I$(ANDROID_TOOLCHAIN_DIR)/sysroot/usr/include/$(ARCH_DIRNAME)"
build-libsds-for-android-arch: NIM_PARAMS := $(NIM_PARAMS) --passL="-L$(ANDROID_TOOLCHAIN_DIR)/sysroot/usr/lib/$(ARCH_DIRNAME)/$(ANDROID_TARGET)"
build-libsds-for-android-arch:
CC=$(ANDROID_TOOLCHAIN_DIR)/bin/$(ANDROID_ARCH)$(ANDROID_TARGET)-clang \
CPU=$(CPU) ABIDIR=$(ABIDIR) \
ARCH_DIRNAME=$(ARCH_DIRNAME) \
ANDROID_ARCH=$(ANDROID_ARCH) \
ANDROID_TOOLCHAIN_DIR=$(ANDROID_TOOLCHAIN_DIR) \
$(ENV_SCRIPT) \
nim libsdsAndroid $(NIM_PARAMS) sds.nims
libsds-android-arm64: ANDROID_ARCH=aarch64-linux-android
libsds-android-arm64: CPU=arm64
libsds-android-arm64: ABIDIR=arm64-v8a
libsds-android-arm64: ARCH_DIRNAME=aarch64-linux-android
libsds-android-arm64: | libsds-android-precheck build deps
$(MAKE) build-libsds-for-android-arch ANDROID_ARCH=$(ANDROID_ARCH) \
CPU=$(CPU) ABIDIR=$(ABIDIR) ARCH_DIRNAME=$(ARCH_DIRNAME)
libsds-android-amd64: ANDROID_ARCH=x86_64-linux-android
libsds-android-amd64: CPU=amd64
libsds-android-amd64: ABIDIR=x86_64
libsds-android-amd64: ARCH_DIRNAME=x86_64-linux-android
libsds-android-amd64: | libsds-android-precheck build deps
$(MAKE) build-libsds-for-android-arch ANDROID_ARCH=$(ANDROID_ARCH) \
CPU=$(CPU) ABIDIR=$(ABIDIR) ARCH_DIRNAME=$(ARCH_DIRNAME)
libsds-android-x86: ANDROID_ARCH=i686-linux-android
libsds-android-x86: CPU=i386
libsds-android-x86: ABIDIR=x86
libsds-android-x86: ARCH_DIRNAME=i686-linux-android
libsds-android-x86: | libsds-android-precheck build deps
$(MAKE) build-libsds-for-android-arch ANDROID_ARCH=$(ANDROID_ARCH) \
CPU=$(CPU) ABIDIR=$(ABIDIR) ARCH_DIRNAME=$(ARCH_DIRNAME)
libsds-android-arm: ANDROID_ARCH=armv7a-linux-androideabi
libsds-android-arm: CPU=arm
libsds-android-arm: ABIDIR=armeabi-v7a
libsds-android-arm: ARCH_DIRNAME=arm-linux-androideabi
libsds-android-arm: | libsds-android-precheck build deps
# cross-rs target architecture name does not match the one used in android
$(MAKE) build-libsds-for-android-arch ANDROID_ARCH=$(ANDROID_ARCH) \
CPU=$(CPU) ABIDIR=$(ABIDIR) ARCH_DIRNAME=$(ARCH_DIRNAME) \
libsds-android:
ifeq ($(ARCH),arm64)
$(MAKE) libsds-android-arm64
else ifeq ($(ARCH),amd64)
$(MAKE) libsds-android-amd64
else ifeq ($(ARCH),x86)
$(MAKE) libsds-android-x86
# else ifeq ($(ARCH),arm)
# $(MAKE) libsds-android-arm
# This target is disabled because on recent versions of cross-rs complain with the following error
# relocation R_ARM_THM_ALU_PREL_11_0 cannot be used against symbol 'stack_init_trampoline_return'; recompile with -fPIC
# It's likely this architecture is not used so we might just not support it.
else
$(error Unsupported ARCH '$(ARCH)'. Please set ARCH to one of: arm64, arm, amd64, x86)
endif
endif
# Target iOS
libsds-ios: | deps
$(ENV_SCRIPT) nim libsdsIOS $(NIM_PARAMS) sds.nims

125
README.md
View File

@ -1,128 +1,49 @@
# nim-sds # nim-e2e-reliability
Nim implementation of the e2e reliability protocol
Nim implementation of the e2e reliability protocol.
## Prerequisites
- [Nix](https://nixos.org/download/) package manager
## Quick start
```bash
git clone https://github.com/logos-messaging/nim-sds.git
cd nim-sds
# Build the shared library
nix build '.#libsds'
# Run tests
nix develop --command nimble test
```
## Building ## Building
### Desktop ### Nix
```bash ```bash
nix build --print-out-paths '.#libsds' nix build --print-out-paths '.?submodules=1#libsds'
nix build --print-out-paths '.?submodules=1#libsds-android-arm64'
```
### Windows, Linux or MacOS
```code
make libsds
``` ```
### Android ### Android
```bash Download the latest Android NDK. For example, on Ubuntu with Intel:
nix build --print-out-paths '.#libsds-android-arm64'
nix build --print-out-paths '.#libsds-android-amd64'
nix build --print-out-paths '.#libsds-android-x86'
nix build --print-out-paths '.#libsds-android-arm'
```
### iOS ```code
```bash
nix build --print-out-paths '.#libsds-ios'
```
<details>
<summary>Development shell</summary>
Enter the dev shell:
```bash
nix develop
```
Build using nimble tasks:
```bash
# Dynamic library (auto-detects OS)
nimble libsdsDynamicMac # macOS
nimble libsdsDynamicLinux # Linux
nimble libsdsDynamicWindows # Windows
# Static library
nimble libsdsStaticMac # macOS
nimble libsdsStaticLinux # Linux
nimble libsdsStaticWindows # Windows
```
Run tests:
```bash
nimble test
```
The built library is output to `build/`.
</details>
<details>
<summary>Android (without Nix)</summary>
Download the latest Android NDK:
```bash
cd ~ cd ~
wget https://dl.google.com/android/repository/android-ndk-r27c-linux.zip wget https://dl.google.com/android/repository/android-ndk-r27c-linux.zip
```
```code
unzip android-ndk-r27c-linux.zip unzip android-ndk-r27c-linux.zip
``` ```
Add to `~/.bashrc`: Then, add the following to your ~/.bashrc file:
```bash ```code
export ANDROID_NDK_ROOT=$HOME/android-ndk-r27c export ANDROID_NDK_ROOT=$HOME/android-ndk-r27c
export PATH=$ANDROID_NDK_ROOT/toolchains/llvm/prebuilt/linux-x86_64/bin:$PATH export PATH=$ANDROID_NDK_ROOT/toolchains/llvm/prebuilt/linux-x86_64/bin:$PATH
``` ```
Then build: Then, use one of the following commands, according to the current architecture:
```bash
ARCH=arm64 nimble libsdsAndroid
```
| Architecture | Command | | Architecture | command |
| ------------ | ------- | | ------------ | ------- |
| arm64 | `ARCH=arm64 nimble libsdsAndroid` | | arm64 | `make libsds-android ARCH=arm64` |
| amd64 | `ARCH=amd64 nimble libsdsAndroid` | | amd64 | `make libsds-android ARCH=amd64` |
| x86 | `ARCH=x86 nimble libsdsAndroid` | | x86 | `make libsds-android ARCH=x86` |
The library is output to `build/libsds.so`. At the end of the process, the library will be created in build/libsds.so
</details>
<details>
<summary>Dependency management</summary>
Dependencies are managed by [Nimble](https://github.com/nim-lang/nimble) and pinned via `nimble.lock`.
To set up dependencies locally:
```bash
nimble setup -l
```
To update dependencies:
```bash
nimble lock
```
After updating `nimble.lock`, the Nix `outputHash` in `nix/deps.nix` must be recalculated
by running `nix build` and updating the hash from the error output.
</details>
## License
Licensed under either of [Apache License, Version 2.0](LICENSE-APACHE) or [MIT license](LICENSE-MIT) at your option.

View File

@ -1,5 +0,0 @@
# begin Nimble config (version 2)
--noNimblePath
when withDir(thisDir(), system.fileExists("nimble.paths")):
include "nimble.paths"
# end Nimble config

8
env.sh Normal file
View File

@ -0,0 +1,8 @@
#!/bin/bash
# We use ${BASH_SOURCE[0]} instead of $0 to allow sourcing this file
# and we fall back to a Zsh-specific special var to also support Zsh.
REL_PATH="$(dirname ${BASH_SOURCE[0]:-${(%):-%x}})"
ABS_PATH="$(cd ${REL_PATH}; pwd)"
source ${ABS_PATH}/vendor/nimbus-build-system/scripts/env.sh

8
flake.lock generated
View File

@ -2,17 +2,17 @@
"nodes": { "nodes": {
"nixpkgs": { "nixpkgs": {
"locked": { "locked": {
"lastModified": 1770464364, "lastModified": 1757590060,
"narHash": "sha256-z5NJPSBwsLf/OfD8WTmh79tlSU8XgIbwmk6qB1/TFzY=", "narHash": "sha256-EWwwdKLMZALkgHFyKW7rmyhxECO74+N+ZO5xTDnY/5c=",
"owner": "NixOS", "owner": "NixOS",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "23d72dabcb3b12469f57b37170fcbc1789bd7457", "rev": "0ef228213045d2cdb5a169a95d63ded38670b293",
"type": "github" "type": "github"
}, },
"original": { "original": {
"owner": "NixOS", "owner": "NixOS",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "23d72dabcb3b12469f57b37170fcbc1789bd7457", "rev": "0ef228213045d2cdb5a169a95d63ded38670b293",
"type": "github" "type": "github"
} }
}, },

View File

@ -7,9 +7,7 @@
}; };
inputs = { inputs = {
# We are pinning the commit because ultimately we want to use same commit across different projects. nixpkgs.url = "github:NixOS/nixpkgs?rev=0ef228213045d2cdb5a169a95d63ded38670b293";
# A commit from nixpkgs 25.11 release : https://github.com/NixOS/nixpkgs/tree/release-25.11
nixpkgs.url = "github:NixOS/nixpkgs?rev=23d72dabcb3b12469f57b37170fcbc1789bd7457";
}; };
outputs = { self, nixpkgs }: outputs = { self, nixpkgs }:
@ -42,48 +40,39 @@
in rec { in rec {
packages = forAllSystems (system: let packages = forAllSystems (system: let
pkgs = pkgsFor.${system}; pkgs = pkgsFor.${system};
targets = builtins.filter
buildTargets = pkgs.callPackage ./nix/default.nix { (t: !(pkgs.stdenv.isDarwin && builtins.match "libsds-android.*" t != null))
[
"libsds-android-arm64"
"libsds-android-amd64"
"libsds-android-x86"
"libsds-android-arm"
];
in rec {
# non-Android package
libsds = pkgs.callPackage ./nix/default.nix {
inherit stableSystems;
src = self; src = self;
targets = [ "libsds" ];
}; };
# All potential targets — must match nimble task names in sds.nimble. default = libsds;
allTargets = [
"libsds"
"libsdsAndroidArm64"
"libsdsAndroidAmd64"
"libsdsAndroidX86"
"libsdsAndroidArm"
"libsdsIOS"
];
# Create a package for each target
allPackages = builtins.listToAttrs (map (t: {
name = t;
value = buildTargets.override { targets = [ t ]; };
}) allTargets);
in
allPackages // {
default = allPackages.libsds;
# Convenience aliases matching old hyphenated names.
libsds-android-arm64 = allPackages.libsdsAndroidArm64;
libsds-android-amd64 = allPackages.libsdsAndroidAmd64;
libsds-android-x86 = allPackages.libsdsAndroidX86;
libsds-android-arm = allPackages.libsdsAndroidArm;
libsds-ios = allPackages.libsdsIOS;
} }
); # Generate a package for each target dynamically
// builtins.listToAttrs (map (name: {
devShells = forAllSystems (system: inherit name;
let pkgs = pkgsFor.${system}; in { value = pkgs.callPackage ./nix/default.nix {
default = pkgs.mkShell { inherit stableSystems;
nativeBuildInputs = with pkgs; [ src = self;
nim-2_2 targets = [ name ];
nimble
];
}; };
} }) targets));
);
devShells = forAllSystems (system: let
pkgs = pkgsFor.${system};
in {
default = pkgs.callPackage ./nix/shell.nix { } ;
});
}; };
} }

3
go.mod Normal file
View File

@ -0,0 +1,3 @@
module sds-bindings
go 1.22.5

View File

@ -1,5 +1,5 @@
import std/json import std/json
import ./json_base_event, sds/[message] import ./json_base_event, ../../src/[message]
type JsonMessageReadyEvent* = ref object of JsonEvent type JsonMessageReadyEvent* = ref object of JsonEvent
messageId*: SdsMessageID messageId*: SdsMessageID

View File

@ -1,5 +1,5 @@
import std/json import std/json
import ./json_base_event, sds/[message] import ./json_base_event, ../../src/[message]
type JsonMessageSentEvent* = ref object of JsonEvent type JsonMessageSentEvent* = ref object of JsonEvent
messageId*: SdsMessageID messageId*: SdsMessageID

View File

@ -1,15 +1,15 @@
import std/json import std/json
import ./json_base_event, sds/[message], std/base64 import ./json_base_event, ../../src/[message]
type JsonMissingDependenciesEvent* = ref object of JsonEvent type JsonMissingDependenciesEvent* = ref object of JsonEvent
messageId*: SdsMessageID messageId*: SdsMessageID
missingDeps*: seq[HistoryEntry] missingDeps: seq[SdsMessageID]
channelId*: SdsChannelID channelId*: SdsChannelID
proc new*( proc new*(
T: type JsonMissingDependenciesEvent, T: type JsonMissingDependenciesEvent,
messageId: SdsMessageID, messageId: SdsMessageID,
missingDeps: seq[HistoryEntry], missingDeps: seq[SdsMessageID],
channelId: SdsChannelID, channelId: SdsChannelID,
): T = ): T =
return JsonMissingDependenciesEvent( return JsonMissingDependenciesEvent(
@ -17,15 +17,4 @@ proc new*(
) )
method `$`*(jsonMissingDependencies: JsonMissingDependenciesEvent): string = method `$`*(jsonMissingDependencies: JsonMissingDependenciesEvent): string =
var node = newJObject() $(%*jsonMissingDependencies)
node["eventType"] = %*jsonMissingDependencies.eventType
node["messageId"] = %*jsonMissingDependencies.messageId
node["channelId"] = %*jsonMissingDependencies.channelId
var missingDepsNode = newJArray()
for dep in jsonMissingDependencies.missingDeps:
var depNode = newJObject()
depNode["messageId"] = %*dep.messageId
depNode["retrievalHint"] = %*encode(dep.retrievalHint)
missingDepsNode.add(depNode)
node["missingDeps"] = missingDepsNode
$node

View File

@ -1,20 +0,0 @@
import std/[json, base64]
import ./json_base_event, sds/[message]
type JsonRepairReadyEvent* = ref object of JsonEvent
channelId*: SdsChannelID
message*: seq[byte]
proc new*(
T: type JsonRepairReadyEvent, message: seq[byte], channelId: SdsChannelID
): T =
return JsonRepairReadyEvent(
eventType: "repair_ready", message: message, channelId: channelId
)
method `$`*(jsonRepairReady: JsonRepairReadyEvent): string =
var node = newJObject()
node["eventType"] = %*jsonRepairReady.eventType
node["channelId"] = %*jsonRepairReady.channelId
node["message"] = %*encode(jsonRepairReady.message)
$node

View File

@ -5,10 +5,6 @@ type SdsCallBack* = proc(
callerRet: cint, msg: ptr cchar, len: csize_t, userData: pointer callerRet: cint, msg: ptr cchar, len: csize_t, userData: pointer
) {.cdecl, gcsafe, raises: [].} ) {.cdecl, gcsafe, raises: [].}
type SdsRetrievalHintProvider* = proc(
messageId: cstring, hint: ptr cstring, hintLen: ptr csize_t, userData: pointer
) {.cdecl, gcsafe, raises: [].}
const RET_OK*: cint = 0 const RET_OK*: cint = 0
const RET_ERR*: cint = 1 const RET_ERR*: cint = 1
const RET_MISSING_CALLBACK*: cint = 2 const RET_MISSING_CALLBACK*: cint = 2

View File

@ -20,8 +20,6 @@ extern "C" {
typedef void (*SdsCallBack) (int callerRet, const char* msg, size_t len, void* userData); typedef void (*SdsCallBack) (int callerRet, const char* msg, size_t len, void* userData);
typedef void (*SdsRetrievalHintProvider) (const char* messageId, char** hint, size_t* hintLen, void* userData);
// --- Core API Functions --- // --- Core API Functions ---
@ -30,8 +28,6 @@ void* SdsNewReliabilityManager(SdsCallBack callback, void* userData);
void SdsSetEventCallback(void* ctx, SdsCallBack callback, void* userData); void SdsSetEventCallback(void* ctx, SdsCallBack callback, void* userData);
void SdsSetRetrievalHintProvider(void* ctx, SdsRetrievalHintProvider callback, void* userData);
int SdsCleanupReliabilityManager(void* ctx, SdsCallBack callback, void* userData); int SdsCleanupReliabilityManager(void* ctx, SdsCallBack callback, void* userData);
int SdsResetReliabilityManager(void* ctx, SdsCallBack callback, void* userData); int SdsResetReliabilityManager(void* ctx, SdsCallBack callback, void* userData);

View File

@ -5,7 +5,7 @@
when defined(linux): when defined(linux):
{.passl: "-Wl,-soname,libsds.so".} {.passl: "-Wl,-soname,libsds.so".}
import std/[typetraits, tables, atomics, locks], chronos, chronicles import std/[typetraits, tables, atomics], chronos, chronicles
import import
./sds_thread/sds_thread, ./sds_thread/sds_thread,
./alloc, ./alloc,
@ -13,10 +13,10 @@ import
./sds_thread/inter_thread_communication/sds_thread_request, ./sds_thread/inter_thread_communication/sds_thread_request,
./sds_thread/inter_thread_communication/requests/ ./sds_thread/inter_thread_communication/requests/
[sds_lifecycle_request, sds_message_request, sds_dependencies_request], [sds_lifecycle_request, sds_message_request, sds_dependencies_request],
sds, ../src/[reliability_utils, message],
./events/[ ./events/[
json_message_ready_event, json_message_sent_event, json_missing_dependencies_event, json_message_ready_event, json_message_sent_event, json_missing_dependencies_event,
json_periodic_sync_event, json_repair_ready_event, json_periodic_sync_event,
] ]
################################################################################ ################################################################################
@ -57,29 +57,6 @@ template callEventCallback(ctx: ptr SdsContext, eventName: string, body: untyped
RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), ctx[].eventUserData RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), ctx[].eventUserData
) )
var
ctxPool: seq[ptr SdsContext]
ctxPoolLock: Lock
proc acquireCtx(callback: SdsCallBack, userData: pointer): ptr SdsContext =
ctxPoolLock.acquire()
defer: ctxPoolLock.release()
if ctxPool.len > 0:
result = ctxPool.pop()
else:
result = sds_thread.createSdsThread().valueOr:
let msg = "Error in createSdsThread: " & $error
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
return nil
proc releaseCtx(ctx: ptr SdsContext) =
ctxPoolLock.acquire()
defer: ctxPoolLock.release()
ctx.userData = nil
ctx.eventCallback = nil
ctx.eventUserData = nil
ctxPool.add(ctx)
proc handleRequest( proc handleRequest(
ctx: ptr SdsContext, ctx: ptr SdsContext,
requestType: RequestType, requestType: RequestType,
@ -105,7 +82,7 @@ proc onMessageSent(ctx: ptr SdsContext): MessageSentCallback =
$JsonMessageSentEvent.new(messageId, channelId) $JsonMessageSentEvent.new(messageId, channelId)
proc onMissingDependencies(ctx: ptr SdsContext): MissingDependenciesCallback = proc onMissingDependencies(ctx: ptr SdsContext): MissingDependenciesCallback =
return proc(messageId: SdsMessageID, missingDeps: seq[HistoryEntry], channelId: SdsChannelID) {.gcsafe.} = return proc(messageId: SdsMessageID, missingDeps: seq[SdsMessageID], channelId: SdsChannelID) {.gcsafe.} =
callEventCallback(ctx, "onMissingDependencies"): callEventCallback(ctx, "onMissingDependencies"):
$JsonMissingDependenciesEvent.new(messageId, missingDeps, channelId) $JsonMissingDependenciesEvent.new(messageId, missingDeps, channelId)
@ -114,30 +91,6 @@ proc onPeriodicSync(ctx: ptr SdsContext): PeriodicSyncCallback =
callEventCallback(ctx, "onPeriodicSync"): callEventCallback(ctx, "onPeriodicSync"):
$JsonPeriodicSyncEvent.new() $JsonPeriodicSyncEvent.new()
proc onRepairReady(ctx: ptr SdsContext): RepairReadyCallback =
return proc(message: seq[byte], channelId: SdsChannelID) {.gcsafe.} =
callEventCallback(ctx, "onRepairReady"):
$JsonRepairReadyEvent.new(message, channelId)
proc onRetrievalHint(ctx: ptr SdsContext): RetrievalHintProvider =
return proc(messageId: SdsMessageID): seq[byte] {.gcsafe.} =
if isNil(ctx.retrievalHintProvider):
return @[]
var hint: cstring
var hintLen: csize_t
cast[SdsRetrievalHintProvider](ctx.retrievalHintProvider)(
messageId.cstring, addr hint, addr hintLen, ctx.retrievalHintUserData
)
if not isNil(hint) and hintLen > 0:
var hintBytes = newSeq[byte](hintLen)
copyMem(addr hintBytes[0], hint, hintLen)
deallocShared(hint)
return hintBytes
return @[]
### End of not-exported components ### End of not-exported components
################################################################################ ################################################################################
@ -164,7 +117,6 @@ proc initializeLibrary() {.exported.} =
## Every Nim library needs to call `<yourprefix>NimMain` once exactly, to initialize the Nim runtime. ## Every Nim library needs to call `<yourprefix>NimMain` once exactly, to initialize the Nim runtime.
## Being `<yourprefix>` the value given in the optional compilation flag --nimMainPrefix:yourprefix ## Being `<yourprefix>` the value given in the optional compilation flag --nimMainPrefix:yourprefix
libsdsNimMain() libsdsNimMain()
ctxPoolLock.initLock() # ensure the lock is initialized once (fix Windows crash)
when declared(setupForeignThreadGc): when declared(setupForeignThreadGc):
setupForeignThreadGc() setupForeignThreadGc()
when declared(nimGC_setStackBottom): when declared(nimGC_setStackBottom):
@ -188,9 +140,10 @@ proc SdsNewReliabilityManager(
echo "error: missing callback in NewReliabilityManager" echo "error: missing callback in NewReliabilityManager"
return nil return nil
## Create or reuse the SDS thread that will keep waiting for req from the main thread. ## Create the SDS thread that will keep waiting for req from the main thread.
var ctx = acquireCtx(callback, userData) var ctx = sds_thread.createSdsThread().valueOr:
if ctx.isNil(): let msg = "Error in createSdsThread: " & $error
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
return nil return nil
ctx.userData = userData ctx.userData = userData
@ -200,8 +153,6 @@ proc SdsNewReliabilityManager(
messageSentCb: onMessageSent(ctx), messageSentCb: onMessageSent(ctx),
missingDependenciesCb: onMissingDependencies(ctx), missingDependenciesCb: onMissingDependencies(ctx),
periodicSyncCb: onPeriodicSync(ctx), periodicSyncCb: onPeriodicSync(ctx),
retrievalHintProvider: onRetrievalHint(ctx),
repairReadyCb: onRepairReady(ctx),
) )
let retCode = handleRequest( let retCode = handleRequest(
@ -226,33 +177,20 @@ proc SdsSetEventCallback(
ctx[].eventCallback = cast[pointer](callback) ctx[].eventCallback = cast[pointer](callback)
ctx[].eventUserData = userData ctx[].eventUserData = userData
proc SdsSetRetrievalHintProvider(
ctx: ptr SdsContext, callback: SdsRetrievalHintProvider, userData: pointer
) {.dynlib, exportc.} =
initializeLibrary()
ctx[].retrievalHintProvider = cast[pointer](callback)
ctx[].retrievalHintUserData = userData
proc SdsCleanupReliabilityManager( proc SdsCleanupReliabilityManager(
ctx: ptr SdsContext, callback: SdsCallBack, userData: pointer ctx: ptr SdsContext, callback: SdsCallBack, userData: pointer
): cint {.dynlib, exportc.} = ): cint {.dynlib, exportc.} =
initializeLibrary() initializeLibrary()
checkLibsdsParams(ctx, callback, userData) checkLibsdsParams(ctx, callback, userData)
let resetRes = handleRequest( sds_thread.destroySdsThread(ctx).isOkOr:
ctx, let msg = "libsds error: " & $error
RequestType.LIFECYCLE, callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
SdsLifecycleRequest.createShared(SdsLifecycleMsgType.RESET_RELIABILITY_MANAGER),
callback,
userData,
)
if resetRes == RET_ERR:
return RET_ERR return RET_ERR
releaseCtx(ctx) ## always need to invoke the callback although we don't retrieve value to the caller
callback(RET_OK, nil, 0, userData)
# handleRequest already invoked the callback; nothing else to signal here.
return RET_OK return RET_OK
proc SdsResetReliabilityManager( proc SdsResetReliabilityManager(

View File

@ -1 +0,0 @@
path = "../"

View File

@ -1,8 +1,8 @@
import std/[json, strutils, net, sequtils] import std/[json, strutils, net, sequtils]
import chronos, chronicles, results import chronos, chronicles, results
import library/alloc import ../../../alloc
import sds import ../../../../src/[reliability_utils, reliability]
type SdsDependenciesMsgType* = enum type SdsDependenciesMsgType* = enum
MARK_DEPENDENCIES_MET MARK_DEPENDENCIES_MET

View File

@ -1,8 +1,8 @@
import std/json import std/json
import chronos, chronicles, results import chronos, chronicles, results
import library/alloc import ../../../alloc
import sds import ../../../../src/[reliability_utils, reliability]
type SdsLifecycleMsgType* = enum type SdsLifecycleMsgType* = enum
CREATE_RELIABILITY_MANAGER CREATE_RELIABILITY_MANAGER
@ -40,7 +40,6 @@ proc createReliabilityManager(
rm.setCallbacks( rm.setCallbacks(
appCallbacks.messageReadyCb, appCallbacks.messageSentCb, appCallbacks.messageReadyCb, appCallbacks.messageSentCb,
appCallbacks.missingDependenciesCb, appCallbacks.periodicSyncCb, appCallbacks.missingDependenciesCb, appCallbacks.periodicSyncCb,
appCallbacks.retrievalHintProvider, appCallbacks.repairReadyCb,
) )
return ok(rm) return ok(rm)

View File

@ -1,8 +1,8 @@
import std/[json, strutils, net, sequtils, base64] import std/[json, strutils, net, sequtils]
import chronos, chronicles, results import chronos, chronicles, results
import library/alloc import ../../../alloc
import sds import ../../../../src/[reliability_utils, reliability, message]
type SdsMessageMsgType* = enum type SdsMessageMsgType* = enum
WRAP_MESSAGE WRAP_MESSAGE
@ -17,7 +17,7 @@ type SdsMessageRequest* = object
type SdsUnwrapResponse* = object type SdsUnwrapResponse* = object
message*: seq[byte] message*: seq[byte]
missingDeps*: seq[HistoryEntry] missingDeps*: seq[SdsMessageID]
channelId*: string channelId*: string
proc createShared*( proc createShared*(
@ -62,22 +62,12 @@ proc process*(
of UNWRAP_MESSAGE: of UNWRAP_MESSAGE:
let messageBytes = self.message.toSeq() let messageBytes = self.message.toSeq()
let (unwrappedMessage, missingDeps, extractedChannelId) = unwrapReceivedMessage(rm[], messageBytes).valueOr: let (unwrappedMessage, missingDeps, channelId) = unwrapReceivedMessage(rm[], messageBytes).valueOr:
return err("error processing UNWRAP_MESSAGE request: " & $error) return err("error processing UNWRAP_MESSAGE request: " & $error)
let res = SdsUnwrapResponse(message: unwrappedMessage, missingDeps: missingDeps, channelId: extractedChannelId) let res = SdsUnwrapResponse(message: unwrappedMessage, missingDeps: missingDeps, channelId: channelId)
# return the result as a json string # return the result as a json string
var node = newJObject() return ok($(%*(res)))
node["message"] = %*res.message
node["channelId"] = %*extractedChannelId
var missingDepsNode = newJArray()
for dep in res.missingDeps:
var depNode = newJObject()
depNode["messageId"] = %*dep.messageId
depNode["retrievalHint"] = %*encode(dep.retrievalHint)
missingDepsNode.add(depNode)
node["missingDeps"] = missingDepsNode
return ok($node)
return ok("") return ok("")

View File

@ -7,7 +7,7 @@ import chronos, chronos/threadsync
import import
../../ffi_types, ../../ffi_types,
./requests/[sds_lifecycle_request, sds_message_request, sds_dependencies_request], ./requests/[sds_lifecycle_request, sds_message_request, sds_dependencies_request],
sds/sds_utils ../../../src/[reliability_utils]
type RequestType* {.pure.} = enum type RequestType* {.pure.} = enum
LIFECYCLE LIFECYCLE

View File

@ -7,7 +7,7 @@ import chronicles, chronos, chronos/threadsync, taskpools/channels_spsc_single,
import import
../ffi_types, ../ffi_types,
./inter_thread_communication/sds_thread_request, ./inter_thread_communication/sds_thread_request,
sds/sds_utils ../../src/[reliability_utils]
type SdsContext* = object type SdsContext* = object
thread: Thread[(ptr SdsContext)] thread: Thread[(ptr SdsContext)]
@ -20,8 +20,6 @@ type SdsContext* = object
userData*: pointer userData*: pointer
eventCallback*: pointer eventCallback*: pointer
eventUserdata*: pointer eventUserdata*: pointer
retrievalHintProvider*: pointer
retrievalHintUserData*: pointer
running: Atomic[bool] # To control when the thread is running running: Atomic[bool] # To control when the thread is running
proc runSds(ctx: ptr SdsContext) {.async.} = proc runSds(ctx: ptr SdsContext) {.async.} =

View File

@ -1 +0,0 @@
nimcache = "build/nimcache/$projectName"

View File

@ -1,319 +0,0 @@
{
"version": 2,
"packages": {
"unittest2": {
"version": "0.2.5",
"vcsRevision": "26f2ef3ae0ec72a2a75bfe557e02e88f6a31c189",
"url": "https://github.com/status-im/nim-unittest2",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "02bb3751ba9ddc3c17bfd89f2e41cb6bfb8fc0c9"
}
},
"bearssl": {
"version": "0.2.6",
"vcsRevision": "11e798b62b8e6beabe958e048e9e24c7e0f9ee63",
"url": "https://github.com/status-im/nim-bearssl",
"downloadMethod": "git",
"dependencies": [
"unittest2"
],
"checksums": {
"sha1": "7e068f119664cf47ad0cfb74ef4c56fb6b616523"
}
},
"bearssl_pkey_decoder": {
"version": "0.1.0",
"vcsRevision": "21dd3710df9345ed2ad8bf8f882761e07863b8e0",
"url": "https://github.com/vacp2p/bearssl_pkey_decoder",
"downloadMethod": "git",
"dependencies": [
"bearssl"
],
"checksums": {
"sha1": "21b42e2e6ddca6c875d3fc50f36a5115abf51714"
}
},
"results": {
"version": "0.5.1",
"vcsRevision": "df8113dda4c2d74d460a8fa98252b0b771bf1f27",
"url": "https://github.com/arnetheduck/nim-results",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "a9c011f74bc9ed5c91103917b9f382b12e82a9e7"
}
},
"stew": {
"version": "0.4.2",
"vcsRevision": "b66168735d6f3841c5239c3169d3fe5fe98b1257",
"url": "https://github.com/status-im/nim-stew",
"downloadMethod": "git",
"dependencies": [
"results",
"unittest2"
],
"checksums": {
"sha1": "928e82cb8d2f554e8f10feb2349ee9c32fee3a8c"
}
},
"faststreams": {
"version": "0.5.0",
"vcsRevision": "ce27581a3e881f782f482cb66dc5b07a02bd615e",
"url": "https://github.com/status-im/nim-faststreams",
"downloadMethod": "git",
"dependencies": [
"stew",
"unittest2"
],
"checksums": {
"sha1": "ee61e507b805ae1df7ec936f03f2d101b0d72383"
}
},
"serialization": {
"version": "0.5.2",
"vcsRevision": "b0f2fa32960ea532a184394b0f27be37bd80248b",
"url": "https://github.com/status-im/nim-serialization",
"downloadMethod": "git",
"dependencies": [
"faststreams",
"unittest2",
"stew"
],
"checksums": {
"sha1": "fa35c1bb76a0a02a2379fe86eaae0957c7527cb8"
}
},
"json_serialization": {
"version": "0.4.4",
"vcsRevision": "c343b0e243d9e17e2c40f3a8a24340f7c4a71d44",
"url": "https://github.com/status-im/nim-json-serialization",
"downloadMethod": "git",
"dependencies": [
"faststreams",
"serialization",
"stew",
"results"
],
"checksums": {
"sha1": "8b3115354104858a0ac9019356fb29720529c2bd"
}
},
"testutils": {
"version": "0.8.0",
"vcsRevision": "e4d37dc1652d5c63afb89907efb5a5e812261797",
"url": "https://github.com/status-im/nim-testutils",
"downloadMethod": "git",
"dependencies": [
"unittest2"
],
"checksums": {
"sha1": "d1678f50aa47d113b4e77d41eec2190830b523fa"
}
},
"chronicles": {
"version": "0.12.2",
"vcsRevision": "27ec507429a4eb81edc20f28292ee8ec420be05b",
"url": "https://github.com/status-im/nim-chronicles",
"downloadMethod": "git",
"dependencies": [
"faststreams",
"serialization",
"json_serialization",
"testutils"
],
"checksums": {
"sha1": "02febb20d088120b2836d3306cfa21f434f88f65"
}
},
"httputils": {
"version": "0.4.0",
"vcsRevision": "c53852d9e24205b6363bba517fa8ee7bde823691",
"url": "https://github.com/status-im/nim-http-utils",
"downloadMethod": "git",
"dependencies": [
"stew",
"results",
"unittest2"
],
"checksums": {
"sha1": "298bc5b6fe4e5aa9c3b7e2ebfa17191675020f10"
}
},
"chronos": {
"version": "4.0.4",
"vcsRevision": "0646c444fce7c7ed08ef6f2c9a7abfd172ffe655",
"url": "https://github.com/status-im/nim-chronos",
"downloadMethod": "git",
"dependencies": [
"results",
"stew",
"bearssl",
"httputils",
"unittest2"
],
"checksums": {
"sha1": "455802a90204d8ad6b31d53f2efff8ebfe4c834a"
}
},
"dnsclient": {
"version": "0.3.4",
"vcsRevision": "23214235d4784d24aceed99bbfe153379ea557c8",
"url": "https://github.com/ba0f3/dnsclient.nim",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "65262c7e533ff49d6aca5539da4bc6c6ce132f40"
}
},
"jwt": {
"version": "0.2",
"vcsRevision": "18f8378de52b241f321c1f9ea905456e89b95c6f",
"url": "https://github.com/vacp2p/nim-jwt.git",
"downloadMethod": "git",
"dependencies": [
"bearssl",
"bearssl_pkey_decoder"
],
"checksums": {
"sha1": "bcfd6fc9c5e10a52b87117219b7ab5c98136bc8e"
}
},
"nimcrypto": {
"version": "0.7.3",
"vcsRevision": "b3dbc9c4d08e58c5b7bfad6dc7ef2ee52f2f4c08",
"url": "https://github.com/cheatfate/nimcrypto",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "f72b90fe3f4da09efa482de4f8729e7ee4abea2f"
}
},
"metrics": {
"version": "0.1.2",
"vcsRevision": "11d0cddfb0e711aa2a8c75d1892ae24a64c299fc",
"url": "https://github.com/status-im/nim-metrics",
"downloadMethod": "git",
"dependencies": [
"chronos",
"results",
"stew"
],
"checksums": {
"sha1": "5cdac99d85d3c146d170e85064c88fb28f377842"
}
},
"secp256k1": {
"version": "0.6.0.3.2",
"vcsRevision": "d8f1288b7c72f00be5fc2c5ea72bf5cae1eafb15",
"url": "https://github.com/status-im/nim-secp256k1",
"downloadMethod": "git",
"dependencies": [
"stew",
"results",
"nimcrypto"
],
"checksums": {
"sha1": "6618ef9de17121846a8c1d0317026b0ce8584e10"
}
},
"zlib": {
"version": "0.1.0",
"vcsRevision": "e680f269fb01af2c34a2ba879ff281795a5258fe",
"url": "https://github.com/status-im/nim-zlib",
"downloadMethod": "git",
"dependencies": [
"stew",
"results"
],
"checksums": {
"sha1": "bbde4f5a97a84b450fef7d107461e5f35cf2b47f"
}
},
"websock": {
"version": "0.2.1",
"vcsRevision": "35ae76f1559e835c80f9c1a3943bf995d3dd9eb5",
"url": "https://github.com/status-im/nim-websock",
"downloadMethod": "git",
"dependencies": [
"chronos",
"httputils",
"chronicles",
"stew",
"nimcrypto",
"bearssl",
"results",
"zlib"
],
"checksums": {
"sha1": "1cb5efa10cd389bc01d0707c242ae010c76a03cd"
}
},
"lsquic": {
"version": "0.0.1",
"vcsRevision": "4fb03ee7bfb39aecb3316889fdcb60bec3d0936f",
"url": "https://github.com/vacp2p/nim-lsquic",
"downloadMethod": "git",
"dependencies": [
"zlib",
"stew",
"chronos",
"nimcrypto",
"unittest2",
"chronicles"
],
"checksums": {
"sha1": "f465fa994346490d0924d162f53d9b5aec62f948"
}
},
"libp2p": {
"version": "1.15.2",
"vcsRevision": "ca48c3718246bb411ff0e354a70cb82d9a28de0d",
"url": "https://github.com/vacp2p/nim-libp2p",
"downloadMethod": "git",
"dependencies": [
"nimcrypto",
"dnsclient",
"bearssl",
"chronicles",
"chronos",
"metrics",
"secp256k1",
"stew",
"websock",
"unittest2",
"results",
"lsquic",
"jwt"
],
"checksums": {
"sha1": "3b2cdc7e00261eb4210ca3d44ec3bd64c2b7bbba"
}
},
"stint": {
"version": "0.8.2",
"vcsRevision": "470b7892561b5179ab20bd389a69217d6213fe58",
"url": "https://github.com/status-im/nim-stint",
"downloadMethod": "git",
"dependencies": [
"stew",
"unittest2"
],
"checksums": {
"sha1": "d8f871fd617e7857192d4609fe003b48942a8ae5"
}
},
"taskpools": {
"version": "0.1.0",
"vcsRevision": "9e8ccc754631ac55ac2fd495e167e74e86293edb",
"url": "https://github.com/status-im/nim-taskpools",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "09e1b2fdad55b973724d61227971afc0df0b7a81"
}
}
},
"tasks": {}
}

View File

@ -9,15 +9,27 @@ nix develop
## Building ## Building
To simply build you can use: To build a Codex you can use:
```sh ```sh
nix build '.#libsds' nix build '.?submodules=1#default'
``` ```
The `?submodules=1` part should eventually not be necessary.
For more details see:
https://github.com/NixOS/nix/issues/4423
It can be also done without even cloning the repo: It can be also done without even cloning the repo:
```sh ```sh
nix build github:waku-org/nim-sds nix build 'git+https://github.com/waku-org/nim-sds?submodules=1#'
nix build github:waku-org/nim-sds#libsds-ios ```
nix build github:waku-org/nim-sds#libsds-android-arm64"
## Running
```sh
nix run 'git+https://github.com/waku-org/nim-sds?submodules=1#''
```
## Testing
```sh
nix flake check ".?submodules=1#"
``` ```
Or as a flake input.

View File

@ -1,121 +1,76 @@
{ {
pkgs, config ? {},
pkgs ? import <nixpkgs> { },
src ? ../., src ? ../.,
# Nimble targets to build (task names from sds.nimble). targets ? ["libsds-android-arm64"],
targets ? ["libsdsAndroidArm64"], verbosity ? 2,
# These are the only platforms tested in CI and considered stable. useSystemNim ? true,
stableSystems ? ["x86_64-linux" "aarch64-linux" "x86_64-darwin" "aarch64-darwin" "x86_64-windows"], quickAndDirty ? true,
stableSystems ? [
"x86_64-linux" "aarch64-linux"
]
}: }:
assert pkgs.lib.assertMsg ((src.submodules or true) == true)
"Unable to build without submodules. Append '?submodules=1#' to the URI.";
let let
inherit (pkgs) stdenv lib writeScriptBin callPackage; inherit (pkgs) stdenv lib writeScriptBin callPackage;
inherit (lib) any match substring optionals optionalString; inherit (lib) any match substring optionals optionalString;
# Check if build is for android platform. # Check if build is for android platform.
containsAndroid = s: (match ".*[Aa]ndroid.*" s) != null; containsAndroid = s: (match ".*android.*" s) != null;
isAndroidBuild = any containsAndroid targets; isAndroidBuild = any containsAndroid targets;
tools = callPackage ./tools.nix {}; version = substring 0 8 (src.sourceInfo.rev or "dirty");
revision = substring 0 8 (src.rev or src.dirtyRev or "00000000"); in stdenv.mkDerivation rec {
version = tools.findKeyValue "^version = \"([a-f0-9.-]+)\"$" ../sds.nimble;
# Fetched dep sources, keyed by package name.
deps = import ./deps.nix { inherit pkgs; };
# nimble.lock metadata (version + checksums) for pkgs2 directory naming.
lockFile = builtins.fromJSON (builtins.readFile ../nimble.lock);
lockPkgs = lockFile.packages;
# nimble.paths for the Nim compiler (read by config.nims).
# Paths must be double-quoted so that NimScript can parse the include correctly.
nimblePaths = pkgs.writeText "nimble.paths" (
builtins.concatStringsSep "\n" (
[ "--noNimblePath" ] ++
builtins.concatMap (p: [ "--path:\"${p}\"" "--path:\"${p}/src\"" ])
(builtins.attrValues deps)
)
);
# Shell commands to populate pkgs2 with writable copies of only the Nim
# source files nimble needs for dependency resolution. Full source for
# compilation is provided via nimble.paths pointing to the Nix store.
# Using rsync (same file filter as the old fixed-output deps derivation).
# Each dir also gets a nimblemeta.json so nimble recognises it as installed
# and does not attempt to re-download the package.
pkgs2SetupCmds = lib.concatStringsSep "\n" (
lib.mapAttrsToList (name: dep:
let
meta = lockPkgs.${name};
dirName = "${name}-${meta.version}-${meta.checksums.sha1}";
nimbleMeta = pkgs.writeText "${name}-nimblemeta.json" (builtins.toJSON {
version = 1;
metaData = {
url = meta.url;
downloadMethod = "git";
vcsRevision = meta.vcsRevision;
files = [];
binaries = [];
specialVersions = [ meta.version ];
};
});
in ''
mkdir -p "$NIMBLE_DIR/pkgs2/${dirName}"
rsync -a \
--include='*/' \
--include='*.nim' \
--include='*.nims' \
--include='*.nimble' \
--include='*.json' \
--exclude='*' \
${dep}/ "$NIMBLE_DIR/pkgs2/${dirName}/"
chmod -R u+w "$NIMBLE_DIR/pkgs2/${dirName}"
cp ${nimbleMeta} "$NIMBLE_DIR/pkgs2/${dirName}/nimblemeta.json"
''
) deps
);
in stdenv.mkDerivation {
pname = "nim-sds"; pname = "nim-sds";
inherit src; inherit src version;
version = "${version}-${revision}";
env = {
NIMFLAGS = "-d:disableMarchNative";
ANDROID_SDK_ROOT = optionalString isAndroidBuild pkgs.androidPkgs.sdk;
ANDROID_NDK_ROOT = optionalString isAndroidBuild pkgs.androidPkgs.ndk;
};
buildInputs = with pkgs; [ buildInputs = with pkgs; [
openssl gmp zip nim-2_2 git nimble openssl
gmp
zip
]; ];
# Dependencies that should only exist in the build environment. # Dependencies that should only exist in the build environment.
nativeBuildInputs = with pkgs; [ nativeBuildInputs = let
nim-2_2 nimble rsync cmake which patchelf # Fix for Nim compiler calling 'git rev-parse' and 'lsb_release'.
fakeGit = writeScriptBin "git" "echo ${version}";
in with pkgs; [
cmake
which
nim-unwrapped-2_2
fakeGit
] ++ optionals stdenv.isLinux [ ] ++ optionals stdenv.isLinux [
pkgs.lsb-release pkgs.lsb-release
]; ];
ANDROID_SDK_ROOT = optionalString isAndroidBuild pkgs.androidPkgs.sdk;
ANDROID_NDK_ROOT = optionalString isAndroidBuild pkgs.androidPkgs.ndk;
NIMFLAGS = "-d:disableMarchNative -d:git_revision_override=${version}";
XDG_CACHE_HOME = "/tmp";
makeFlags = targets ++ [
"V=${toString verbosity}"
"USE_SYSTEM_NIM=1"
];
configurePhase = '' configurePhase = ''
export NIMBLE_DIR=$NIX_BUILD_TOP/nimbledeps patchShebangs . vendor/nimbus-build-system > /dev/null
mkdir -p $NIMBLE_DIR/pkgs2 make nimbus-build-system-paths
make nimbus-build-system-nimble-dir
# Populate pkgs2 with writable copies so nimble considers deps installed
# and does not attempt to download them (which fails in the Nix sandbox).
${pkgs2SetupCmds}
# Write nimble.paths so config.nims passes --path: flags to the Nim compiler.
cp ${nimblePaths} ./nimble.paths
''; '';
buildPhase = lib.concatMapStringsSep "\n" (target: '' preBuild = ''
nimble --verbose ${target} ln -s sds.nimble sds.nims
'') targets; '';
installPhase = let installPhase = let
androidManifest = '' androidManifest = ''
<manifest xmlns:android=\"http://schemas.android.com/apk/res/android\" package=\"org.waku.nim-sds\" /> <manifest xmlns:android=\"http://schemas.android.com/apk/res/android\" package=\"org.waku.${pname}\" />
''; '';
in if isAndroidBuild then '' in if isAndroidBuild then ''
mkdir -p $out/jni mkdir -p $out/jni
@ -125,7 +80,7 @@ in stdenv.mkDerivation {
zip -r libwaku.aar * zip -r libwaku.aar *
'' else '' '' else ''
mkdir -p $out/lib -p $out/include mkdir -p $out/lib -p $out/include
cp build/lib* $out/lib/ cp build/* $out/lib/
cp library/libsds.h $out/include/ cp library/libsds.h $out/include/
''; '';
@ -133,6 +88,6 @@ in stdenv.mkDerivation {
description = "Nim implementation of the e2e reliability protocol"; description = "Nim implementation of the e2e reliability protocol";
homepage = "https://github.com/status-im/nim-sds"; homepage = "https://github.com/status-im/nim-sds";
license = licenses.mit; license = licenses.mit;
platforms = stableSystems; platforms = ["x86_64-linux" "aarch64-linux" "x86_64-darwin" "aarch64-darwin" "x86_64-windows"];
}; };
} }

View File

@ -1,167 +0,0 @@
# AUTOGENERATED from nimble.lock — do not edit manually.
# Regenerate with: ./tools/gen-nix-deps.sh nimble.lock nix/deps.nix
{ pkgs }:
{
unittest2 = pkgs.fetchgit {
url = "https://github.com/status-im/nim-unittest2";
rev = "26f2ef3ae0ec72a2a75bfe557e02e88f6a31c189";
sha256 = "1n8n36kad50m97b64y7bzzknz9n7szffxhp0bqpk3g2v7zpda8sw";
fetchSubmodules = true;
};
bearssl = pkgs.fetchgit {
url = "https://github.com/status-im/nim-bearssl";
rev = "11e798b62b8e6beabe958e048e9e24c7e0f9ee63";
sha256 = "0qx36iiawrhmx9qjqcyfvz0134ph9dy8ryq3ch8d31gq6ir7aw84";
fetchSubmodules = true;
};
bearssl_pkey_decoder = pkgs.fetchgit {
url = "https://github.com/vacp2p/bearssl_pkey_decoder";
rev = "21dd3710df9345ed2ad8bf8f882761e07863b8e0";
sha256 = "0bl3f147zmkazbhdkr4cj1nipf9rqiw3g4hh1j424k9hpl55zdpg";
fetchSubmodules = true;
};
results = pkgs.fetchgit {
url = "https://github.com/arnetheduck/nim-results";
rev = "df8113dda4c2d74d460a8fa98252b0b771bf1f27";
sha256 = "1h7amas16sbhlr7zb7n3jb5434k98ji375vzw72k1fsc86vnmcr9";
fetchSubmodules = true;
};
stew = pkgs.fetchgit {
url = "https://github.com/status-im/nim-stew";
rev = "b66168735d6f3841c5239c3169d3fe5fe98b1257";
sha256 = "10n71vfa6klzd9dmal96jy0hiqk04gaj8wc9g91z6fclryf0yq92";
fetchSubmodules = true;
};
faststreams = pkgs.fetchgit {
url = "https://github.com/status-im/nim-faststreams";
rev = "ce27581a3e881f782f482cb66dc5b07a02bd615e";
sha256 = "0y6bw2scnmr8cxj4fg18w7f34l2bh9qwg5nhlgd84m9fpr5bqarn";
fetchSubmodules = true;
};
serialization = pkgs.fetchgit {
url = "https://github.com/status-im/nim-serialization";
rev = "b0f2fa32960ea532a184394b0f27be37bd80248b";
sha256 = "0wip1fjx7ka39ck1g1xvmyarzq1p5dlngpqil6zff8k8z5skiz27";
fetchSubmodules = true;
};
json_serialization = pkgs.fetchgit {
url = "https://github.com/status-im/nim-json-serialization";
rev = "c343b0e243d9e17e2c40f3a8a24340f7c4a71d44";
sha256 = "0i8sq51nqj8lshf6bfixaz9a7sq0ahsbvq3chkxdvv4khsqvam91";
fetchSubmodules = true;
};
testutils = pkgs.fetchgit {
url = "https://github.com/status-im/nim-testutils";
rev = "e4d37dc1652d5c63afb89907efb5a5e812261797";
sha256 = "0nv0a9jm5b1rn3y52cxvyj8xz3jg235mp0xbirfp2cda0icgy1si";
fetchSubmodules = true;
};
chronicles = pkgs.fetchgit {
url = "https://github.com/status-im/nim-chronicles";
rev = "27ec507429a4eb81edc20f28292ee8ec420be05b";
sha256 = "1xx9fcfwgcaizq3s7i3s03mclz253r5j8va38l9ycl19fcbc96z9";
fetchSubmodules = true;
};
httputils = pkgs.fetchgit {
url = "https://github.com/status-im/nim-http-utils";
rev = "c53852d9e24205b6363bba517fa8ee7bde823691";
sha256 = "1b332smfyp2yvhvfjrfqy4kvh9pc5w6hqh17f1yclz5z1j5xdpf1";
fetchSubmodules = true;
};
chronos = pkgs.fetchgit {
url = "https://github.com/status-im/nim-chronos";
rev = "0646c444fce7c7ed08ef6f2c9a7abfd172ffe655";
sha256 = "1r499jl0lhnjq7hgddwgjl0gh3y1mprnqkhk0h6yh3cwgsmr5ym9";
fetchSubmodules = true;
};
dnsclient = pkgs.fetchgit {
url = "https://github.com/ba0f3/dnsclient.nim";
rev = "23214235d4784d24aceed99bbfe153379ea557c8";
sha256 = "03mf3lw5c0m5nq9ppa49nylrl8ibkv2zzlc0wyhqg7w09kz6hks6";
fetchSubmodules = true;
};
jwt = pkgs.fetchgit {
url = "https://github.com/vacp2p/nim-jwt.git";
rev = "18f8378de52b241f321c1f9ea905456e89b95c6f";
sha256 = "1986czmszdxj6g9yr7xn1fx8y2y9mwpb3f1bn9nc6973qawsdm0p";
fetchSubmodules = true;
};
nimcrypto = pkgs.fetchgit {
url = "https://github.com/cheatfate/nimcrypto";
rev = "b3dbc9c4d08e58c5b7bfad6dc7ef2ee52f2f4c08";
sha256 = "1v4rz42lwcazs6isi3kmjylkisr84mh0kgmlqycx4i885dn3g0l4";
fetchSubmodules = true;
};
metrics = pkgs.fetchgit {
url = "https://github.com/status-im/nim-metrics";
rev = "11d0cddfb0e711aa2a8c75d1892ae24a64c299fc";
sha256 = "1jrf2cf7v3iqjsk6grzvivxic1shhaxnvab6d35rxs2kcy6b5dv0";
fetchSubmodules = true;
};
secp256k1 = pkgs.fetchgit {
url = "https://github.com/status-im/nim-secp256k1";
rev = "d8f1288b7c72f00be5fc2c5ea72bf5cae1eafb15";
sha256 = "1qjrmwbngb73f6r1fznvig53nyal7wj41d1cmqfksrmivk2sgrn2";
fetchSubmodules = true;
};
zlib = pkgs.fetchgit {
url = "https://github.com/status-im/nim-zlib";
rev = "e680f269fb01af2c34a2ba879ff281795a5258fe";
sha256 = "1xw9f1gjsgqihdg7kdkbaq1wankgnx2vn9l3ihc6nqk2jzv5bvk5";
fetchSubmodules = true;
};
websock = pkgs.fetchgit {
url = "https://github.com/status-im/nim-websock";
rev = "35ae76f1559e835c80f9c1a3943bf995d3dd9eb5";
sha256 = "1j6dklzb6b6bv2aiglbiyflja2vdpmyxfirv98f49y62mykq0yrw";
fetchSubmodules = true;
};
lsquic = pkgs.fetchgit {
url = "https://github.com/vacp2p/nim-lsquic";
rev = "4fb03ee7bfb39aecb3316889fdcb60bec3d0936f";
sha256 = "0qdhcd4hyp185szc9sv3jvwdwc9zp3j0syy7glxv13k9bchfmkfg";
fetchSubmodules = true;
};
libp2p = pkgs.fetchgit {
url = "https://github.com/vacp2p/nim-libp2p";
rev = "ca48c3718246bb411ff0e354a70cb82d9a28de0d";
sha256 = "07qfjjrq6w7bj9dbchvcrpla47jidngbrgmigbhl7fh3cfkdabc9";
fetchSubmodules = true;
};
stint = pkgs.fetchgit {
url = "https://github.com/status-im/nim-stint";
rev = "470b7892561b5179ab20bd389a69217d6213fe58";
sha256 = "1isfwmbj98qfi5pm9acy0yyvq0vlz38nxp30xl43jx2mmaga2w22";
fetchSubmodules = true;
};
taskpools = pkgs.fetchgit {
url = "https://github.com/status-im/nim-taskpools";
rev = "9e8ccc754631ac55ac2fd495e167e74e86293edb";
sha256 = "1y78l33vdjxmb9dkr455pbphxa73rgdsh8m9gpkf4d9b1wm1yivy";
fetchSubmodules = true;
};
}

View File

@ -10,8 +10,8 @@
androidenv.composeAndroidPackages { androidenv.composeAndroidPackages {
cmdLineToolsVersion = "9.0"; cmdLineToolsVersion = "9.0";
toolsVersion = "26.1.1"; toolsVersion = "26.1.1";
platformToolsVersion = "35.0.2"; platformToolsVersion = "34.0.5";
buildToolsVersions = [ "35.0.0" ]; buildToolsVersions = [ "34.0.0" ];
platformVersions = [ "34" ]; platformVersions = [ "34" ];
cmakeVersions = [ "3.22.1" ]; cmakeVersions = [ "3.22.1" ];
ndkVersion = "27.2.12479018"; ndkVersion = "27.2.12479018";

View File

@ -1,27 +1,27 @@
{ {
pkgs ? import <nixpkgs> { }, pkgs ? import <nixpkgs> { },
}: }:
let let
inherit (pkgs) lib stdenv; inherit (pkgs) lib stdenv;
/* No Android SDK for Darwin aarch64. */
isMacM1 = stdenv.isDarwin && stdenv.isAarch64;
in pkgs.mkShell { in pkgs.mkShell {
inputsFrom = [ inputsFrom = lib.optionals (!isMacM1) [
pkgs.androidShell pkgs.androidShell
]; ];
buildInputs = with pkgs; [ buildInputs = with pkgs; [
nim-2_2
nimble
which which
git git
cmake cmake
nim-unwrapped-2_2
] ++ lib.optionals stdenv.isDarwin [ ] ++ lib.optionals stdenv.isDarwin [
pkgs.libiconv pkgs.libiconv
]; ];
# Avoid compiling Nim itself. # Avoid compiling Nim itself.
shellHook = '' shellHook = ''
export USE_SYSTEM_NIM=1 export MAKEFLAGS='USE_SYSTEM_NIM=1'
''; '';
} }

View File

@ -1,15 +0,0 @@
{ pkgs ? import <nixpkgs> { } }:
let
inherit (pkgs.lib) fileContents last splitString flatten remove;
inherit (builtins) map match;
in {
findKeyValue = regex: sourceFile:
let
linesFrom = file: splitString "\n" (fileContents file);
matching = regex: lines: map (line: match regex line) lines;
extractMatch = matches: last (flatten (remove null matches));
in
extractMatch (matching regex (linesFrom sourceFile));
}

16
reliability.nimble Normal file
View File

@ -0,0 +1,16 @@
# Package
version = "0.1.0"
author = "Waku Team"
description = "E2E Reliability Protocol API"
license = "MIT"
srcDir = "src"
# Dependencies
requires "nim >= 2.0.8"
requires "chronicles"
requires "libp2p"
# Tasks
task test, "Run the test suite":
exec "nim c -r tests/test_bloom.nim"
exec "nim c -r tests/test_reliability.nim"

View File

@ -1,67 +1,50 @@
import strutils, os mode = ScriptMode.Verbose
import strutils
# Package # Package
version = "0.2.4" version = "0.1.0"
author = "Logos Messaging Team" author = "Waku Team"
description = "E2E Scalable Data Sync API" description = "E2E Reliability Protocol API"
license = "MIT" license = "MIT"
srcDir = "sds" srcDir = "src"
# Dependencies # Dependencies
requires "nim >= 2.2.4" requires "nim >= 2.2.4",
requires "chronos >= 4.0.4" "chronicles", "chronos", "stew", "stint", "metrics", "libp2p", "results"
requires "libp2p >= 1.15.2"
requires "chronicles"
requires "stew"
requires "stint"
requires "metrics"
requires "results"
requires "taskpools >= 0.1.0" ## This should be removed when using nim-ffi dependency
proc buildLibrary( proc buildLibrary(
outLibNameAndExt: string, outLibNameAndExt: string,
name: string, name: string,
srcDir = "./", srcDir = "./",
extra_params = "", params = "",
`type` = "static", `type` = "static",
) = ) =
if not dirExists "build": if not dirExists "build":
mkDir "build" mkDir "build"
# allow something like "nim nimbus --verbosity:0 --hints:off nimbus.nims"
var extra_params = params
for i in 2 ..< paramCount():
extra_params &= " " & paramStr(i)
if `type` == "static": if `type` == "static":
exec "nim c" & " --out:build/" & outLibNameAndExt & exec "nim c" & " --out:build/" & outLibNameAndExt &
" --threads:on --app:staticlib --opt:size --noMain --mm:refc --header --nimMainPrefix:libsds " & " --threads:on --app:staticlib --opt:size --noMain --mm:refc --header --nimMainPrefix:libsds --skipParentCfg:on " &
extra_params & " " & srcDir & name & ".nim" extra_params & " " & srcDir & name & ".nim"
else: else:
when defined(windows): when defined(windows):
exec "nim c" & " --out:build/" & outLibNameAndExt & exec "nim c" & " --out:build/" & outLibNameAndExt &
" --threads:on --app:lib --opt:size --noMain --mm:refc --header --nimMainPrefix:libsds " & " --threads:on --app:lib --opt:size --noMain --mm:refc --header --nimMainPrefix:libsds --skipParentCfg:off " &
extra_params & " " & srcDir & name & ".nim" extra_params & " " & srcDir & name & ".nim"
else: else:
exec "nim c" & " --out:build/" & outLibNameAndExt & exec "nim c" & " --out:build/" & outLibNameAndExt &
" --threads:on --app:lib --opt:size --noMain --mm:refc --header --nimMainPrefix:libsds " & " --threads:on --app:lib --opt:size --noMain --mm:refc --header --nimMainPrefix:libsds --skipParentCfg:on " &
extra_params & " " & srcDir & name & ".nim" extra_params & " " & srcDir & name & ".nim"
proc getMyCpu(): string = proc getArch(): string =
## Returns a Nim-compatible CPU name (e.g. amd64, arm64) for the host. let arch = getEnv("ARCH")
## Respects the ARCH environment variable when set. if arch != "": return $arch
let envArch = getEnv("ARCH")
if envArch != "":
return envArch
when defined(arm64):
return "arm64"
elif defined(amd64):
return "amd64"
else:
let (archFromUname, _) = gorgeEx("uname -m") let (archFromUname, _) = gorgeEx("uname -m")
let a = archFromUname.strip() return $archFromUname
return
if a == "x86_64":
"amd64"
elif a == "aarch64":
"arm64"
else:
a
# Tasks # Tasks
task test, "Run the test suite": task test, "Run the test suite":
@ -88,17 +71,13 @@ task libsdsDynamicMac, "Generate bindings":
let outLibNameAndExt = "libsds.dylib" let outLibNameAndExt = "libsds.dylib"
let name = "libsds" let name = "libsds"
let cpu = getMyCpu() let arch = getArch()
let clangArch = if cpu == "amd64": "x86_64" else: cpu
let sdkPath = staticExec("xcrun --show-sdk-path").strip() let sdkPath = staticExec("xcrun --show-sdk-path").strip()
let archFlags = let archFlags = (if arch == "arm64": "--cpu:arm64 --passC:\"-arch arm64\" --passL:\"-arch arm64\" --passC:\"-isysroot " & sdkPath & "\" --passL:\"-isysroot " & sdkPath & "\""
"--cpu:" & cpu & " --passC:\"-arch " & clangArch & "\" --passL:\"-arch " & clangArch & else: "--cpu:amd64 --passC:\"-arch x86_64\" --passL:\"-arch x86_64\" --passC:\"-isysroot " & sdkPath & "\" --passL:\"-isysroot " & sdkPath & "\"")
"\" --passC:\"-isysroot " & sdkPath & "\" --passL:\"-isysroot " & sdkPath & "\""
buildLibrary outLibNameAndExt, buildLibrary outLibNameAndExt,
name, name, "library/",
"library/", archFlags & " -d:chronicles_line_numbers --warning:Deprecated:off --warning:UnusedImport:on -d:chronicles_log_level=TRACE",
archFlags &
" -d:chronicles_line_numbers --warning:Deprecated:off --warning:UnusedImport:on -d:chronicles_log_level=TRACE",
"dynamic" "dynamic"
task libsdsStaticWindows, "Generate bindings": task libsdsStaticWindows, "Generate bindings":
@ -121,17 +100,13 @@ task libsdsStaticMac, "Generate bindings":
let outLibNameAndExt = "libsds.a" let outLibNameAndExt = "libsds.a"
let name = "libsds" let name = "libsds"
let cpu = getMyCpu() let arch = getArch()
let clangArch = if cpu == "amd64": "x86_64" else: cpu
let sdkPath = staticExec("xcrun --show-sdk-path").strip() let sdkPath = staticExec("xcrun --show-sdk-path").strip()
let archFlags = let archFlags = (if arch == "arm64": "--cpu:arm64 --passC:\"-arch arm64\" --passL:\"-arch arm64\" --passC:\"-isysroot " & sdkPath & "\" --passL:\"-isysroot " & sdkPath & "\""
"--cpu:" & cpu & " --passC:\"-arch " & clangArch & "\" --passL:\"-arch " & clangArch & else: "--cpu:amd64 --passC:\"-arch x86_64\" --passL:\"-arch x86_64\" --passC:\"-isysroot " & sdkPath & "\" --passL:\"-isysroot " & sdkPath & "\"")
"\" --passC:\"-isysroot " & sdkPath & "\" --passL:\"-isysroot " & sdkPath & "\""
buildLibrary outLibNameAndExt, buildLibrary outLibNameAndExt,
name, name, "library/",
"library/", archFlags & " -d:chronicles_line_numbers --warning:Deprecated:off --warning:UnusedImport:on -d:chronicles_log_level=TRACE",
archFlags &
" -d:chronicles_line_numbers --warning:Deprecated:off --warning:UnusedImport:on -d:chronicles_log_level=TRACE",
"static" "static"
# Build Mobile iOS # Build Mobile iOS
@ -139,175 +114,62 @@ proc buildMobileIOS(srcDir = ".", sdkPath = "") =
echo "Building iOS libsds library" echo "Building iOS libsds library"
let outDir = "build" let outDir = "build"
let nimcacheDir = outDir & "/nimcache"
if dirExists nimcacheDir:
rmDir nimcacheDir
if not dirExists outDir: if not dirExists outDir:
mkDir outDir mkDir outDir
if sdkPath.len == 0: if sdkPath.len == 0:
quit "Error: Xcode/iOS SDK not found" quit "Error: Xcode/iOS SDK not found"
# The output static library
let cFile = outDir & "/nimcache/@mlibsds.nim.c"
let oFile = outDir & "/libsds.o"
let aFile = outDir & "/libsds.a" let aFile = outDir & "/libsds.a"
let aFileTmp = outDir & "/libsds_tmp.a"
let cpu = getMyCpu()
let clangArch = if cpu == "amd64": "x86_64" else: cpu
# 1) Generate C sources from Nim (no linking) # 1) Generate C sources from Nim (no linking)
# Use unique symbol prefix to avoid conflicts with other Nim libraries exec "nim c" &
exec "nim c" & " --nimcache:" & nimcacheDir & " --os:ios --cpu:" & cpu & " --nimcache:build/nimcache --os:ios --cpu:arm64" &
" --compileOnly:on" & " --noMain --mm:refc" & " --threads:on --opt:size --header" & " --compileOnly:on" &
" --nimMainPrefix:libsds" & " --cc:clang" & " -d:useMalloc" & " " & srcDir & " --noMain --mm:refc" &
"/libsds.nim" " --threads:on --opt:size --header" &
" --nimMainPrefix:libsds --skipParentCfg:on" &
" --cc:clang" &
" --out:" & cFile & " " &
srcDir & "/libsds.nim"
# 2) Compile all generated C files to object files with hidden visibility exec "clang -arch arm64" &
# This prevents symbol conflicts with other Nim libraries (e.g., libnim_status_client) " -isysroot " & sdkPath &
# Locate nimbase.h: try next to the nim binary first (jiro4989/setup-nim-action " -I./vendor/nimbus-build-system/vendor/Nim/lib/" &
# puts nim at .nim_runtime/bin/nim with lib/ alongside), then fall back to the " -fembed-bitcode -c " & cFile &
# choosenim toolchain directory (~/.choosenim/toolchains/nim-VERSION/lib/). " -o " & oFile
let (nimBin, _) = gorgeEx("which nim")
let nimLibFromBin = parentDir(parentDir(nimBin.strip())) / "lib"
let nimLibChoosenim = getHomeDir() / ".choosenim/toolchains/nim-" & NimVersion & "/lib"
let nimLibDir =
if fileExists(nimLibFromBin / "nimbase.h"): nimLibFromBin
else: nimLibChoosenim
let clangFlags =
"-arch " & clangArch & " -isysroot " & sdkPath & " -I" & nimLibDir &
" -fembed-bitcode -miphoneos-version-min=16.2 -O2" & " -fvisibility=hidden"
var objectFiles: seq[string] = @[] exec "ar rcs " & aFile & " " & oFile
for cFile in listFiles(nimcacheDir):
if cFile.endsWith(".c"):
let oFile = cFile.changeFileExt("o")
exec "clang " & clangFlags & " -c " & cFile & " -o " & oFile
objectFiles.add(oFile)
# 3) Create static library from all object files
exec "ar rcs " & aFileTmp & " " & objectFiles.join(" ")
# 4) Use libtool to localize all non-public symbols
# Keep only Sds* functions as global, hide everything else to prevent conflicts
# with nim runtime symbols from libnim_status_client
let keepSymbols =
"_Sds*:_libsdsNimMain:_libsdsDatInit*:_libsdsInit*:_NimMainModule__libsds*"
exec "xcrun libtool -static -o " & aFile & " " & aFileTmp &
" -exported_symbols_list /dev/stdin <<< '" & keepSymbols & "' 2>/dev/null || cp " &
aFileTmp & " " & aFile
echo "✔ iOS library created: " & aFile echo "✔ iOS library created: " & aFile
task libsdsIOS, "Build the mobile bindings for iOS": task libsdsIOS, "Build the mobile bindings for iOS":
let srcDir = "./library" let srcDir = "./library"
var sdkPath = getEnv("IOS_SDK_PATH") let sdkPath = getEnv("IOS_SDK_PATH")
if sdkPath.len == 0:
let (detected, exitCode) = gorgeEx("xcrun --show-sdk-path --sdk iphoneos")
if exitCode == 0:
sdkPath = detected.strip()
buildMobileIOS srcDir, sdkPath buildMobileIOS srcDir, sdkPath
### Mobile Android ### Mobile Android
proc checkAndroidNdk() = proc buildMobileAndroid(srcDir = ".", params = "") =
let ndkRoot = getEnv("ANDROID_NDK_ROOT") let cpu = getArch()
if ndkRoot.len == 0:
quit """Error: ANDROID_NDK_ROOT is not set."""
if not dirExists(ndkRoot):
quit "Error: ANDROID_NDK_ROOT points to a non-existent directory: " & ndkRoot
# source.properties contains Pkg.Revision — present in every NDK since r10.
let propsFile = ndkRoot / "source.properties"
if not fileExists(propsFile):
quit "Error: " & ndkRoot & " does not look like a valid NDK (source.properties not found)."
let (props, _) = gorgeEx("cat " & propsFile)
var revision = ""
for line in props.splitLines():
if line.startsWith("Pkg.Revision"):
let parts = line.split('=')
if parts.len == 2:
revision = parts[1].strip()
if revision.len == 0:
quit "Error: Could not read NDK version from " & propsFile
echo "Android NDK version: " & revision
proc buildMobileAndroid(srcDir = ".", extra_params = "") = let outDir = "build/"
let cpu = getMyCpu()
let ndkRoot = getEnv("ANDROID_NDK_ROOT")
let androidTarget = "30"
# Map Nim CPU name → NDK target triple and include dirname.
let (androidArch, archDirname) =
if cpu == "arm64": ("aarch64-linux-android", "aarch64-linux-android")
elif cpu == "amd64": ("x86_64-linux-android", "x86_64-linux-android")
elif cpu == "i386": ("i686-linux-android", "i686-linux-android")
else: ("armv7a-linux-androideabi","arm-linux-androideabi")
# NDK prebuilt toolchain — location differs by host OS.
let (hostOS, _) = gorgeEx("uname -s")
let ndkHostTag =
if hostOS.strip() == "Darwin": "darwin-x86_64"
else: "linux-x86_64"
let toolchainDir = ndkRoot / "toolchains/llvm/prebuilt" / ndkHostTag
let sysroot = toolchainDir / "sysroot"
let ndkClang = toolchainDir / "bin" / (androidArch & androidTarget & "-clang")
let outDir = "build"
if not dirExists outDir: if not dirExists outDir:
mkDir outDir mkDir outDir
exec "nim c" & var extra_params = params
" --out:" & outDir & "/libsds.so" & for i in 2 ..< paramCount():
" --threads:on --app:lib --opt:size --noMain --mm:refc --nimMainPrefix:libsds" & extra_params &= " " & paramStr(i)
" --cc:clang" &
" --clang.exe:\"" & ndkClang & "\"" &
" --clang.linkerexe:\"" & ndkClang & "\"" &
" --cpu:" & cpu &
" --os:android" &
" -d:androidNDK" &
" -d:chronosEventEngine=epoll" &
" --passC:\"--sysroot=" & sysroot & "\"" &
" --passL:\"--sysroot=" & sysroot & "\"" &
" --passC:\"--target=" & androidArch & androidTarget & "\"" &
" --passL:\"--target=" & androidArch & androidTarget & "\"" &
" --passC:\"-I" & sysroot & "/usr/include\"" &
" --passC:\"-I" & sysroot & "/usr/include/" & archDirname & "\"" &
" --passL:\"-L" & sysroot & "/usr/lib/" & archDirname & "/" & androidTarget & "\"" &
" --passL:-llog" &
" -d:chronicles_sinks=textlines[dynamic]" &
" --header" &
" " & extra_params &
" " & srcDir & "/libsds.nim"
task libsdsAndroid, "Build the mobile bindings for Android (uses ARCH env var)": exec "nim c" & " --out:" & outDir &
checkAndroidNdk() "/libsds.so --threads:on --app:lib --opt:size --noMain --mm:refc --nimMainPrefix:libsds " &
"-d:chronicles_sinks=textlines[dynamic] --header --passL:-L" & outdir &
" --passL:-llog --cpu:" & cpu & " --os:android -d:androidNDK " & extra_params & " " &
srcDir & "/libsds.nim"
task libsdsAndroid, "Build the mobile bindings for Android":
let srcDir = "./library" let srcDir = "./library"
buildMobileAndroid srcDir, "-d:chronicles_log_level=ERROR" let extraParams = "-d:chronicles_log_level=ERROR"
buildMobileAndroid srcDir, extraParams
task libsdsAndroidArm64, "Build Android arm64 bindings":
checkAndroidNdk()
putEnv("ARCH", "arm64")
buildMobileAndroid "./library", "-d:chronicles_log_level=ERROR"
task libsdsAndroidAmd64, "Build Android amd64 bindings":
checkAndroidNdk()
putEnv("ARCH", "amd64")
buildMobileAndroid "./library", "-d:chronicles_log_level=ERROR"
task libsdsAndroidX86, "Build Android x86 bindings":
checkAndroidNdk()
putEnv("ARCH", "i386")
buildMobileAndroid "./library", "-d:chronicles_log_level=ERROR"
task libsdsAndroidArm, "Build Android arm bindings":
checkAndroidNdk()
putEnv("ARCH", "arm")
buildMobileAndroid "./library", "-d:chronicles_log_level=ERROR"
task libsds, "Build the shared library for the current platform":
when defined(macosx):
exec "nimble libsdsDynamicMac"
elif defined(windows):
exec "nimble libsdsDynamicWindows"
else:
exec "nimble libsdsDynamicLinux"
task clean, "Remove build artifacts":
if dirExists "build":
rmDir "build"

View File

@ -1,16 +0,0 @@
import ./types/sds_message_id
import ./types/history_entry
import ./types/sds_message
import ./types/unacknowledged_message
import ./types/incoming_message
import ./types/repair_entry
import ./types/reliability_config
export
sds_message_id,
history_entry,
sds_message,
unacknowledged_message,
incoming_message,
repair_entry,
reliability_config

View File

@ -1,19 +0,0 @@
# adapted from https://github.com/waku-org/nwaku/blob/master/waku/common/protobuf.nim
{.push raises: [].}
import libp2p/protobuf/minprotobuf
import libp2p/varint
import ./types/protobuf_error
export minprotobuf, varint, protobuf_error
converter toProtobufError*(err: minprotobuf.ProtoError): ProtobufError =
case err
of minprotobuf.ProtoError.RequiredFieldMissing:
return ProtobufError(kind: ProtobufErrorKind.MissingRequiredField, field: "unknown")
else:
return ProtobufError(kind: ProtobufErrorKind.DecodeFailure, error: err)
proc missingRequiredField*(T: type ProtobufError, field: string): T =
return ProtobufError.init(field)

View File

@ -1,269 +0,0 @@
import std/[times, locks, tables, sequtils, hashes]
import chronicles, results
import ./rolling_bloom_filter
import ./types/[
sds_message_id, history_entry, sds_message, unacknowledged_message, incoming_message,
reliability_error, callbacks, app_callbacks, reliability_config, repair_entry,
channel_context, reliability_manager,
]
export
sds_message_id, history_entry, sds_message, unacknowledged_message, incoming_message,
reliability_error, callbacks, app_callbacks, reliability_config, repair_entry,
channel_context, reliability_manager
proc defaultConfig*(): ReliabilityConfig =
return ReliabilityConfig.init()
proc cleanup*(rm: ReliabilityManager) {.raises: [].} =
if not rm.isNil():
try:
withLock rm.lock:
for channelId, channel in rm.channels:
channel.outgoingBuffer.setLen(0)
channel.incomingBuffer.clear()
channel.messageHistory.clear()
channel.outgoingRepairBuffer.clear()
channel.incomingRepairBuffer.clear()
rm.channels.clear()
except Exception:
error "Error during cleanup", error = getCurrentExceptionMsg()
proc cleanBloomFilter*(
rm: ReliabilityManager, channelId: SdsChannelID
) {.gcsafe, raises: [].} =
withLock rm.lock:
try:
if channelId in rm.channels:
rm.channels[channelId].bloomFilter.clean()
except Exception:
error "Failed to clean bloom filter",
error = getCurrentExceptionMsg(), channelId = channelId
proc addToHistory*(
rm: ReliabilityManager, msg: SdsMessage, channelId: SdsChannelID
) {.gcsafe, raises: [].} =
## Inserts a delivered message into the channel's history map and evicts the
## eldest entries when the bound is exceeded. The full SdsMessage is kept so
## senderId is available for downstream causal-history population and the
## bytes can be re-serialized on demand to answer SDS-R repair requests.
try:
if channelId in rm.channels:
let channel = rm.channels[channelId]
channel.messageHistory[msg.messageId] = msg
while channel.messageHistory.len > rm.config.maxMessageHistory:
var firstKey: SdsMessageID
for k in channel.messageHistory.keys:
firstKey = k
break
channel.messageHistory.del(firstKey)
except Exception:
error "Failed to add to history",
channelId = channelId, msgId = msg.messageId, error = getCurrentExceptionMsg()
proc updateLamportTimestamp*(
rm: ReliabilityManager, msgTs: int64, channelId: SdsChannelID
) {.gcsafe, raises: [].} =
try:
if channelId in rm.channels:
let channel = rm.channels[channelId]
channel.lamportTimestamp = max(msgTs, channel.lamportTimestamp) + 1
except Exception:
error "Failed to update lamport timestamp",
channelId = channelId, msgTs = msgTs, error = getCurrentExceptionMsg()
proc newHistoryEntry*(messageId: SdsMessageID, retrievalHint: seq[byte] = @[]): HistoryEntry =
return HistoryEntry.init(messageId, retrievalHint)
proc toCausalHistory*(messageIds: seq[SdsMessageID]): seq[HistoryEntry] =
return messageIds.mapIt(newHistoryEntry(it))
proc getMessageIds*(causalHistory: seq[HistoryEntry]): seq[SdsMessageID] =
return causalHistory.mapIt(it.messageId)
## SDS-R: Repair computation functions
proc computeTReq*(
participantId: SdsParticipantID,
messageId: SdsMessageID,
tMin: Duration,
tMax: Duration,
): Duration =
## Computes the repair request backoff duration per SDS-R spec:
## T_req = hash(participant_id, message_id) % (T_max - T_min) + T_min
let h = abs(hash(participantId.string & messageId))
let rangeMs = tMax.inMilliseconds - tMin.inMilliseconds
if rangeMs <= 0:
return tMin
let offsetMs = h mod rangeMs
initDuration(milliseconds = tMin.inMilliseconds + offsetMs)
proc computeTResp*(
    participantId: SdsParticipantID,
    senderId: SdsParticipantID,
    messageId: SdsMessageID,
    tMax: Duration,
): Duration =
  ## Repair-response backoff per the SDS-R spec:
  ##   distance = hash(participant_id) XOR hash(sender_id)
  ##   T_resp   = distance * hash(message_id) % T_max
  ## The original sender has distance 0, hence T_resp = 0 (responds first).
  let capMs = tMax.inMilliseconds
  let dist = abs(hash(participantId) xor hash(senderId))
  if capMs <= 0 or dist == 0:
    return initDuration(milliseconds = 0)
  # Reduce both factors modulo capMs first, then multiply as uint64 so the
  # product cannot overflow signed 64-bit arithmetic.
  let
    a = uint64(dist mod capMs)
    b = uint64(abs(hash(messageId)) mod capMs)
  initDuration(milliseconds = int64((a * b) mod uint64(capMs)))
proc isInResponseGroup*(
    participantId: SdsParticipantID,
    senderId: SdsParticipantID,
    messageId: SdsMessageID,
    numResponseGroups: int,
): bool =
  ## SDS-R response-group membership test:
  ##   hash(participant_id, message_id) % groups ==
  ##   hash(sender_id, message_id) % groups
  ## With one group (or fewer) every participant is eligible to respond.
  if numResponseGroups <= 1:
    return true
  let
    mine = abs(hash(participantId.string & messageId)) mod numResponseGroups
    theirs = abs(hash(senderId.string & messageId)) mod numResponseGroups
  mine == theirs
proc getRecentHistoryEntries*(
    rm: ReliabilityManager, n: int, channelId: SdsChannelID
): seq[HistoryEntry] =
  ## Get recent history entries for sending in causal history.
  ## Populates retrieval hints and senderId (SDS-R) for each entry.
  ## Returns the last `n` delivered messages (the OrderedTable preserves
  ## insertion order, so the tail of the key sequence is the most recent).
  ## Unknown channels — and any unexpected failure — yield an empty seq.
  ## NOTE(review): does not take rm.lock — presumably callers hold it; confirm.
  try:
    if channelId in rm.channels:
      let channel = rm.channels[channelId]
      var orderedIds: seq[SdsMessageID] = @[]
      for msgId in channel.messageHistory.keys:
        orderedIds.add(msgId)
      # Slice is safe for small histories: when len <= n the start clamps to 0,
      # and for an empty seq `0 .. ^1` is an empty slice.
      let recentMessageIds =
        orderedIds[max(0, orderedIds.len - n) .. ^1]
      var entries: seq[HistoryEntry] = @[]
      for msgId in recentMessageIds:
        var entry = HistoryEntry(messageId: msgId)
        # Retrieval hints come from the application, if a provider was wired in.
        if not rm.onRetrievalHint.isNil():
          entry.retrievalHint = rm.onRetrievalHint(msgId)
        entry.senderId = channel.messageHistory[msgId].senderId
        entries.add(entry)
      return entries
    else:
      return @[]
  except Exception:
    error "Failed to get recent history entries",
      channelId = channelId, n = n, error = getCurrentExceptionMsg()
    return @[]
proc checkDependencies*(
    rm: ReliabilityManager, deps: seq[HistoryEntry], channelId: SdsChannelID
): seq[HistoryEntry] =
  ## Returns the subset of `deps` not yet present in the channel's message
  ## history. Unknown channels — and any unexpected failure — conservatively
  ## report every dependency as missing.
  try:
    if channelId notin rm.channels:
      return deps
    let channel = rm.channels[channelId]
    result = @[]
    for dep in deps:
      if dep.messageId notin channel.messageHistory:
        result.add(dep)
  except Exception:
    error "Failed to check dependencies",
      channelId = channelId, error = getCurrentExceptionMsg()
    result = deps
proc getMessageHistory*(
    rm: ReliabilityManager, channelId: SdsChannelID
): seq[SdsMessageID] =
  ## Returns the IDs of all delivered messages for `channelId`, in insertion
  ## order (OrderedTable iteration order). Unknown channels and internal
  ## failures both yield an empty seq. Takes rm.lock for the duration.
  withLock rm.lock:
    try:
      if channelId in rm.channels:
        var ids: seq[SdsMessageID] = @[]
        for msgId in rm.channels[channelId].messageHistory.keys:
          ids.add(msgId)
        return ids
      else:
        return @[]
    except Exception:
      error "Failed to get message history",
        channelId = channelId, error = getCurrentExceptionMsg()
      return @[]
proc getOutgoingBuffer*(
    rm: ReliabilityManager, channelId: SdsChannelID
): seq[UnacknowledgedMessage] =
  ## Snapshot of the channel's unacknowledged (outgoing) messages, taken
  ## under rm.lock. Unknown channels and internal failures yield an empty seq.
  withLock rm.lock:
    try:
      if channelId notin rm.channels:
        return @[]
      return rm.channels[channelId].outgoingBuffer
    except Exception:
      error "Failed to get outgoing buffer",
        channelId = channelId, error = getCurrentExceptionMsg()
      return @[]
proc getIncomingBuffer*(
    rm: ReliabilityManager, channelId: SdsChannelID
): Table[SdsMessageID, IncomingMessage] =
  ## Snapshot of messages buffered while waiting on missing dependencies,
  ## taken under rm.lock. Unknown channels and internal failures both yield
  ## an empty table.
  withLock rm.lock:
    try:
      if channelId in rm.channels:
        return rm.channels[channelId].incomingBuffer
      else:
        return initTable[SdsMessageID, IncomingMessage]()
    except Exception:
      error "Failed to get incoming buffer",
        channelId = channelId, error = getCurrentExceptionMsg()
      return initTable[SdsMessageID, IncomingMessage]()
proc getOrCreateChannel*(
    rm: ReliabilityManager, channelId: SdsChannelID
): ChannelContext =
  ## Returns the context for `channelId`, lazily creating it (with a fresh
  ## rolling bloom filter sized from rm.config) on first use.
  ## Unlike the other accessors this RE-RAISES on failure after logging —
  ## callers (e.g. ensureChannel) translate the exception into an error result.
  ## NOTE(review): does not take rm.lock — presumably callers hold it; confirm.
  try:
    if channelId notin rm.channels:
      rm.channels[channelId] = ChannelContext.new(
        RollingBloomFilter.init(rm.config.bloomFilterCapacity, rm.config.bloomFilterErrorRate)
      )
    return rm.channels[channelId]
  except Exception:
    error "Failed to get or create channel",
      channelId = channelId, error = getCurrentExceptionMsg()
    raise
proc ensureChannel*(
    rm: ReliabilityManager, channelId: SdsChannelID
): Result[void, ReliabilityError] =
  ## Public, lock-taking wrapper around getOrCreateChannel: guarantees the
  ## channel exists, discarding the context itself. Maps any exception to
  ## reInternalError instead of propagating it.
  withLock rm.lock:
    try:
      discard rm.getOrCreateChannel(channelId)
      return ok()
    except Exception:
      error "Failed to ensure channel",
        channelId = channelId, msg = getCurrentExceptionMsg()
      return err(ReliabilityError.reInternalError)
proc removeChannel*(
    rm: ReliabilityManager, channelId: SdsChannelID
): Result[void, ReliabilityError] =
  ## Tears down a channel: clears all buffers (outgoing, incoming, history,
  ## and both SDS-R repair buffers) before removing the context from the
  ## channel table. Removing a non-existent channel succeeds (idempotent).
  withLock rm.lock:
    try:
      if channelId in rm.channels:
        let channel = rm.channels[channelId]
        channel.outgoingBuffer.setLen(0)
        channel.incomingBuffer.clear()
        channel.messageHistory.clear()
        channel.outgoingRepairBuffer.clear()
        channel.incomingRepairBuffer.clear()
        rm.channels.del(channelId)
      return ok()
    except Exception:
      error "Failed to remove channel",
        channelId = channelId, msg = getCurrentExceptionMsg()
      return err(ReliabilityError.reInternalError)

View File

@ -1,32 +0,0 @@
import sds/types/sds_message_id
import sds/types/history_entry
import sds/types/sds_message
import sds/types/unacknowledged_message
import sds/types/incoming_message
import sds/types/bloom_filter
import sds/types/rolling_bloom_filter
import sds/types/reliability_error
import sds/types/callbacks
import sds/types/app_callbacks
import sds/types/reliability_config
import sds/types/repair_entry
import sds/types/channel_context
import sds/types/reliability_manager
import sds/types/protobuf_error
export
sds_message_id,
history_entry,
sds_message,
unacknowledged_message,
incoming_message,
bloom_filter,
rolling_bloom_filter,
reliability_error,
callbacks,
app_callbacks,
reliability_config,
repair_entry,
channel_context,
reliability_manager,
protobuf_error

View File

@ -1,28 +0,0 @@
import ./callbacks
export callbacks
type AppCallbacks* = ref object
  ## Bundle of application-supplied hooks invoked by the reliability layer.
  ## Any field may be nil, meaning the application opted out of that hook.
  messageReadyCb*: MessageReadyCallback
  messageSentCb*: MessageSentCallback
  missingDependenciesCb*: MissingDependenciesCallback
  periodicSyncCb*: PeriodicSyncCallback
  retrievalHintProvider*: RetrievalHintProvider
  repairReadyCb*: RepairReadyCallback
proc new*(
    T: type AppCallbacks,
    messageReadyCb: MessageReadyCallback = nil,
    messageSentCb: MessageSentCallback = nil,
    missingDependenciesCb: MissingDependenciesCallback = nil,
    periodicSyncCb: PeriodicSyncCallback = nil,
    retrievalHintProvider: RetrievalHintProvider = nil,
    repairReadyCb: RepairReadyCallback = nil,
): T =
  ## Constructs an AppCallbacks bundle; every hook defaults to nil (unused).
  return T(
    messageReadyCb: messageReadyCb,
    messageSentCb: messageSentCb,
    missingDependenciesCb: missingDependenciesCb,
    periodicSyncCb: periodicSyncCb,
    retrievalHintProvider: retrievalHintProvider,
    repairReadyCb: repairReadyCb,
  )

View File

@ -1,22 +0,0 @@
type BloomFilter* {.requiresInit.} = object
  ## Plain bloom-filter state; the bit array is packed into machine ints.
  capacity*: int    # expected number of elements the filter is sized for
  errorRate*: float # target false-positive probability
  kHashes*: int     # number of hash functions applied per item
  mBits*: int       # total number of bits in the filter
  intArray*: seq[int] # backing storage: mBits packed into sizeof(int)*8 chunks
proc init*(
    T: type BloomFilter,
    capacity: int,
    errorRate: float,
    kHashes: int,
    mBits: int,
    intArray: seq[int],
): T =
  ## Field-for-field constructor; sizing/derivation of kHashes and mBits is
  ## done by the caller (see initializeBloomFilter in the bloom module).
  return T(
    capacity: capacity,
    errorRate: errorRate,
    kHashes: kHashes,
    mBits: mBits,
    intArray: intArray,
  )

View File

@ -1,20 +0,0 @@
import ./sds_message_id
import ./history_entry
export sds_message_id, history_entry
type
  # Fired when a received message has all dependencies met and is deliverable.
  MessageReadyCallback* =
    proc(messageId: SdsMessageID, channelId: SdsChannelID) {.gcsafe.}
  # Fired when an outgoing message is acknowledged by peers.
  MessageSentCallback* =
    proc(messageId: SdsMessageID, channelId: SdsChannelID) {.gcsafe.}
  # Fired when a received message references history entries we do not have.
  MissingDependenciesCallback* = proc(
    messageId: SdsMessageID, missingDeps: seq[HistoryEntry], channelId: SdsChannelID
  ) {.gcsafe.}
  # Application-provided lookup of a retrieval hint for a given message ID.
  RetrievalHintProvider* = proc(messageId: SdsMessageID): seq[byte] {.gcsafe.}
  # Invoked on the periodic sync timer; must not raise.
  PeriodicSyncCallback* = proc() {.gcsafe, raises: [].}
  # Fired when a serialized repair (SDS-R) message is ready to broadcast.
  RepairReadyCallback* = proc(message: seq[byte], channelId: SdsChannelID) {.gcsafe.}

View File

@ -1,36 +0,0 @@
import std/tables
import ./sds_message_id
import ./sds_message
import ./rolling_bloom_filter
import ./unacknowledged_message
import ./incoming_message
import ./repair_entry
export
sds_message_id, sds_message, rolling_bloom_filter, unacknowledged_message,
incoming_message, repair_entry
type ChannelContext* = ref object
  ## Per-channel reliability state owned by the ReliabilityManager.
  lamportTimestamp*: int64 # channel-local Lamport clock
  messageHistory*: OrderedTable[SdsMessageID, SdsMessage]
    ## Single source of truth for delivered messages. Holds the deserialized
    ## SdsMessage (which carries senderId, lamportTimestamp, content, etc.) so
    ## causal history, sender lookup, and SDS-R repair responses can all be
    ## answered from one place. OrderedTable preserves insertion order for
    ## causal-history tail access and FIFO eviction at maxMessageHistory.
  bloomFilter*: RollingBloomFilter # probabilistic record of seen message IDs
  outgoingBuffer*: seq[UnacknowledgedMessage] # sent but not yet acknowledged
  incomingBuffer*: Table[SdsMessageID, IncomingMessage] # awaiting missing deps
  ## SDS-R buffers
  outgoingRepairBuffer*: Table[SdsMessageID, OutgoingRepairEntry]
  incomingRepairBuffer*: Table[SdsMessageID, IncomingRepairEntry]
proc new*(T: type ChannelContext, bloomFilter: RollingBloomFilter): T =
  ## Fresh channel context: zeroed Lamport clock, empty buffers, and the
  ## supplied rolling bloom filter.
  return T(
    lamportTimestamp: 0,
    messageHistory: initOrderedTable[SdsMessageID, SdsMessage](),
    bloomFilter: bloomFilter,
    outgoingBuffer: @[],
    incomingBuffer: initTable[SdsMessageID, IncomingMessage](),
    outgoingRepairBuffer: initTable[SdsMessageID, OutgoingRepairEntry](),
    incomingRepairBuffer: initTable[SdsMessageID, IncomingRepairEntry](),
  )

View File

@ -1,15 +0,0 @@
import ./sds_message_id
export sds_message_id
type HistoryEntry* = object
  ## One causal-history reference carried inside an SdsMessage.
  messageId*: SdsMessageID
  retrievalHint*: seq[byte] ## Optional hint for efficient retrieval (e.g., Waku message hash)
  senderId*: SdsParticipantID ## Original message sender's participant ID (SDS-R)
proc init*(
    T: type HistoryEntry,
    messageId: SdsMessageID,
    retrievalHint: seq[byte] = @[],
    senderId: SdsParticipantID = "".SdsParticipantID,
): T =
  ## Constructor; hint and sender default to empty (absent on the wire).
  return T(messageId: messageId, retrievalHint: retrievalHint, senderId: senderId)

View File

@ -1,13 +0,0 @@
import std/sets
import ./sds_message_id
import ./sds_message
export sds_message_id, sds_message
type IncomingMessage* {.requiresInit.} = object
  ## A received message parked in the incoming buffer until all of its
  ## causal dependencies (missingDeps) have been delivered.
  message*: SdsMessage
  missingDeps*: HashSet[SdsMessageID] # IDs still absent from message history
proc init*(
    T: type IncomingMessage, message: SdsMessage, missingDeps: HashSet[SdsMessageID]
): T =
  ## Pairs a deserialized message with its currently-missing dependency set.
  return T(message: message, missingDeps: missingDeps)

View File

@ -1,22 +0,0 @@
import results
import libp2p/protobuf/minprotobuf
type
  ProtobufErrorKind* {.pure.} = enum
    DecodeFailure ## underlying minprotobuf decode error
    MissingRequiredField ## a required field was absent from the buffer
  ProtobufError* = object
    ## Tagged union: carries either the raw decode error or the name of the
    ## missing field, depending on `kind`.
    case kind*: ProtobufErrorKind
    of DecodeFailure:
      error*: minprotobuf.ProtoError
    of MissingRequiredField:
      field*: string
  ProtobufResult*[T] = Result[T, ProtobufError]
proc init*(T: type ProtobufError, error: minprotobuf.ProtoError): T =
  ## Wraps a minprotobuf decode error.
  return T(kind: ProtobufErrorKind.DecodeFailure, error: error)
proc init*(T: type ProtobufError, field: string): T =
  ## Flags the named required field as missing.
  return T(kind: ProtobufErrorKind.MissingRequiredField, field: field)

View File

@ -1,66 +0,0 @@
import std/times
const
  DefaultMaxMessageHistory* = 1000
  DefaultMaxCausalHistory* = 10
  DefaultResendInterval* = initDuration(seconds = 60)
  DefaultMaxResendAttempts* = 5
  DefaultSyncMessageInterval* = initDuration(seconds = 30)
  DefaultBufferSweepInterval* = initDuration(seconds = 60)
  DefaultRepairTMin* = initDuration(seconds = 30)
  DefaultRepairTMax* = initDuration(seconds = 300)
  DefaultNumResponseGroups* = 1
  DefaultMaxRepairRequests* = 3
  DefaultRepairSweepInterval* = initDuration(seconds = 5)
  MaxMessageSize* = 1024 * 1024 # 1 MB
# NOTE(review): mid-file import is valid Nim; it is placed here because init's
# defaults below reference DefaultBloomFilter* constants from this module.
# Consider moving it to the top of the file for conventional layout.
import ./rolling_bloom_filter
export rolling_bloom_filter
type ReliabilityConfig* {.requiresInit.} = object
  ## Tunables for the reliability manager; construct via init to get defaults.
  bloomFilterCapacity*: int
  bloomFilterErrorRate*: float
  maxMessageHistory*: int
  maxCausalHistory*: int
  resendInterval*: Duration
  maxResendAttempts*: int
  syncMessageInterval*: Duration
  bufferSweepInterval*: Duration
  ## SDS-R config
  repairTMin*: Duration # lower bound of the repair-request backoff window
  repairTMax*: Duration # upper bound of the repair backoff windows
  numResponseGroups*: int # partitions of participants eligible to respond
  maxRepairRequests*: int # cap on entries piggybacked per repair request
  repairSweepInterval*: Duration # cadence of the repair-buffer sweep
proc init*(
    T: type ReliabilityConfig,
    bloomFilterCapacity: int = DefaultBloomFilterCapacity,
    bloomFilterErrorRate: float = DefaultBloomFilterErrorRate,
    maxMessageHistory: int = DefaultMaxMessageHistory,
    maxCausalHistory: int = DefaultMaxCausalHistory,
    resendInterval: Duration = DefaultResendInterval,
    maxResendAttempts: int = DefaultMaxResendAttempts,
    syncMessageInterval: Duration = DefaultSyncMessageInterval,
    bufferSweepInterval: Duration = DefaultBufferSweepInterval,
    repairTMin: Duration = DefaultRepairTMin,
    repairTMax: Duration = DefaultRepairTMax,
    numResponseGroups: int = DefaultNumResponseGroups,
    maxRepairRequests: int = DefaultMaxRepairRequests,
    repairSweepInterval: Duration = DefaultRepairSweepInterval,
): T =
  ## Builds a config; every field is individually overridable and defaults
  ## to the module constants above.
  return T(
    bloomFilterCapacity: bloomFilterCapacity,
    bloomFilterErrorRate: bloomFilterErrorRate,
    maxMessageHistory: maxMessageHistory,
    maxCausalHistory: maxCausalHistory,
    resendInterval: resendInterval,
    maxResendAttempts: maxResendAttempts,
    syncMessageInterval: syncMessageInterval,
    bufferSweepInterval: bufferSweepInterval,
    repairTMin: repairTMin,
    repairTMax: repairTMax,
    numResponseGroups: numResponseGroups,
    maxRepairRequests: maxRepairRequests,
    repairSweepInterval: repairSweepInterval,
  )

View File

@ -1,7 +0,0 @@
type ReliabilityError* {.pure.} = enum
  ## Error codes surfaced through the library's Result-based API.
  reInvalidArgument
  reOutOfMemory
  reInternalError
  reSerializationError
  reDeserializationError
  reMessageTooLarge

View File

@ -1,34 +0,0 @@
import std/[tables, locks]
import ./sds_message_id
import ./history_entry
import ./callbacks
import ./reliability_config
import ./channel_context
export sds_message_id, history_entry, callbacks, reliability_config, channel_context
type ReliabilityManager* = ref object
  ## Top-level, multi-channel reliability state. All channel access goes
  ## through `channels`, guarded by `lock` in the public procs.
  channels*: Table[SdsChannelID, ChannelContext]
  config*: ReliabilityConfig
  participantId*: SdsParticipantID # this node's identity (SDS-R)
  lock*: Lock
  # Optional hooks; nil when the application did not register them.
  onMessageReady*: proc(messageId: SdsMessageID, channelId: SdsChannelID) {.gcsafe.}
  onMessageSent*: proc(messageId: SdsMessageID, channelId: SdsChannelID) {.gcsafe.}
  onMissingDependencies*: proc(
    messageId: SdsMessageID, missingDeps: seq[HistoryEntry], channelId: SdsChannelID
  ) {.gcsafe.}
  onPeriodicSync*: PeriodicSyncCallback
  onRetrievalHint*: RetrievalHintProvider
  onRepairReady*: RepairReadyCallback
proc new*(
    T: type ReliabilityManager,
    config: ReliabilityConfig,
    participantId: SdsParticipantID = "".SdsParticipantID,
): T =
  ## Allocates a manager with an empty channel table and initializes its lock.
  ## Callbacks start nil and are assigned by the application afterwards.
  let rm = T(
    channels: initTable[SdsChannelID, ChannelContext](),
    config: config,
    participantId: participantId,
  )
  rm.lock.initLock()
  return rm

View File

@ -1,36 +0,0 @@
import std/times
import ./history_entry
export history_entry
type
  OutgoingRepairEntry* {.requiresInit.} = object
    ## Entry in the outgoing repair request buffer (SDS-R).
    ## Tracks a missing message we want to request repair for.
    outHistEntry*: HistoryEntry ## The missing history entry
    minTimeRepairReq*: Time
    ## Earliest time at which we will include this in a repair request (T_REQ in spec)
  IncomingRepairEntry* {.requiresInit.} = object
    ## Entry in the incoming repair request buffer (SDS-R).
    ## Tracks a repair request from a remote peer that we might respond to.
    inHistEntry*: HistoryEntry ## The requested history entry
    cachedMessage*: seq[byte] ## Full serialized SDS message for rebroadcast
    minTimeRepairResp*: Time
    ## Earliest time at which we will rebroadcast (T_RESP in spec)
proc init*(
    T: type OutgoingRepairEntry, outHistEntry: HistoryEntry, minTimeRepairReq: Time
): T =
  ## Pairs a missing entry with its computed repair-request deadline.
  return T(outHistEntry: outHistEntry, minTimeRepairReq: minTimeRepairReq)
proc init*(
    T: type IncomingRepairEntry,
    inHistEntry: HistoryEntry,
    cachedMessage: seq[byte],
    minTimeRepairResp: Time,
): T =
  ## Pairs a requested entry (and its cached wire bytes) with the computed
  ## repair-response deadline.
  return T(
    inHistEntry: inHistEntry,
    cachedMessage: cachedMessage,
    minTimeRepairResp: minTimeRepairResp,
  )

View File

@ -1,31 +0,0 @@
import ./bloom_filter
import ./sds_message_id
export bloom_filter, sds_message_id
const
  DefaultBloomFilterCapacity* = 10000
  DefaultBloomFilterErrorRate* = 0.001
  CapacityFlexPercent* = 20 # +/- tolerance band around nominal capacity
type RollingBloomFilter* {.requiresInit.} = object
  ## Bloom filter plus the bookkeeping needed to "roll" it: the inserted
  ## message IDs are retained so the filter can be rebuilt/resized when the
  ## population drifts outside [minCapacity, maxCapacity].
  filter*: BloomFilter
  capacity*: int    # nominal capacity
  minCapacity*: int # lower bound of the flex band
  maxCapacity*: int # upper bound of the flex band
  messages*: seq[SdsMessageID] # IDs currently represented in the filter
proc init*(
    T: type RollingBloomFilter,
    filter: BloomFilter,
    capacity: int,
    minCapacity: int,
    maxCapacity: int,
    messages: seq[SdsMessageID] = @[],
): T =
  ## Field-for-field constructor; `messages` defaults to empty for a fresh filter.
  return T(
    filter: filter,
    capacity: capacity,
    minCapacity: minCapacity,
    maxCapacity: maxCapacity,
    messages: messages,
  )

View File

@ -1,36 +0,0 @@
import ./sds_message_id
import ./history_entry
export sds_message_id, history_entry
type SdsMessage* {.requiresInit.} = object
  ## Wire-level SDS message (see the protobuf module for field numbers).
  messageId*: SdsMessageID
  lamportTimestamp*: int64 # sender's Lamport clock at send time
  causalHistory*: seq[HistoryEntry] # recent deliveries the sender had seen
  channelId*: SdsChannelID
  content*: seq[byte] # opaque application payload
  bloomFilter*: seq[byte] # serialized bloom filter of the sender's seen set
  senderId*: SdsParticipantID ## SDS-R: original sender's participant ID
  repairRequest*: seq[HistoryEntry]
  ## Capped list of missing entries requesting repair (SDS-R)
proc init*(
    T: type SdsMessage,
    messageId: SdsMessageID,
    lamportTimestamp: int64,
    causalHistory: seq[HistoryEntry],
    channelId: SdsChannelID,
    content: seq[byte],
    bloomFilter: seq[byte],
    senderId: SdsParticipantID = "".SdsParticipantID,
    repairRequest: seq[HistoryEntry] = @[],
): T =
  ## Constructor; the SDS-R fields (senderId, repairRequest) default to
  ## empty, matching their optional encoding on the wire.
  return T(
    messageId: messageId,
    lamportTimestamp: lamportTimestamp,
    causalHistory: causalHistory,
    channelId: channelId,
    content: content,
    bloomFilter: bloomFilter,
    senderId: senderId,
    repairRequest: repairRequest,
  )

View File

@ -1,11 +0,0 @@
import std/hashes
type
  SdsMessageID* = string
  SdsChannelID* = string
  ## Distinct so participant IDs cannot be mixed up with message/channel IDs
  ## at compile time; the needed string operations are re-borrowed below.
  SdsParticipantID* = distinct string
proc `==`*(a, b: SdsParticipantID): bool {.borrow.}
proc `$`*(p: SdsParticipantID): string {.borrow.}
proc len*(p: SdsParticipantID): int {.borrow.}
proc hash*(p: SdsParticipantID): Hash {.borrow.}

View File

@ -1,13 +0,0 @@
import std/times
import ./sds_message
export sds_message
type UnacknowledgedMessage* = object
  ## Outgoing-buffer entry: a sent message awaiting acknowledgement,
  ## with bookkeeping for the resend loop.
  message*: SdsMessage
  sendTime*: Time # when the message was (last) handed to the transport
  resendAttempts*: int # resends performed so far (compared to maxResendAttempts)
proc init*(
    T: type UnacknowledgedMessage, message: SdsMessage, sendTime: Time, resendAttempts: int
): T =
  ## Field-for-field constructor.
  return T(message: message, sendTime: sendTime, resendAttempts: resendAttempts)

View File

@ -3,8 +3,13 @@ import hashes
import strutils import strutils
import results import results
import private/probabilities import private/probabilities
import ./types/bloom_filter
export bloom_filter type BloomFilter* = object
capacity*: int
errorRate*: float
kHashes*: int
mBits*: int
intArray*: seq[int]
{.push overflowChecks: off.} # Turn off overflow checks for hashing operations {.push overflowChecks: off.} # Turn off overflow checks for hashing operations
@ -15,7 +20,13 @@ proc hashN(item: string, n: int, maxValue: int): int =
let let
hashA = abs(hash(item)) mod maxValue # Use abs to handle negative hashes hashA = abs(hash(item)) mod maxValue # Use abs to handle negative hashes
hashB = abs(hash(item & " b")) mod maxValue # string concatenation hashB = abs(hash(item & " b")) mod maxValue # string concatenation
return abs((hashA + n * hashB)) mod maxValue abs((hashA + n * hashB)) mod maxValue
# # Use bit rotation for second hash instead of string concatenation if speed if preferred over FP-rate
# # Rotate left by 21 bits (lower the rotation, higher the speed but higher the FP-rate too)
# hashB = abs(
# ((h shl 21) or (h shr (sizeof(int) * 8 - 21)))
# ) mod maxValue
# abs((hashA + n.int64 * hashB)) mod maxValue
{.pop.} {.pop.}
@ -30,7 +41,7 @@ proc getMOverNBitsForK*(
if probabilityTable[k][mOverN] < targetError: if probabilityTable[k][mOverN] < targetError:
return ok(mOverN) return ok(mOverN)
return err( err(
"Specified value of k and error rate not achievable using less than 4 bytes / element." "Specified value of k and error rate not achievable using less than 4 bytes / element."
) )
@ -68,19 +79,19 @@ proc initializeBloomFilter*(
mBits = capacity * nBitsPerElem mBits = capacity * nBitsPerElem
mInts = 1 + mBits div (sizeof(int) * 8) mInts = 1 + mBits div (sizeof(int) * 8)
return ok( ok(
BloomFilter.init( BloomFilter(
capacity = capacity, capacity: capacity,
errorRate = errorRate, errorRate: errorRate,
kHashes = kHashes, kHashes: kHashes,
mBits = mBits, mBits: mBits,
intArray = newSeq[int](mInts), intArray: newSeq[int](mInts),
) )
) )
proc `$`*(bf: BloomFilter): string = proc `$`*(bf: BloomFilter): string =
## Prints the configuration of the Bloom filter. ## Prints the configuration of the Bloom filter.
return "Bloom filter with $1 capacity, $2 error rate, $3 hash functions, and requiring $4 bits of memory." % "Bloom filter with $1 capacity, $2 error rate, $3 hash functions, and requiring $4 bits of memory." %
[ [
$bf.capacity, $bf.capacity,
formatFloat(bf.errorRate, format = ffScientific, precision = 1), formatFloat(bf.errorRate, format = ffScientific, precision = 1),
@ -92,7 +103,7 @@ proc computeHashes(bf: BloomFilter, item: string): seq[int] =
var hashes = newSeq[int](bf.kHashes) var hashes = newSeq[int](bf.kHashes)
for i in 0 ..< bf.kHashes: for i in 0 ..< bf.kHashes:
hashes[i] = hashN(item, i, bf.mBits) hashes[i] = hashN(item, i, bf.mBits)
return hashes hashes
proc insert*(bf: var BloomFilter, item: string) = proc insert*(bf: var BloomFilter, item: string) =
## Insert an item (string) into the Bloom filter. ## Insert an item (string) into the Bloom filter.
@ -116,4 +127,4 @@ proc lookup*(bf: BloomFilter, item: string): bool =
currentInt = bf.intArray[intAddress] currentInt = bf.intArray[intAddress]
if currentInt != (currentInt or (1 shl bitOffset)): if currentInt != (currentInt or (1 shl bitOffset)):
return false return false
return true true

31
src/message.nim Normal file
View File

@ -0,0 +1,31 @@
import std/[times, sets]
type
  SdsMessageID* = string
  SdsChannelID* = string
  SdsMessage* = object
    ## Wire-level SDS message (pre-SDS-R layout: causal history is a plain
    ## list of message IDs, no senderId / repairRequest fields).
    messageId*: SdsMessageID
    lamportTimestamp*: int64
    causalHistory*: seq[SdsMessageID]
    channelId*: SdsChannelID
    content*: seq[byte]
    bloomFilter*: seq[byte]
  UnacknowledgedMessage* = object
    ## Outgoing-buffer entry awaiting acknowledgement.
    message*: SdsMessage
    sendTime*: Time
    resendAttempts*: int
  IncomingMessage* = object
    ## Received message parked until missingDeps are all delivered.
    message*: SdsMessage
    missingDeps*: HashSet[SdsMessageID]
const
  DefaultMaxMessageHistory* = 1000
  DefaultMaxCausalHistory* = 10
  DefaultResendInterval* = initDuration(seconds = 60)
  DefaultMaxResendAttempts* = 5
  DefaultSyncMessageInterval* = initDuration(seconds = 30)
  DefaultBufferSweepInterval* = initDuration(seconds = 60)
  MaxMessageSize* = 1024 * 1024 # 1 MB

View File

@ -1,29 +1,7 @@
import libp2p/protobuf/minprotobuf import libp2p/protobuf/minprotobuf
import std/options
import endians import endians
import ./types/[sds_message_id, history_entry, sds_message, reliability_error] import ../src/[message, protobufutil, bloom, reliability_utils]
import ./protobufutil
import ./bloom
import ./sds_utils
proc encodeHistoryEntry*(entry: HistoryEntry): ProtoBuffer =
var entryPb = initProtoBuffer()
entryPb.write(1, entry.messageId)
if entry.retrievalHint.len > 0:
entryPb.write(2, entry.retrievalHint)
if entry.senderId.len > 0:
entryPb.write(3, entry.senderId.string)
entryPb.finish()
entryPb
proc decodeHistoryEntry*(entryPb: ProtoBuffer): ProtobufResult[HistoryEntry] =
var entry = HistoryEntry.init("")
if not ?entryPb.getField(1, entry.messageId):
return err(ProtobufError.missingRequiredField("HistoryEntry.messageId"))
discard entryPb.getField(2, entry.retrievalHint)
var senderIdStr: string
if entryPb.getField(3, senderIdStr).valueOr(false):
entry.senderId = senderIdStr.SdsParticipantID
ok(entry)
proc encode*(msg: SdsMessage): ProtoBuffer = proc encode*(msg: SdsMessage): ProtoBuffer =
var pb = initProtoBuffer() var pb = initProtoBuffer()
@ -31,28 +9,19 @@ proc encode*(msg: SdsMessage): ProtoBuffer =
pb.write(1, msg.messageId) pb.write(1, msg.messageId)
pb.write(2, uint64(msg.lamportTimestamp)) pb.write(2, uint64(msg.lamportTimestamp))
for entry in msg.causalHistory: for hist in msg.causalHistory:
let entryPb = encodeHistoryEntry(entry) pb.write(3, hist)
pb.write(3, entryPb.buffer)
pb.write(4, msg.channelId) pb.write(4, msg.channelId)
pb.write(5, msg.content) pb.write(5, msg.content)
pb.write(6, msg.bloomFilter) pb.write(6, msg.bloomFilter)
if msg.senderId.len > 0:
pb.write(7, msg.senderId.string)
for entry in msg.repairRequest:
let entryPb = encodeHistoryEntry(entry)
pb.write(13, entryPb.buffer)
pb.finish() pb.finish()
return pb pb
proc decode*(T: type SdsMessage, buffer: seq[byte]): ProtobufResult[T] = proc decode*(T: type SdsMessage, buffer: seq[byte]): ProtobufResult[T] =
let pb = initProtoBuffer(buffer) let pb = initProtoBuffer(buffer)
var msg = SdsMessage.init("", 0, @[], "", @[], @[]) var msg = SdsMessage()
if not ?pb.getField(1, msg.messageId): if not ?pb.getField(1, msg.messageId):
return err(ProtobufError.missingRequiredField("messageId")) return err(ProtobufError.missingRequiredField("messageId"))
@ -62,20 +31,10 @@ proc decode*(T: type SdsMessage, buffer: seq[byte]): ProtobufResult[T] =
return err(ProtobufError.missingRequiredField("lamportTimestamp")) return err(ProtobufError.missingRequiredField("lamportTimestamp"))
msg.lamportTimestamp = int64(timestamp) msg.lamportTimestamp = int64(timestamp)
# Handle both old and new causal history formats
var historyBuffers: seq[seq[byte]]
if pb.getRepeatedField(3, historyBuffers).isOk():
# New format: repeated HistoryEntry
for histBuffer in historyBuffers:
let entryPb = initProtoBuffer(histBuffer)
let entry = ?decodeHistoryEntry(entryPb)
msg.causalHistory.add(entry)
else:
# Try old format: repeated string
var causalHistory: seq[SdsMessageID] var causalHistory: seq[SdsMessageID]
let histResult = pb.getRepeatedField(3, causalHistory) let histResult = pb.getRepeatedField(3, causalHistory)
if histResult.isOk(): if histResult.isOk:
msg.causalHistory = toCausalHistory(causalHistory) msg.causalHistory = causalHistory
if not ?pb.getField(4, msg.channelId): if not ?pb.getField(4, msg.channelId):
return err(ProtobufError.missingRequiredField("channelId")) return err(ProtobufError.missingRequiredField("channelId"))
@ -86,20 +45,7 @@ proc decode*(T: type SdsMessage, buffer: seq[byte]): ProtobufResult[T] =
if not ?pb.getField(6, msg.bloomFilter): if not ?pb.getField(6, msg.bloomFilter):
msg.bloomFilter = @[] # Empty if not present msg.bloomFilter = @[] # Empty if not present
# SDS-R: decode senderId (field 7, optional) ok(msg)
var msgSenderIdStr: string
if pb.getField(7, msgSenderIdStr).valueOr(false):
msg.senderId = msgSenderIdStr.SdsParticipantID
# SDS-R: decode repair request (field 13, optional)
var repairBuffers: seq[seq[byte]]
if pb.getRepeatedField(13, repairBuffers).isOk():
for repairBuffer in repairBuffers:
let entryPb = initProtoBuffer(repairBuffer)
let entry = ?decodeHistoryEntry(entryPb)
msg.repairRequest.add(entry)
return ok(msg)
proc extractChannelId*(data: seq[byte]): Result[SdsChannelID, ReliabilityError] = proc extractChannelId*(data: seq[byte]): Result[SdsChannelID, ReliabilityError] =
## For extraction of channel ID without full message deserialization ## For extraction of channel ID without full message deserialization
@ -110,22 +56,23 @@ proc extractChannelId*(data: seq[byte]): Result[SdsChannelID, ReliabilityError]
return err(ReliabilityError.reDeserializationError) return err(ReliabilityError.reDeserializationError)
if not fieldOk: if not fieldOk:
return err(ReliabilityError.reDeserializationError) return err(ReliabilityError.reDeserializationError)
return ok(channelId) ok(channelId)
except: except:
return err(ReliabilityError.reDeserializationError) err(ReliabilityError.reDeserializationError)
proc serializeMessage*(msg: SdsMessage): Result[seq[byte], ReliabilityError] = proc serializeMessage*(msg: SdsMessage): Result[seq[byte], ReliabilityError] =
let pb = encode(msg) let pb = encode(msg)
return ok(pb.buffer) ok(pb.buffer)
proc deserializeMessage*(data: seq[byte]): Result[SdsMessage, ReliabilityError] = proc deserializeMessage*(data: seq[byte]): Result[SdsMessage, ReliabilityError] =
let msg = SdsMessage.decode(data).valueOr: let msg = SdsMessage.decode(data).valueOr:
return err(ReliabilityError.reDeserializationError) return err(ReliabilityError.reDeserializationError)
return ok(msg) ok(msg)
proc serializeBloomFilter*(filter: BloomFilter): Result[seq[byte], ReliabilityError] = proc serializeBloomFilter*(filter: BloomFilter): Result[seq[byte], ReliabilityError] =
var pb = initProtoBuffer() var pb = initProtoBuffer()
# Convert intArray to bytes
try: try:
var bytes = newSeq[byte](filter.intArray.len * sizeof(int)) var bytes = newSeq[byte](filter.intArray.len * sizeof(int))
for i, val in filter.intArray: for i, val in filter.intArray:
@ -143,7 +90,7 @@ proc serializeBloomFilter*(filter: BloomFilter): Result[seq[byte], ReliabilityEr
return err(ReliabilityError.reSerializationError) return err(ReliabilityError.reSerializationError)
pb.finish() pb.finish()
return ok(pb.buffer) ok(pb.buffer)
proc deserializeBloomFilter*(data: seq[byte]): Result[BloomFilter, ReliabilityError] = proc deserializeBloomFilter*(data: seq[byte]): Result[BloomFilter, ReliabilityError] =
if data.len == 0: if data.len == 0:
@ -169,6 +116,7 @@ proc deserializeBloomFilter*(data: seq[byte]): Result[BloomFilter, ReliabilityEr
if not field1_Ok or not field2_Ok or not field3_Ok or not field4_Ok or not field5_Ok: if not field1_Ok or not field2_Ok or not field3_Ok or not field4_Ok or not field5_Ok:
return err(ReliabilityError.reDeserializationError) return err(ReliabilityError.reDeserializationError)
# Convert bytes back to intArray
var intArray = newSeq[int](bytes.len div sizeof(int)) var intArray = newSeq[int](bytes.len div sizeof(int))
for i in 0 ..< intArray.len: for i in 0 ..< intArray.len:
var leVal: int var leVal: int
@ -176,13 +124,13 @@ proc deserializeBloomFilter*(data: seq[byte]): Result[BloomFilter, ReliabilityEr
copyMem(addr leVal, unsafeAddr bytes[start], sizeof(int)) copyMem(addr leVal, unsafeAddr bytes[start], sizeof(int))
littleEndian64(addr intArray[i], addr leVal) littleEndian64(addr intArray[i], addr leVal)
return ok( ok(
BloomFilter.init( BloomFilter(
capacity = int(cap), intArray: intArray,
errorRate = float(errRate) / 1_000_000, capacity: int(cap),
kHashes = int(kHashes), errorRate: float(errRate) / 1_000_000,
mBits = int(mBits), kHashes: int(kHashes),
intArray = intArray, mBits: int(mBits),
) )
) )
except: except:

32
src/protobufutil.nim Normal file
View File

@ -0,0 +1,32 @@
# adapted from https://github.com/waku-org/nwaku/blob/master/waku/common/protobuf.nim
{.push raises: [].}
import libp2p/protobuf/minprotobuf
import libp2p/varint
export minprotobuf, varint
type
  ProtobufErrorKind* {.pure.} = enum
    DecodeFailure
    MissingRequiredField
  ProtobufError* = object
    ## Tagged union: either the raw minprotobuf decode error or the name of
    ## the missing required field, selected by `kind`.
    case kind*: ProtobufErrorKind
    of DecodeFailure:
      error*: minprotobuf.ProtoError
    of MissingRequiredField:
      field*: string
  ProtobufResult*[T] = Result[T, ProtobufError]
converter toProtobufError*(err: minprotobuf.ProtoError): ProtobufError =
  ## Implicit lift of minprotobuf errors into ProtobufError, so `?` can be
  ## used on minprotobuf results. RequiredFieldMissing maps to the
  ## MissingRequiredField kind, but the field name is unknown at this point.
  case err
  of minprotobuf.ProtoError.RequiredFieldMissing:
    ProtobufError(kind: ProtobufErrorKind.MissingRequiredField, field: "unknown")
  else:
    ProtobufError(kind: ProtobufErrorKind.DecodeFailure, error: err)
proc missingRequiredField*(T: type ProtobufError, field: string): T =
  ## Names the specific required field that was absent.
  ProtobufError(kind: ProtobufErrorKind.MissingRequiredField, field: field)

View File

@ -1,16 +1,22 @@
import std/[algorithm, times, locks, tables, sets, options] import std/[times, locks, tables, sets, options]
import chronos, results, chronicles import chronos, results, chronicles
import sds/[types, protobuf, sds_utils, rolling_bloom_filter] import ./[message, protobuf, reliability_utils, rolling_bloom_filter]
export types, protobuf, sds_utils, rolling_bloom_filter
proc newReliabilityManager*( proc newReliabilityManager*(
config: ReliabilityConfig = defaultConfig(), config: ReliabilityConfig = defaultConfig()
participantId: SdsParticipantID = "".SdsParticipantID,
): Result[ReliabilityManager, ReliabilityError] = ): Result[ReliabilityManager, ReliabilityError] =
## Creates a new multi-channel ReliabilityManager. ## Creates a new multi-channel ReliabilityManager.
##
## Parameters:
## - config: Configuration options for the ReliabilityManager. If not provided, default configuration is used.
##
## Returns:
## A Result containing either a new ReliabilityManager instance or an error.
try: try:
let rm = ReliabilityManager.new(config, participantId) let rm = ReliabilityManager(
channels: initTable[SdsChannelID, ChannelContext](), config: config
)
initLock(rm.lock)
return ok(rm) return ok(rm)
except Exception: except Exception:
error "Failed to create ReliabilityManager", msg = getCurrentExceptionMsg() error "Failed to create ReliabilityManager", msg = getCurrentExceptionMsg()
@ -18,29 +24,34 @@ proc newReliabilityManager*(
proc isAcknowledged*( proc isAcknowledged*(
msg: UnacknowledgedMessage, msg: UnacknowledgedMessage,
causalHistory: seq[HistoryEntry], causalHistory: seq[SdsMessageID],
rbf: Option[RollingBloomFilter], rbf: Option[RollingBloomFilter],
): bool = ): bool =
if msg.message.messageId in causalHistory.getMessageIds(): if msg.message.messageId in causalHistory:
return true return true
if rbf.isSome(): if rbf.isSome():
return rbf.get().contains(msg.message.messageId) return rbf.get().contains(msg.message.messageId)
return false false
proc reviewAckStatus(rm: ReliabilityManager, msg: SdsMessage) {.gcsafe.} = proc reviewAckStatus(rm: ReliabilityManager, msg: SdsMessage) {.gcsafe.} =
# Parse bloom filter
var rbf: Option[RollingBloomFilter] var rbf: Option[RollingBloomFilter]
if msg.bloomFilter.len > 0: if msg.bloomFilter.len > 0:
let bfResult = deserializeBloomFilter(msg.bloomFilter) let bfResult = deserializeBloomFilter(msg.bloomFilter)
if bfResult.isOk(): if bfResult.isOk():
let bf = bfResult.get()
rbf = some( rbf = some(
RollingBloomFilter.init( RollingBloomFilter(
filter = bf, filter: bfResult.get(),
capacity = bf.capacity, capacity: bfResult.get().capacity,
minCapacity = (bf.capacity.float * (100 - CapacityFlexPercent).float / 100.0).int, minCapacity: (
maxCapacity = (bf.capacity.float * (100 + CapacityFlexPercent).float / 100.0).int, bfResult.get().capacity.float * (100 - CapacityFlexPercent).float / 100.0
).int,
maxCapacity: (
bfResult.get().capacity.float * (100 + CapacityFlexPercent).float / 100.0
).int,
messages: @[],
) )
) )
else: else:
@ -53,6 +64,7 @@ proc reviewAckStatus(rm: ReliabilityManager, msg: SdsMessage) {.gcsafe.} =
return return
let channel = rm.channels[msg.channelId] let channel = rm.channels[msg.channelId]
# Keep track of indices to delete
var toDelete: seq[int] = @[] var toDelete: seq[int] = @[]
var i = 0 var i = 0
@ -64,7 +76,7 @@ proc reviewAckStatus(rm: ReliabilityManager, msg: SdsMessage) {.gcsafe.} =
toDelete.add(i) toDelete.add(i)
inc i inc i
for i in countdown(toDelete.high, 0): for i in countdown(toDelete.high, 0): # Delete in reverse order to maintain indices
channel.outgoingBuffer.delete(toDelete[i]) channel.outgoingBuffer.delete(toDelete[i])
proc wrapOutgoingMessage*( proc wrapOutgoingMessage*(
@ -74,6 +86,14 @@ proc wrapOutgoingMessage*(
channelId: SdsChannelID, channelId: SdsChannelID,
): Result[seq[byte], ReliabilityError] = ): Result[seq[byte], ReliabilityError] =
## Wraps an outgoing message with reliability metadata. ## Wraps an outgoing message with reliability metadata.
##
## Parameters:
## - message: The content of the message to be sent.
## - messageId: Unique identifier for the message
## - channelId: Identifier for the channel this message belongs to.
##
## Returns:
## A Result containing either wrapped message bytes or an error.
if message.len == 0: if message.len == 0:
return err(ReliabilityError.reInvalidArgument) return err(ReliabilityError.reInvalidArgument)
if message.len > MaxMessageSize: if message.len > MaxMessageSize:
@ -89,46 +109,22 @@ proc wrapOutgoingMessage*(
error "Failed to serialize bloom filter", channelId = channelId error "Failed to serialize bloom filter", channelId = channelId
return err(ReliabilityError.reSerializationError) return err(ReliabilityError.reSerializationError)
# SDS-R: collect eligible expired repair requests to attach. Per let msg = SdsMessage(
# spec (sds-r-send-message, RECOMMENDED), prioritise the entries with messageId: messageId,
# the smallest minTimeRepairReq — they are the most overdue and the lamportTimestamp: channel.lamportTimestamp,
# ones the network most needs us to ask about. causalHistory: rm.getRecentSdsMessageIDs(rm.config.maxCausalHistory, channelId),
var repairReqs: seq[HistoryEntry] = @[] channelId: channelId,
let now = getTime() content: message,
var expiredKeys: seq[SdsMessageID] = @[] bloomFilter: bfResult.get(),
var eligible: seq[(SdsMessageID, OutgoingRepairEntry)] = @[]
for msgId, repairEntry in channel.outgoingRepairBuffer:
if now >= repairEntry.minTimeRepairReq:
eligible.add((msgId, repairEntry))
eligible.sort do(a, b: (SdsMessageID, OutgoingRepairEntry)) -> int:
cmp(a[1].minTimeRepairReq, b[1].minTimeRepairReq)
let take = min(eligible.len, rm.config.maxRepairRequests)
for i in 0 ..< take:
repairReqs.add(eligible[i][1].outHistEntry)
expiredKeys.add(eligible[i][0])
for key in expiredKeys:
channel.outgoingRepairBuffer.del(key)
let msg = SdsMessage.init(
messageId = messageId,
lamportTimestamp = channel.lamportTimestamp,
causalHistory = rm.getRecentHistoryEntries(rm.config.maxCausalHistory, channelId),
channelId = channelId,
content = message,
bloomFilter = bfResult.get(),
senderId = rm.participantId,
repairRequest = repairReqs,
) )
channel.outgoingBuffer.add( channel.outgoingBuffer.add(
UnacknowledgedMessage.init(message = msg, sendTime = getTime(), resendAttempts = 0) UnacknowledgedMessage(message: msg, sendTime: getTime(), resendAttempts: 0)
) )
# Add to causal history and bloom filter
channel.bloomFilter.add(msg.messageId) channel.bloomFilter.add(msg.messageId)
# The full SdsMessage carries senderId and content, so a single rm.addToHistory(msg.messageId, channelId)
# addToHistory replaces the old triple-write to messageHistory,
# messageCache, and messageSenders.
rm.addToHistory(msg, channelId)
return serializeMessage(msg) return serializeMessage(msg)
except Exception: except Exception:
@ -149,6 +145,7 @@ proc processIncomingBuffer(rm: ReliabilityManager, channelId: SdsChannelID) {.gc
var processed = initHashSet[SdsMessageID]() var processed = initHashSet[SdsMessageID]()
var readyToProcess = newSeq[SdsMessageID]() var readyToProcess = newSeq[SdsMessageID]()
# Find initially ready messages
for msgId, entry in channel.incomingBuffer: for msgId, entry in channel.incomingBuffer:
if entry.missingDeps.len == 0: if entry.missingDeps.len == 0:
readyToProcess.add(msgId) readyToProcess.add(msgId)
@ -159,11 +156,12 @@ proc processIncomingBuffer(rm: ReliabilityManager, channelId: SdsChannelID) {.gc
continue continue
if msgId in channel.incomingBuffer: if msgId in channel.incomingBuffer:
rm.addToHistory(channel.incomingBuffer[msgId].message, channelId) rm.addToHistory(msgId, channelId)
if not rm.onMessageReady.isNil(): if not rm.onMessageReady.isNil():
rm.onMessageReady(msgId, channelId) rm.onMessageReady(msgId, channelId)
processed.incl(msgId) processed.incl(msgId)
# Update dependencies for remaining messages
for remainingId, entry in channel.incomingBuffer: for remainingId, entry in channel.incomingBuffer:
if remainingId notin processed: if remainingId notin processed:
if msgId in entry.missingDeps: if msgId in entry.missingDeps:
@ -171,16 +169,23 @@ proc processIncomingBuffer(rm: ReliabilityManager, channelId: SdsChannelID) {.gc
if channel.incomingBuffer[remainingId].missingDeps.len == 0: if channel.incomingBuffer[remainingId].missingDeps.len == 0:
readyToProcess.add(remainingId) readyToProcess.add(remainingId)
# Remove processed messages
for msgId in processed: for msgId in processed:
channel.incomingBuffer.del(msgId) channel.incomingBuffer.del(msgId)
proc unwrapReceivedMessage*( proc unwrapReceivedMessage*(
rm: ReliabilityManager, message: seq[byte] rm: ReliabilityManager, message: seq[byte]
): Result[ ): Result[
tuple[message: seq[byte], missingDeps: seq[HistoryEntry], channelId: SdsChannelID], tuple[message: seq[byte], missingDeps: seq[SdsMessageID], channelId: SdsChannelID],
ReliabilityError, ReliabilityError,
] = ] =
## Unwraps a received message and processes its reliability metadata. ## Unwraps a received message and processes its reliability metadata.
##
## Parameters:
## - message: The received message bytes
##
## Returns:
## A Result containing either tuple of (processed message, missing dependencies, channel ID) or an error.
try: try:
let channelId = extractChannelId(message).valueOr: let channelId = extractChannelId(message).valueOr:
return err(ReliabilityError.reDeserializationError) return err(ReliabilityError.reDeserializationError)
@ -190,87 +195,39 @@ proc unwrapReceivedMessage*(
let channel = rm.getOrCreateChannel(channelId) let channel = rm.getOrCreateChannel(channelId)
# SDS-R: opportunistic repair-buffer cleanup — applies to duplicates too,
# so rebroadcasts cancel redundant responses on peers that already have the message.
channel.outgoingRepairBuffer.del(msg.messageId)
channel.incomingRepairBuffer.del(msg.messageId)
if msg.messageId in channel.messageHistory: if msg.messageId in channel.messageHistory:
return ok((msg.content, @[], channelId)) return ok((msg.content, @[], channelId))
channel.bloomFilter.add(msg.messageId) channel.bloomFilter.add(msg.messageId)
rm.updateLamportTimestamp(msg.lamportTimestamp, channelId) rm.updateLamportTimestamp(msg.lamportTimestamp, channelId)
# Review ACK status for outgoing messages
rm.reviewAckStatus(msg) rm.reviewAckStatus(msg)
# SDS-R: process incoming repair requests from this message. We can only
# answer for messages we have actually delivered (i.e. that live in
# messageHistory) — buffered-but-undelivered messages are not in a state
# to confidently rebroadcast.
let now = getTime()
for repairEntry in msg.repairRequest:
# Remove from our own outgoing repair buffer (someone else is also requesting)
channel.outgoingRepairBuffer.del(repairEntry.messageId)
if repairEntry.messageId in channel.messageHistory and
rm.participantId.len > 0 and repairEntry.senderId.len > 0:
if isInResponseGroup(
rm.participantId, repairEntry.senderId,
repairEntry.messageId, rm.config.numResponseGroups
):
let serialized = serializeMessage(channel.messageHistory[repairEntry.messageId])
if serialized.isOk():
let tResp = computeTResp(
rm.participantId, repairEntry.senderId,
repairEntry.messageId, rm.config.repairTMax
)
channel.incomingRepairBuffer[repairEntry.messageId] = IncomingRepairEntry(
inHistEntry: repairEntry,
cachedMessage: serialized.get(),
minTimeRepairResp: now + tResp,
)
var missingDeps = rm.checkDependencies(msg.causalHistory, channelId) var missingDeps = rm.checkDependencies(msg.causalHistory, channelId)
if missingDeps.len == 0: if missingDeps.len == 0:
var depsInBuffer = false var depsInBuffer = false
for msgId, entry in channel.incomingBuffer.pairs(): for msgId, entry in channel.incomingBuffer.pairs():
if msgId in msg.causalHistory.getMessageIds(): if msgId in msg.causalHistory:
depsInBuffer = true depsInBuffer = true
break break
# Check if any dependencies are still in incoming buffer
if depsInBuffer: if depsInBuffer:
channel.incomingBuffer[msg.messageId] = channel.incomingBuffer[msg.messageId] =
IncomingMessage.init(message = msg, missingDeps = initHashSet[SdsMessageID]()) IncomingMessage(message: msg, missingDeps: initHashSet[SdsMessageID]())
else: else:
rm.addToHistory(msg, channelId) # All dependencies met, add to history
# Unblock any buffered messages that were waiting on this one. rm.addToHistory(msg.messageId, channelId)
for pendingId, entry in channel.incomingBuffer:
if msg.messageId in entry.missingDeps:
channel.incomingBuffer[pendingId].missingDeps.excl(msg.messageId)
rm.processIncomingBuffer(channelId) rm.processIncomingBuffer(channelId)
if not rm.onMessageReady.isNil(): if not rm.onMessageReady.isNil():
rm.onMessageReady(msg.messageId, channelId) rm.onMessageReady(msg.messageId, channelId)
else: else:
channel.incomingBuffer[msg.messageId] = channel.incomingBuffer[msg.messageId] =
IncomingMessage.init( IncomingMessage(message: msg, missingDeps: missingDeps.toHashSet())
message = msg,
missingDeps = missingDeps.getMessageIds().toHashSet(),
)
if not rm.onMissingDependencies.isNil(): if not rm.onMissingDependencies.isNil():
rm.onMissingDependencies(msg.messageId, missingDeps, channelId) rm.onMissingDependencies(msg.messageId, missingDeps, channelId)
# SDS-R: add missing deps to outgoing repair buffer
if rm.participantId.len > 0:
for dep in missingDeps:
if dep.messageId notin channel.outgoingRepairBuffer:
let tReq = computeTReq(
rm.participantId, dep.messageId,
rm.config.repairTMin, rm.config.repairTMax
)
channel.outgoingRepairBuffer[dep.messageId] = OutgoingRepairEntry(
outHistEntry: dep,
minTimeRepairReq: now + tReq,
)
return ok((msg.content, missingDeps, channelId)) return ok((msg.content, missingDeps, channelId))
except Exception: except Exception:
error "Failed to unwrap message", msg = getCurrentExceptionMsg() error "Failed to unwrap message", msg = getCurrentExceptionMsg()
@ -280,6 +237,13 @@ proc markDependenciesMet*(
rm: ReliabilityManager, messageIds: seq[SdsMessageID], channelId: SdsChannelID rm: ReliabilityManager, messageIds: seq[SdsMessageID], channelId: SdsChannelID
): Result[void, ReliabilityError] = ): Result[void, ReliabilityError] =
## Marks the specified message dependencies as met. ## Marks the specified message dependencies as met.
##
## Parameters:
## - messageIds: A sequence of message IDs to mark as met.
## - channelId: Identifier for the channel.
##
## Returns:
## A Result indicating success or an error.
try: try:
if channelId notin rm.channels: if channelId notin rm.channels:
return err(ReliabilityError.reInvalidArgument) return err(ReliabilityError.reInvalidArgument)
@ -294,10 +258,6 @@ proc markDependenciesMet*(
if msgId in entry.missingDeps: if msgId in entry.missingDeps:
channel.incomingBuffer[pendingId].missingDeps.excl(msgId) channel.incomingBuffer[pendingId].missingDeps.excl(msgId)
# SDS-R: clear from repair buffers (dependency now met)
channel.outgoingRepairBuffer.del(msgId)
channel.incomingRepairBuffer.del(msgId)
rm.processIncomingBuffer(channelId) rm.processIncomingBuffer(channelId)
return ok() return ok()
except Exception: except Exception:
@ -311,21 +271,24 @@ proc setCallbacks*(
onMessageSent: MessageSentCallback, onMessageSent: MessageSentCallback,
onMissingDependencies: MissingDependenciesCallback, onMissingDependencies: MissingDependenciesCallback,
onPeriodicSync: PeriodicSyncCallback = nil, onPeriodicSync: PeriodicSyncCallback = nil,
onRetrievalHint: RetrievalHintProvider = nil,
onRepairReady: RepairReadyCallback = nil,
) = ) =
## Sets the callback functions for various events in the ReliabilityManager. ## Sets the callback functions for various events in the ReliabilityManager.
##
## Parameters:
## - onMessageReady: Callback function called when a message is ready to be processed.
## - onMessageSent: Callback function called when a message is confirmed as sent.
## - onMissingDependencies: Callback function called when a message has missing dependencies.
## - onPeriodicSync: Callback function called to notify about periodic sync
withLock rm.lock: withLock rm.lock:
rm.onMessageReady = onMessageReady rm.onMessageReady = onMessageReady
rm.onMessageSent = onMessageSent rm.onMessageSent = onMessageSent
rm.onMissingDependencies = onMissingDependencies rm.onMissingDependencies = onMissingDependencies
rm.onPeriodicSync = onPeriodicSync rm.onPeriodicSync = onPeriodicSync
rm.onRetrievalHint = onRetrievalHint
rm.onRepairReady = onRepairReady
proc checkUnacknowledgedMessages( proc checkUnacknowledgedMessages(
rm: ReliabilityManager, channelId: SdsChannelID rm: ReliabilityManager, channelId: SdsChannelID
) {.gcsafe.} = ) {.gcsafe.} =
## Checks and processes unacknowledged messages in the outgoing buffer.
withLock rm.lock: withLock rm.lock:
if channelId notin rm.channels: if channelId notin rm.channels:
error "Channel does not exist", channelId = channelId error "Channel does not exist", channelId = channelId
@ -354,6 +317,7 @@ proc checkUnacknowledgedMessages(
proc periodicBufferSweep( proc periodicBufferSweep(
rm: ReliabilityManager rm: ReliabilityManager
) {.async: (raises: [CancelledError]), gcsafe.} = ) {.async: (raises: [CancelledError]), gcsafe.} =
## Periodically sweeps the buffer to clean up and check unacknowledged messages.
while true: while true:
try: try:
for channelId, channel in rm.channels: for channelId, channel in rm.channels:
@ -371,6 +335,7 @@ proc periodicBufferSweep(
proc periodicSyncMessage( proc periodicSyncMessage(
rm: ReliabilityManager rm: ReliabilityManager
) {.async: (raises: [CancelledError]), gcsafe.} = ) {.async: (raises: [CancelledError]), gcsafe.} =
## Periodically notifies to send a sync message to maintain connectivity.
while true: while true:
try: try:
if not rm.onPeriodicSync.isNil(): if not rm.onPeriodicSync.isNil():
@ -379,71 +344,27 @@ proc periodicSyncMessage(
error "Error in periodic sync", msg = getCurrentExceptionMsg() error "Error in periodic sync", msg = getCurrentExceptionMsg()
await sleepAsync(chronos.seconds(rm.config.syncMessageInterval.inSeconds)) await sleepAsync(chronos.seconds(rm.config.syncMessageInterval.inSeconds))
proc runRepairSweep*(rm: ReliabilityManager) {.gcsafe, raises: [].} =
## SDS-R: Runs a single pass of the repair sweep.
## - Incoming: fires onRepairReady for expired T_resp entries and removes them
## - Outgoing: drops entries past T_max window
## Exposed so it can be driven directly in tests; also invoked by periodicRepairSweep.
## Acquires rm.lock so the repair buffers cannot be observed mid-mutation by
## a concurrent wrapOutgoingMessage / unwrapReceivedMessage on another thread.
withLock rm.lock:
try:
let now = getTime()
for channelId, channel in rm.channels:
try:
# Check incoming repair buffer for expired T_resp (time to rebroadcast)
var toRebroadcast: seq[SdsMessageID] = @[]
for msgId, entry in channel.incomingRepairBuffer:
if now >= entry.minTimeRepairResp:
toRebroadcast.add(msgId)
for msgId in toRebroadcast:
let entry = channel.incomingRepairBuffer[msgId]
channel.incomingRepairBuffer.del(msgId)
if not rm.onRepairReady.isNil():
rm.onRepairReady(entry.cachedMessage, channelId)
# Drop expired outgoing repair entries past T_max
var toRemove: seq[SdsMessageID] = @[]
let tMaxDuration = rm.config.repairTMax
for msgId, entry in channel.outgoingRepairBuffer:
if now - entry.minTimeRepairReq > tMaxDuration:
toRemove.add(msgId)
for msgId in toRemove:
channel.outgoingRepairBuffer.del(msgId)
except Exception:
error "Error in repair sweep for channel",
channelId = channelId, msg = getCurrentExceptionMsg()
except Exception:
error "Error in repair sweep", msg = getCurrentExceptionMsg()
proc periodicRepairSweep(
rm: ReliabilityManager
) {.async: (raises: [CancelledError]), gcsafe.} =
## SDS-R: Periodically checks repair buffers for expired entries.
while true:
rm.runRepairSweep()
await sleepAsync(chronos.milliseconds(rm.config.repairSweepInterval.inMilliseconds))
proc startPeriodicTasks*(rm: ReliabilityManager) = proc startPeriodicTasks*(rm: ReliabilityManager) =
## Starts the periodic tasks for buffer sweeping and sync message sending. ## Starts the periodic tasks for buffer sweeping and sync message sending.
##
## This procedure should be called after creating a ReliabilityManager to enable automatic maintenance.
asyncSpawn rm.periodicBufferSweep() asyncSpawn rm.periodicBufferSweep()
asyncSpawn rm.periodicSyncMessage() asyncSpawn rm.periodicSyncMessage()
asyncSpawn rm.periodicRepairSweep()
proc resetReliabilityManager*(rm: ReliabilityManager): Result[void, ReliabilityError] = proc resetReliabilityManager*(rm: ReliabilityManager): Result[void, ReliabilityError] =
## Resets the ReliabilityManager to its initial state. ## Resets the ReliabilityManager to its initial state.
##
## This procedure clears all buffers and resets the Lamport timestamp.
withLock rm.lock: withLock rm.lock:
try: try:
for channelId, channel in rm.channels: for channelId, channel in rm.channels:
channel.lamportTimestamp = 0 channel.lamportTimestamp = 0
channel.messageHistory.clear() channel.messageHistory.setLen(0)
channel.outgoingBuffer.setLen(0) channel.outgoingBuffer.setLen(0)
channel.incomingBuffer.clear() channel.incomingBuffer.clear()
channel.outgoingRepairBuffer.clear() channel.bloomFilter = newRollingBloomFilter(
channel.incomingRepairBuffer.clear() rm.config.bloomFilterCapacity, rm.config.bloomFilterErrorRate
channel.bloomFilter = )
RollingBloomFilter.init(rm.config.bloomFilterCapacity, rm.config.bloomFilterErrorRate)
rm.channels.clear() rm.channels.clear()
return ok() return ok()
except Exception: except Exception:

244
src/reliability_utils.nim Normal file
View File

@ -0,0 +1,244 @@
import std/[times, locks, tables]
import chronicles, results
import ./[rolling_bloom_filter, message]
type
MessageReadyCallback* =
proc(messageId: SdsMessageID, channelId: SdsChannelID) {.gcsafe.}
MessageSentCallback* =
proc(messageId: SdsMessageID, channelId: SdsChannelID) {.gcsafe.}
MissingDependenciesCallback* = proc(
messageId: SdsMessageID, missingDeps: seq[SdsMessageID], channelId: SdsChannelID
) {.gcsafe.}
PeriodicSyncCallback* = proc() {.gcsafe, raises: [].}
AppCallbacks* = ref object
messageReadyCb*: MessageReadyCallback
messageSentCb*: MessageSentCallback
missingDependenciesCb*: MissingDependenciesCallback
periodicSyncCb*: PeriodicSyncCallback
ReliabilityConfig* = object
bloomFilterCapacity*: int
bloomFilterErrorRate*: float
maxMessageHistory*: int
maxCausalHistory*: int
resendInterval*: Duration
maxResendAttempts*: int
syncMessageInterval*: Duration
bufferSweepInterval*: Duration
ChannelContext* = ref object
lamportTimestamp*: int64
messageHistory*: seq[SdsMessageID]
bloomFilter*: RollingBloomFilter
outgoingBuffer*: seq[UnacknowledgedMessage]
incomingBuffer*: Table[SdsMessageID, IncomingMessage]
ReliabilityManager* = ref object
channels*: Table[SdsChannelID, ChannelContext]
config*: ReliabilityConfig
lock*: Lock
onMessageReady*: proc(messageId: SdsMessageID, channelId: SdsChannelID) {.gcsafe.}
onMessageSent*: proc(messageId: SdsMessageID, channelId: SdsChannelID) {.gcsafe.}
onMissingDependencies*: proc(
messageId: SdsMessageID, missingDeps: seq[SdsMessageID], channelId: SdsChannelID
) {.gcsafe.}
onPeriodicSync*: PeriodicSyncCallback
ReliabilityError* {.pure.} = enum
reInvalidArgument
reOutOfMemory
reInternalError
reSerializationError
reDeserializationError
reMessageTooLarge
proc defaultConfig*(): ReliabilityConfig =
## Creates a default configuration for the ReliabilityManager.
##
## Returns:
## A ReliabilityConfig object with default values.
ReliabilityConfig(
bloomFilterCapacity: DefaultBloomFilterCapacity,
bloomFilterErrorRate: DefaultBloomFilterErrorRate,
maxMessageHistory: DefaultMaxMessageHistory,
maxCausalHistory: DefaultMaxCausalHistory,
resendInterval: DefaultResendInterval,
maxResendAttempts: DefaultMaxResendAttempts,
syncMessageInterval: DefaultSyncMessageInterval,
bufferSweepInterval: DefaultBufferSweepInterval,
)
proc cleanup*(rm: ReliabilityManager) {.raises: [].} =
if not rm.isNil():
try:
withLock rm.lock:
for channelId, channel in rm.channels:
channel.outgoingBuffer.setLen(0)
channel.incomingBuffer.clear()
channel.messageHistory.setLen(0)
rm.channels.clear()
except Exception:
error "Error during cleanup", error = getCurrentExceptionMsg()
proc cleanBloomFilter*(
rm: ReliabilityManager, channelId: SdsChannelID
) {.gcsafe, raises: [].} =
withLock rm.lock:
try:
if channelId in rm.channels:
rm.channels[channelId].bloomFilter.clean()
except Exception:
error "Failed to clean bloom filter",
error = getCurrentExceptionMsg(), channelId = channelId
proc addToHistory*(
rm: ReliabilityManager, msgId: SdsMessageID, channelId: SdsChannelID
) {.gcsafe, raises: [].} =
try:
if channelId in rm.channels:
let channel = rm.channels[channelId]
channel.messageHistory.add(msgId)
if channel.messageHistory.len > rm.config.maxMessageHistory:
channel.messageHistory.delete(0)
except Exception:
error "Failed to add to history",
channelId = channelId, msgId = msgId, error = getCurrentExceptionMsg()
proc updateLamportTimestamp*(
rm: ReliabilityManager, msgTs: int64, channelId: SdsChannelID
) {.gcsafe, raises: [].} =
try:
if channelId in rm.channels:
let channel = rm.channels[channelId]
channel.lamportTimestamp = max(msgTs, channel.lamportTimestamp) + 1
except Exception:
error "Failed to update lamport timestamp",
channelId = channelId, msgTs = msgTs, error = getCurrentExceptionMsg()
proc getRecentSdsMessageIDs*(
rm: ReliabilityManager, n: int, channelId: SdsChannelID
): seq[SdsMessageID] =
try:
if channelId in rm.channels:
let channel = rm.channels[channelId]
result = channel.messageHistory[max(0, channel.messageHistory.len - n) .. ^1]
else:
result = @[]
except Exception:
error "Failed to get recent message IDs",
channelId = channelId, n = n, error = getCurrentExceptionMsg()
result = @[]
proc checkDependencies*(
rm: ReliabilityManager, deps: seq[SdsMessageID], channelId: SdsChannelID
): seq[SdsMessageID] =
var missingDeps: seq[SdsMessageID] = @[]
try:
if channelId in rm.channels:
let channel = rm.channels[channelId]
for depId in deps:
if depId notin channel.messageHistory:
missingDeps.add(depId)
else:
missingDeps = deps
except Exception:
error "Failed to check dependencies",
channelId = channelId, error = getCurrentExceptionMsg()
missingDeps = deps
return missingDeps
proc getMessageHistory*(
rm: ReliabilityManager, channelId: SdsChannelID
): seq[SdsMessageID] =
withLock rm.lock:
try:
if channelId in rm.channels:
result = rm.channels[channelId].messageHistory
else:
result = @[]
except Exception:
error "Failed to get message history",
channelId = channelId, error = getCurrentExceptionMsg()
result = @[]
proc getOutgoingBuffer*(
rm: ReliabilityManager, channelId: SdsChannelID
): seq[UnacknowledgedMessage] =
withLock rm.lock:
try:
if channelId in rm.channels:
result = rm.channels[channelId].outgoingBuffer
else:
result = @[]
except Exception:
error "Failed to get outgoing buffer",
channelId = channelId, error = getCurrentExceptionMsg()
result = @[]
proc getIncomingBuffer*(
rm: ReliabilityManager, channelId: SdsChannelID
): Table[SdsMessageID, message.IncomingMessage] =
withLock rm.lock:
try:
if channelId in rm.channels:
result = rm.channels[channelId].incomingBuffer
else:
result = initTable[SdsMessageID, message.IncomingMessage]()
except Exception:
error "Failed to get incoming buffer",
channelId = channelId, error = getCurrentExceptionMsg()
result = initTable[SdsMessageID, message.IncomingMessage]()
proc getOrCreateChannel*(
rm: ReliabilityManager, channelId: SdsChannelID
): ChannelContext =
try:
if channelId notin rm.channels:
rm.channels[channelId] = ChannelContext(
lamportTimestamp: 0,
messageHistory: @[],
bloomFilter: newRollingBloomFilter(
rm.config.bloomFilterCapacity, rm.config.bloomFilterErrorRate
),
outgoingBuffer: @[],
incomingBuffer: initTable[SdsMessageID, IncomingMessage](),
)
result = rm.channels[channelId]
except Exception:
error "Failed to get or create channel",
channelId = channelId, error = getCurrentExceptionMsg()
raise
proc ensureChannel*(
rm: ReliabilityManager, channelId: SdsChannelID
): Result[void, ReliabilityError] =
withLock rm.lock:
try:
discard rm.getOrCreateChannel(channelId)
return ok()
except Exception:
error "Failed to ensure channel",
channelId = channelId, msg = getCurrentExceptionMsg()
return err(ReliabilityError.reInternalError)
proc removeChannel*(
rm: ReliabilityManager, channelId: SdsChannelID
): Result[void, ReliabilityError] =
withLock rm.lock:
try:
if channelId in rm.channels:
let channel = rm.channels[channelId]
channel.outgoingBuffer.setLen(0)
channel.incomingBuffer.clear()
channel.messageHistory.setLen(0)
rm.channels.del(channelId)
return ok()
except Exception:
error "Failed to remove channel",
channelId = channelId, msg = getCurrentExceptionMsg()
return err(ReliabilityError.reInternalError)

View File

@ -1,14 +1,23 @@
import chronos import chronos
import chronicles import chronicles
import ./bloom import ./[bloom, message]
import ./types/rolling_bloom_filter
export rolling_bloom_filter
proc init*( type RollingBloomFilter* = object
T: type RollingBloomFilter, filter*: BloomFilter
capacity*: int
minCapacity*: int
maxCapacity*: int
messages*: seq[SdsMessageID]
const
DefaultBloomFilterCapacity* = 10000
DefaultBloomFilterErrorRate* = 0.001
CapacityFlexPercent* = 20
proc newRollingBloomFilter*(
capacity: int = DefaultBloomFilterCapacity, capacity: int = DefaultBloomFilterCapacity,
errorRate: float = DefaultBloomFilterErrorRate, errorRate: float = DefaultBloomFilterErrorRate,
): T {.gcsafe.} = ): RollingBloomFilter {.gcsafe.} =
let targetCapacity = if capacity <= 0: DefaultBloomFilterCapacity else: capacity let targetCapacity = if capacity <= 0: DefaultBloomFilterCapacity else: capacity
let targetError = let targetError =
if errorRate <= 0.0 or errorRate >= 1.0: DefaultBloomFilterErrorRate else: errorRate if errorRate <= 0.0 or errorRate >= 1.0: DefaultBloomFilterErrorRate else: errorRate
@ -16,6 +25,7 @@ proc init*(
let filterResult = initializeBloomFilter(targetCapacity, targetError) let filterResult = initializeBloomFilter(targetCapacity, targetError)
if filterResult.isErr: if filterResult.isErr:
error "Failed to initialize bloom filter", error = filterResult.error error "Failed to initialize bloom filter", error = filterResult.error
# Try with default values if custom values failed
if capacity != DefaultBloomFilterCapacity or errorRate != DefaultBloomFilterErrorRate: if capacity != DefaultBloomFilterCapacity or errorRate != DefaultBloomFilterErrorRate:
let defaultResult = let defaultResult =
initializeBloomFilter(DefaultBloomFilterCapacity, DefaultBloomFilterErrorRate) initializeBloomFilter(DefaultBloomFilterCapacity, DefaultBloomFilterErrorRate)
@ -35,11 +45,12 @@ proc init*(
minCapacity = minCapacity, minCapacity = minCapacity,
maxCapacity = maxCapacity maxCapacity = maxCapacity
return RollingBloomFilter.init( return RollingBloomFilter(
filter = defaultResult.get(), filter: defaultResult.get(),
capacity = DefaultBloomFilterCapacity, capacity: DefaultBloomFilterCapacity,
minCapacity = minCapacity, minCapacity: minCapacity,
maxCapacity = maxCapacity, maxCapacity: maxCapacity,
messages: @[],
) )
else: else:
error "Could not create bloom filter", error = filterResult.error error "Could not create bloom filter", error = filterResult.error
@ -52,11 +63,12 @@ proc init*(
info "Successfully initialized bloom filter", info "Successfully initialized bloom filter",
capacity = targetCapacity, minCapacity = minCapacity, maxCapacity = maxCapacity capacity = targetCapacity, minCapacity = minCapacity, maxCapacity = maxCapacity
return RollingBloomFilter.init( return RollingBloomFilter(
filter = filterResult.get(), filter: filterResult.get(),
capacity = targetCapacity, capacity: targetCapacity,
minCapacity = minCapacity, minCapacity: minCapacity,
maxCapacity = maxCapacity, maxCapacity: maxCapacity,
messages: @[],
) )
proc clean*(rbf: var RollingBloomFilter) {.gcsafe.} = proc clean*(rbf: var RollingBloomFilter) {.gcsafe.} =
@ -85,12 +97,22 @@ proc clean*(rbf: var RollingBloomFilter) {.gcsafe.} =
proc add*(rbf: var RollingBloomFilter, messageId: SdsMessageID) {.gcsafe.} = proc add*(rbf: var RollingBloomFilter, messageId: SdsMessageID) {.gcsafe.} =
## Adds a message ID to the rolling bloom filter. ## Adds a message ID to the rolling bloom filter.
##
## Parameters:
## - messageId: The ID of the message to add.
rbf.filter.insert(cast[string](messageId)) rbf.filter.insert(cast[string](messageId))
rbf.messages.add(messageId) rbf.messages.add(messageId)
# Clean if we exceed max capacity
if rbf.messages.len > rbf.maxCapacity: if rbf.messages.len > rbf.maxCapacity:
rbf.clean() rbf.clean()
proc contains*(rbf: RollingBloomFilter, messageId: SdsMessageID): bool = proc contains*(rbf: RollingBloomFilter, messageId: SdsMessageID): bool =
## Checks if a message ID is in the rolling bloom filter. ## Checks if a message ID is in the rolling bloom filter.
return rbf.filter.lookup(cast[string](messageId)) ##
## Parameters:
## - messageId: The ID of the message to check.
##
## Returns:
## True if the message ID is probably in the filter, false otherwise.
rbf.filter.lookup(cast[string](messageId))

View File

@ -1 +0,0 @@
path = "../"

View File

@ -1,5 +1,5 @@
import unittest, results, strutils import unittest, results, strutils
import sds/bloom import ../src/bloom
from random import rand, randomize from random import rand, randomize
suite "bloom filter": suite "bloom filter":

File diff suppressed because it is too large Load Diff

View File

@ -1,80 +0,0 @@
#!/usr/bin/env bash
# Generates nix/deps.nix from nimble.lock using nix-prefetch-git.
# Usage: ./tools/gen-nix-deps.sh [nimble.lock] [nix/deps.nix]
set -euo pipefail
usage() {
cat <<EOF
Usage:
$0 <nimble.lock> <output.nix>
Example:
$0 nimble.lock nix/deps.nix
EOF
}
if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then
usage; exit 0
fi
if [[ $# -ne 2 ]]; then
usage; exit 1
fi
LOCKFILE="$1"
OUTFILE="$2"
command -v jq >/dev/null || { echo "error: jq required"; exit 1; }
command -v nix-prefetch-git >/dev/null || { echo "error: nix-prefetch-git required"; exit 1; }
if [[ ! -f "$LOCKFILE" ]]; then
echo "[!] $LOCKFILE not found"
echo "[*] Generating $LOCKFILE via 'nimble lock'"
nimble lock
fi
echo "[*] Generating $OUTFILE from $LOCKFILE"
mkdir -p "$(dirname "$OUTFILE")"
cat > "$OUTFILE" <<'EOF'
# AUTOGENERATED from nimble.lock — do not edit manually.
# Regenerate with: ./tools/gen-nix-deps.sh nimble.lock nix/deps.nix
{ pkgs }:
{
EOF
jq -c '
.packages
| to_entries[]
| select(.value.downloadMethod == "git")
| select(.key != "nim" and .key != "nimble")
' "$LOCKFILE" | while read -r entry; do
name=$(jq -r '.key' <<<"$entry")
url=$(jq -r '.value.url' <<<"$entry")
rev=$(jq -r '.value.vcsRevision' <<<"$entry")
echo " [*] Prefetching $name @ $rev"
sha=$(nix-prefetch-git \
--url "$url" \
--rev "$rev" \
--fetch-submodules \
| jq -r '.sha256')
cat >> "$OUTFILE" <<EOF
${name} = pkgs.fetchgit {
url = "${url}";
rev = "${rev}";
sha256 = "${sha}";
fetchSubmodules = true;
};
EOF
done
cat >> "$OUTFILE" <<'EOF'
}
EOF
echo "[✓] Wrote $OUTFILE"

1
vendor/nim-chronicles vendored Submodule

@ -0,0 +1 @@
Subproject commit a8fb38a10bcb548df78e9a70bd77b26bb50abd12

1
vendor/nim-chronos vendored Submodule

@ -0,0 +1 @@
Subproject commit b55e2816eb45f698ddaca8d8473e401502562db2

1
vendor/nim-confutils vendored Submodule

@ -0,0 +1 @@
Subproject commit e214b3992a31acece6a9aada7d0a1ad37c928f3b

1
vendor/nim-faststreams vendored Submodule

@ -0,0 +1 @@
Subproject commit 2b08c774afaafd600cf4c6f994cf78b8aa090c0c

1
vendor/nim-json-serialization vendored Submodule

@ -0,0 +1 @@
Subproject commit 2b1c5eb11df3647a2cee107cd4cce3593cbb8bcf

1
vendor/nim-libp2p vendored Submodule

@ -0,0 +1 @@
Subproject commit ac25da6cea158768bbc060b7be2fbe004206f3bb

1
vendor/nim-results vendored Submodule

@ -0,0 +1 @@
Subproject commit df8113dda4c2d74d460a8fa98252b0b771bf1f27

1
vendor/nim-serialization vendored Submodule

@ -0,0 +1 @@
Subproject commit 548d0adc9797a10b2db7f788b804330306293088

1
vendor/nim-stew vendored Submodule

@ -0,0 +1 @@
Subproject commit d7a6868ba84165e7fdde427af9a1fc3f5f5cc151

1
vendor/nim-taskpools vendored Submodule

@ -0,0 +1 @@
Subproject commit 7b74a716a40249720fd7da428113147942b9642d

1
vendor/nimbus-build-system vendored Submodule

@ -0,0 +1 @@
Subproject commit 0be0663e1af76e869837226a4ef3e586fcc737d3