Mirror of https://github.com/logos-storage/logos-storage-nim.git (synced 2026-01-02 13:33:10 +00:00)
Chore/update nim version (#1052)
* Move to version 2.0.6
* Update nim-confutils submodule to latest version
* Update dependencies
* Update Nim version to 2.0.12
* Add gcsafe pragma
* Add missing import
* Update specific conf for Nim 2.x
* Fix method signatures
* Revert erasure coding attempt to fix bug
* More gcsafe pragma
* Duplicate code from libp2p because it is not exported anymore
* Fix camelcase function names
* Use alreadySeen because need is not a bool anymore
* newLPStreamReadError does not exist anymore so use another error
* Replace ValidIpAddress by IpAddress
* Add gcsafe pragma
* Restore maintenance parameter deleted by mistake when removing erasure coding fix attempt code
* Update method signatures
* Copy LPStreamReadError code from libp2p which was removed
* Fix camel case
* Fix enums in tests
* Fix camel case
* Extract node components to a variable to make Nim 2 happy
* Update the tests using ValidIpAddress to IpAddress
* Fix cast for value which is already an option
* Set nim version to 2.0.x for CI
* Set nim version to 2.0.x for CI
* Move to miniupnp version 2.2.4 to avoid symlink error
* Set core.symlinks to false for Windows for miniupnp >= 2.2.5 support
* Update to Nim 2.0.14
* Update CI nim versions to 2.0.14
* Try with GCC 14
* Replace apt-fast by apt-get
* Update ubuntu runner to latest
* Use Ubuntu 20.04 for coverage
* Disable CI cache for coverage
* Add coverage property description
* Remove commented test
* Check the node value of seen instead of using alreadySeen
* Fix the merge. The taskpool work was reverted.
* Update nim-ethers submodule
* Remove deprecated ValidIpAddress. Fix missing case and imports.
* Fix a weird issue where nim-confutils cannot find NatAny
* Fix tests and remove useless static keyword
This commit is contained in:
parent caed3c07a3
commit f25c555d59
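Many of the bullets above are the same Nim 2 migration applied across the tree; the most frequent one is replacing stew's deprecated ValidIpAddress with std/net's plain IpAddress. A minimal sketch of that swap using only the standard library (the surrounding code is illustrative, not taken from the repository):

import std/net

# old: let ip = ValidIpAddress.init("127.0.0.1")   (stew/shims/net, deprecated)
let ip: IpAddress = parseIpAddress("127.0.0.1")    # raises ValueError on bad input
echo ip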
.github/actions/nimbus-build-system/action.yml (vendored, 35 lines changed)
@@ -11,13 +11,16 @@ inputs:
     default: "amd64"
   nim_version:
     description: "Nim version"
-    default: "version-1-6"
+    default: "v2.0.14"
   rust_version:
     description: "Rust version"
     default: "1.79.0"
   shell:
     description: "Shell to run commands in"
     default: "bash --noprofile --norc -e -o pipefail"
+  coverage:
+    description: "True if the process is used for coverage"
+    default: false
 runs:
   using: "composite"
   steps:
@@ -31,8 +34,8 @@ runs:
       if: inputs.os == 'linux' && (inputs.cpu == 'amd64' || inputs.cpu == 'arm64')
       shell: ${{ inputs.shell }} {0}
       run: |
-        sudo apt-fast update -qq
-        sudo DEBIAN_FRONTEND='noninteractive' apt-fast install \
+        sudo apt-get update -qq
+        sudo DEBIAN_FRONTEND='noninteractive' apt-get install \
           --no-install-recommends -yq lcov

     - name: APT (Linux i386)
@@ -40,8 +43,8 @@ runs:
       shell: ${{ inputs.shell }} {0}
       run: |
         sudo dpkg --add-architecture i386
-        sudo apt-fast update -qq
-        sudo DEBIAN_FRONTEND='noninteractive' apt-fast install \
+        sudo apt-get update -qq
+        sudo DEBIAN_FRONTEND='noninteractive' apt-get install \
           --no-install-recommends -yq gcc-multilib g++-multilib

     - name: Homebrew (macOS)
@@ -78,11 +81,21 @@ runs:
           mingw-w64-i686-ntldd-git
           mingw-w64-i686-rust

-    - name: MSYS2 (Windows All) - Downgrade to gcc 13
+    - name: MSYS2 (Windows All) - Update to gcc 14
       if: inputs.os == 'windows'
       shell: ${{ inputs.shell }} {0}
       run: |
-        pacman -U --noconfirm https://repo.msys2.org/mingw/ucrt64/mingw-w64-ucrt-x86_64-gcc-13.2.0-6-any.pkg.tar.zst https://repo.msys2.org/mingw/ucrt64/mingw-w64-ucrt-x86_64-gcc-libs-13.2.0-6-any.pkg.tar.zst
+        pacman -U --noconfirm https://repo.msys2.org/mingw/ucrt64/mingw-w64-ucrt-x86_64-gcc-14.2.0-2-any.pkg.tar.zst https://repo.msys2.org/mingw/ucrt64/mingw-w64-ucrt-x86_64-gcc-libs-14.2.0-2-any.pkg.tar.zst

+    - name: Install gcc 14 on Linux
+      # We don't want to install gcc 14 for coverage (Ubuntu 20.04)
+      if : ${{ inputs.os == 'linux' && !inputs.coverage }}
+      shell: ${{ inputs.shell }} {0}
+      run: |
+        # Add GCC-14 to alternatives
+        sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-14 14
+        # Set GCC-14 as the default
+        sudo update-alternatives --set gcc /usr/bin/gcc-14
+
     - name: Derive environment variables
       shell: ${{ inputs.shell }} {0}
@@ -159,6 +172,7 @@ runs:
     - name: Restore Nim toolchain binaries from cache
       id: nim-cache
       uses: actions/cache@v4
+      if : ${{ !inputs.coverage }}
       with:
         path: NimBinaries
         key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_version }}-cache-${{ env.cache_nonce }}-${{ github.run_id }}
@@ -168,9 +182,16 @@ runs:
       shell: ${{ inputs.shell }} {0}
       run: echo "NIM_COMMIT=${{ inputs.nim_version }}" >> ${GITHUB_ENV}

+    - name: MSYS2 (Windows All) - Disable git symbolic links (since miniupnp 2.2.5)
+      if: inputs.os == 'windows'
+      shell: ${{ inputs.shell }} {0}
+      run: |
+        git config --global core.symlinks false
+
     - name: Build Nim and Codex dependencies
       shell: ${{ inputs.shell }} {0}
       run: |
+        gcc --version
         make -j${ncpu} CI_CACHE=NimBinaries ${ARCH_OVERRIDE} QUICK_AND_DIRTY_COMPILER=1 update
         echo
         ./env.sh nim --version
.github/workflows/ci-reusable.yml (vendored, 1 line changed)

@@ -40,6 +40,7 @@ jobs:
           os: ${{ matrix.os }}
           shell: ${{ matrix.shell }}
           nim_version: ${{ matrix.nim_version }}
+          coverage: false

       ## Part 1 Tests ##
       - name: Unit tests
.github/workflows/ci.yml (vendored, 15 lines changed)

@@ -9,7 +9,7 @@ on:

 env:
   cache_nonce: 0 # Allows for easily busting actions/cache caches
-  nim_version: pinned
+  nim_version: v2.0.14

 concurrency:
   group: ${{ github.workflow }}-${{ github.ref || github.run_id }}
@@ -27,10 +27,10 @@ jobs:
         uses: fabiocaccamo/create-matrix-action@v4
         with:
           matrix: |
-            os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
-            os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+            os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+            os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+            os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
+            os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
             os {macos}, cpu {amd64}, builder {macos-13}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
             os {macos}, cpu {amd64}, builder {macos-13}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
             os {macos}, cpu {amd64}, builder {macos-13}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
@@ -48,6 +48,10 @@ jobs:
       cache_nonce: ${{ needs.matrix.outputs.cache_nonce }}

   coverage:
+    # Force to stick to ubuntu 20.04 for coverage because
+    # lcov was updated to 2.x version in ubuntu-latest
+    # and cause a lot of issues.
+    # See https://github.com/linux-test-project/lcov/issues/238
     runs-on: ubuntu-20.04
     steps:
       - name: Checkout sources
@@ -61,6 +65,7 @@ jobs:
         with:
           os: linux
           nim_version: ${{ env.nim_version }}
+          coverage: true

       - name: Generate coverage data
         run: |
Makefile (4 lines changed)

@@ -15,8 +15,8 @@
 #
 # If NIM_COMMIT is set to "nimbusbuild", this will use the
 # version pinned by nimbus-build-system.
-PINNED_NIM_VERSION := 38640664088251bbc88917b4bacfd86ec53014b8 # 1.6.21
+#PINNED_NIM_VERSION := 38640664088251bbc88917b4bacfd86ec53014b8 # 1.6.21
+PINNED_NIM_VERSION := v2.0.14
 ifeq ($(NIM_COMMIT),)
 NIM_COMMIT := $(PINNED_NIM_VERSION)
 else ifeq ($(NIM_COMMIT),pinned)
@@ -47,7 +47,7 @@ when isMainModule:
   let config = CodexConf.load(
     version = codexFullVersion,
     envVarsPrefix = "codex",
-    secondarySources = proc (config: CodexConf, sources: auto) =
+    secondarySources = proc (config: CodexConf, sources: auto) {.gcsafe, raises: [ConfigurationError].} =
       if configFile =? config.configFile:
         sources.addConfigFile(Toml, configFile)
   )
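Nim 2 enforces that a proc passed where an effect-annotated proc type is expected declares compatible effects, which is why the secondarySources closure above gained {.gcsafe, raises: [ConfigurationError].}. A self-contained sketch of the rule (ValueError stands in for confutils' ConfigurationError):

type Callback = proc (x: int) {.gcsafe, raises: [ValueError].}

proc onValue(x: int) {.gcsafe, raises: [ValueError].} =
  if x < 0:
    raise newException(ValueError, "negative input")

let cb: Callback = onValue  # compiles because the declared effects match
cb(1)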
@@ -8,7 +8,7 @@ type
   SecondsSince1970* = int64
   Timeout* = object of CatchableError

-method now*(clock: Clock): SecondsSince1970 {.base, upraises: [].} =
+method now*(clock: Clock): SecondsSince1970 {.base, gcsafe, upraises: [].} =
   raiseAssert "not implemented"

 method waitUntil*(clock: Clock, time: SecondsSince1970) {.base, async.} =
@@ -11,7 +11,6 @@ import std/sequtils
 import std/strutils
 import std/os
 import std/tables
-import std/cpuinfo

 import pkg/chronos
 import pkg/presto
@@ -24,7 +23,6 @@ import pkg/stew/shims/net as stewnet
 import pkg/datastore
 import pkg/ethers except Rng
 import pkg/stew/io2
-import pkg/taskpools

 import ./node
 import ./conf
@@ -56,7 +54,6 @@ type
     codexNode: CodexNodeRef
     repoStore: RepoStore
     maintenance: BlockMaintainer
-    taskpool: Taskpool

   CodexPrivateKey* = libp2p.PrivateKey # alias
   EthWallet = ethers.Wallet
@@ -174,10 +171,6 @@ proc start*(s: CodexServer) {.async.} =
 proc stop*(s: CodexServer) {.async.} =
   notice "Stopping codex node"

-  s.taskpool.syncAll()
-  s.taskpool.shutdown()
-
   await allFuturesThrowing(
     s.restServer.stop(),
     s.codexNode.switch.stop(),
@@ -266,15 +259,12 @@ proc new*(
     else:
       none Prover

-    taskpool = Taskpool.new(num_threads = countProcessors())
-
     codexNode = CodexNodeRef.new(
       switch = switch,
       networkStore = store,
       engine = engine,
-      prover = prover,
       discovery = discovery,
-      taskpool = taskpool)
+      prover = prover)

     restServer = RestServerRef.new(
       codexNode.initRestApi(config, repoStore, config.apiCorsAllowedOrigin),
@@ -290,5 +280,4 @@ proc new*(
     codexNode: codexNode,
     restServer: restServer,
     repoStore: repoStore,
-    maintenance: maintenance,
-    taskpool: taskpool)
+    maintenance: maintenance)
@@ -43,6 +43,7 @@ import ./units
 import ./utils
 import ./nat
 import ./utils/natutils

 from ./validationconfig import MaxSlots, ValidationGroups

 export units, net, codextypes, logutils, completeCmdArg, parseCmdArg, NatConfig
@@ -119,9 +120,9 @@ type

     metricsAddress* {.
       desc: "Listening address of the metrics server"
-      defaultValue: ValidIpAddress.init("127.0.0.1")
+      defaultValue: defaultAddress(config)
       defaultValueDesc: "127.0.0.1"
-      name: "metrics-address" }: ValidIpAddress
+      name: "metrics-address" }: IpAddress

     metricsPort* {.
       desc: "Listening HTTP port of the metrics server"
@@ -147,7 +148,7 @@ type
     nat* {.
       desc: "Specify method to use for determining public address. " &
        "Must be one of: any, none, upnp, pmp, extip:<IP>"
-      defaultValue: NatConfig(hasExtIp: false, nat: NatAny)
+      defaultValue: defaultNatConfig()
       defaultValueDesc: "any"
       name: "nat" }: NatConfig

@@ -410,6 +411,12 @@ type
 logutils.formatIt(LogFormat.textLines, EthAddress): it.short0xHexLog
 logutils.formatIt(LogFormat.json, EthAddress): %it

+func defaultAddress*(conf: CodexConf): IpAddress =
+  result = static parseIpAddress("127.0.0.1")
+
+func defaultNatConfig*(): NatConfig =
+  result = NatConfig(hasExtIp: false, nat: NatStrategy.NatAny)
+
 func persistence*(self: CodexConf): bool =
   self.cmd == StartUpCmd.persistence
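The `static` in defaultAddress above forces the parse to run at compile time, so the default becomes a constant and a malformed literal fails the build rather than the node start. A minimal standalone sketch of the same trick, using only std/net (illustrative, not the project code):

import std/net

func defaultAddress(): IpAddress =
  # parsed once by the compiler; a typo here is a compile error, not a crash
  result = static parseIpAddress("127.0.0.1")

echo defaultAddress()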
@@ -442,13 +449,17 @@ const

 proc parseCmdArg*(T: typedesc[MultiAddress],
                   input: string): MultiAddress
-                  {.upraises: [ValueError, LPError].} =
+                  {.upraises: [ValueError] .} =
   var ma: MultiAddress
-  let res = MultiAddress.init(input)
-  if res.isOk:
-    ma = res.get()
-  else:
-    warn "Invalid MultiAddress", input=input, error = res.error()
+  try:
+    let res = MultiAddress.init(input)
+    if res.isOk:
+      ma = res.get()
+    else:
+      warn "Invalid MultiAddress", input=input, error = res.error()
+      quit QuitFailure
+  except LPError as exc:
+    warn "Invalid MultiAddress uri", uri = input, error = exc.msg
     quit QuitFailure
   ma
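The same catch-log-exit pattern, reduced to a standalone example with std/net so it compiles on its own (the proc name and IP type are stand-ins for the MultiAddress code above):

import std/net

proc parseCmdArg(input: string): IpAddress =
  try:
    result = parseIpAddress(input)   # raises ValueError on malformed input
  except ValueError as exc:
    echo "Invalid IP address: ", input, " (", exc.msg, ")"
    quit QuitFailure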
@@ -458,6 +469,9 @@ proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T =
     if not res.fromURI(uri):
       warn "Invalid SignedPeerRecord uri", uri = uri
       quit QuitFailure
+  except LPError as exc:
+    warn "Invalid SignedPeerRecord uri", uri = uri, error = exc.msg
+    quit QuitFailure
   except CatchableError as exc:
     warn "Invalid SignedPeerRecord uri", uri = uri, error = exc.msg
     quit QuitFailure
@@ -476,7 +490,7 @@ func parseCmdArg*(T: type NatConfig, p: string): T {.raises: [ValueError].} =
   else:
     if p.startsWith("extip:"):
       try:
-        let ip = ValidIpAddress.init(p[6..^1])
+        let ip = parseIpAddress(p[6..^1])
         NatConfig(hasExtIp: true, extIp: ip)
       except ValueError:
         let error = "Not a valid IP address: " & p[6..^1]
@@ -516,7 +530,11 @@ proc readValue*(r: var TomlReader, val: var SignedPeerRecord) =
     error "invalid SignedPeerRecord configuration value", error = err.msg
     quit QuitFailure

-  val = SignedPeerRecord.parseCmdArg(uri)
+  try:
+    val = SignedPeerRecord.parseCmdArg(uri)
+  except LPError as err:
+    warn "Invalid SignedPeerRecord uri", uri = uri, error = err.msg
+    quit QuitFailure

 proc readValue*(r: var TomlReader, val: var MultiAddress) =
   without input =? r.readValue(string).catch, err:
@@ -17,6 +17,7 @@ import pkg/questionable/results
 import pkg/stew/shims/net
 import pkg/contractabi/address as ca
 import pkg/codexdht/discv5/[routing_table, protocol as discv5]
+from pkg/nimcrypto import keccak256

 import ./rng
 import ./errors
@@ -124,7 +125,7 @@ method provide*(d: Discovery, host: ca.Address) {.async, base.} =

 method removeProvider*(
   d: Discovery,
-  peerId: PeerId): Future[void] {.base.} =
+  peerId: PeerId): Future[void] {.base, gcsafe.} =
   ## Remove provider from providers table
   ##

@@ -169,7 +170,7 @@ proc stop*(d: Discovery) {.async.} =
 proc new*(
   T: type Discovery,
   key: PrivateKey,
-  bindIp = ValidIpAddress.init(IPv4_any()),
+  bindIp = IPv4_any(),
   bindPort = 0.Port,
   announceAddrs: openArray[MultiAddress],
   bootstrapNodes: openArray[SignedPeerRecord] = [],
@@ -199,7 +200,7 @@ proc new*(

   self.protocol = newProtocol(
     key,
-    bindIp = bindIp.toNormalIp,
+    bindIp = bindIp,
     bindPort = bindPort,
     record = self.providerRecord.get,
     bootstrapRecords = bootstrapNodes,
@@ -22,7 +22,7 @@ type
   EncoderBackend* = ref object of ErasureBackend
   DecoderBackend* = ref object of ErasureBackend

-method release*(self: ErasureBackend) {.base.} =
+method release*(self: ErasureBackend) {.base, gcsafe.} =
   ## release the backend
   ##
   raiseAssert("not implemented!")
@@ -31,7 +31,7 @@ method encode*(
   self: EncoderBackend,
   buffers,
   parity: var openArray[seq[byte]]
-): Result[void, cstring] {.base.} =
+): Result[void, cstring] {.base, gcsafe.} =
   ## encode buffers using a backend
   ##
   raiseAssert("not implemented!")
@@ -41,7 +41,7 @@ method decode*(
   buffers,
   parity,
   recovered: var openArray[seq[byte]]
-): Result[void, cstring] {.base.} =
+): Result[void, cstring] {.base, gcsafe.} =
   ## decode buffers using a backend
   ##
   raiseAssert("not implemented!")
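A self-contained sketch of the pattern these hunks apply throughout the commit: marking the dispatch-root method {.base, gcsafe.} so that overrides and gcsafe callers type-check under Nim 2's stricter effect inference (the types here are illustrative, not the project's):

type ErasureBackendLike = ref object of RootObj

method release(self: ErasureBackendLike) {.base, gcsafe.} =
  raiseAssert "not implemented"

type LeoBackend = ref object of ErasureBackendLike

method release(self: LeoBackend) =
  echo "released"

let b: ErasureBackendLike = LeoBackend()
b.release()  # dynamic dispatch picks the LeoBackend override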
@@ -17,7 +17,6 @@ import std/sugar
 import pkg/chronos
 import pkg/libp2p/[multicodec, cid, multihash]
 import pkg/libp2p/protobuf/minprotobuf
-import pkg/taskpools

 import ../logutils
 import ../manifest
@@ -32,7 +31,6 @@ import ../errors
 import pkg/stew/byteutils

 import ./backend
-import ./asyncbackend

 export backend

@@ -73,7 +71,6 @@ type
     encoderProvider*: EncoderProvider
     decoderProvider*: DecoderProvider
     store*: BlockStore
-    taskpool: Taskpool

   EncodingParams = object
     ecK: Natural
@@ -295,23 +292,30 @@ proc encodeData(
   # TODO: Don't allocate a new seq every time, allocate once and zero out
   var
     data = seq[seq[byte]].new() # number of blocks to encode
+    parityData = newSeqWith[seq[byte]](params.ecM, newSeq[byte](manifest.blockSize.int))

   data[].setLen(params.ecK)
+  # TODO: this is a tight blocking loop so we sleep here to allow
+  # other events to be processed, this should be addressed
+  # by threading
+  await sleepAsync(10.millis)

   without resolved =?
     (await self.prepareEncodingData(manifest, params, step, data, cids, emptyBlock)), err:
     trace "Unable to prepare data", error = err.msg
     return failure(err)

-  trace "Erasure coding data", data = data[].len, parity = params.ecM
+  trace "Erasure coding data", data = data[].len, parity = parityData.len

-  without parity =? await asyncEncode(self.taskpool, encoder, data, manifest.blockSize.int, params.ecM), err:
-    trace "Error encoding data", err = err.msg
-    return failure(err)
+  if (
+    let res = encoder.encode(data[], parityData);
+    res.isErr):
+    trace "Unable to encode manifest!", error = $res.error
+    return failure($res.error)

   var idx = params.rounded + step
   for j in 0..<params.ecM:
-    without blk =? bt.Block.new(parity[j]), error:
+    without blk =? bt.Block.new(parityData[j]), error:
       trace "Unable to create parity block", err = error.msg
       return failure(error)
@@ -396,15 +400,21 @@ proc decode*(
   cids[].setLen(encoded.blocksCount)
   try:
     for step in 0..<encoded.steps:
+      # TODO: this is a tight blocking loop so we sleep here to allow
+      # other events to be processed, this should be addressed
+      # by threading
+      await sleepAsync(10.millis)

       var
         data = seq[seq[byte]].new()
-        parity = seq[seq[byte]].new()
+        parityData = seq[seq[byte]].new()
+        recovered = newSeqWith[seq[byte]](encoded.ecK, newSeq[byte](encoded.blockSize.int))

       data[].setLen(encoded.ecK) # set len to K
-      parity[].setLen(encoded.ecM) # set len to M
+      parityData[].setLen(encoded.ecM) # set len to M

       without (dataPieces, _) =?
-        (await self.prepareDecodingData(encoded, step, data, parity, cids, emptyBlock)), err:
+        (await self.prepareDecodingData(encoded, step, data, parityData, cids, emptyBlock)), err:
         trace "Unable to prepare data", error = err.msg
         return failure(err)

@@ -414,9 +424,11 @@ proc decode*(

       trace "Erasure decoding data"

-      without recovered =? await asyncDecode(self.taskpool, decoder, data, parity, encoded.blockSize.int), err:
-        trace "Error decoding data", err = err.msg
-        return failure(err)
+      if (
+        let err = decoder.decode(data[], parityData[], recovered);
+        err.isErr):
+        trace "Unable to decode data!", err = $err.error
+        return failure($err.error)

       for i in 0..<encoded.ecK:
         let idx = i * encoded.steps + step
@@ -470,13 +482,11 @@ proc new*(
   T: type Erasure,
   store: BlockStore,
   encoderProvider: EncoderProvider,
-  decoderProvider: DecoderProvider,
-  taskpool: Taskpool): Erasure =
+  decoderProvider: DecoderProvider): Erasure =
   ## Create a new Erasure instance for encoding and decoding manifests
   ##

   Erasure(
     store: store,
     encoderProvider: encoderProvider,
-    decoderProvider: decoderProvider,
-    taskpool: taskpool)
+    decoderProvider: decoderProvider)
@@ -38,6 +38,29 @@ func toFailure*[T](exp: Option[T]): Result[T, ref CatchableError] {.inline.} =
   else:
     T.failure("Option is None")

+# allFuturesThrowing was moved to the tests in libp2p
+proc allFuturesThrowing*[T](args: varargs[Future[T]]): Future[void] =
+  var futs: seq[Future[T]]
+  for fut in args:
+    futs &= fut
+  proc call() {.async.} =
+    var first: ref CatchableError = nil
+    futs = await allFinished(futs)
+    for fut in futs:
+      if fut.failed:
+        let err = fut.readError()
+        if err of Defect:
+          raise err
+        else:
+          if err of CancelledError:
+            raise err
+          if isNil(first):
+            first = err
+    if not isNil(first):
+      raise first
+
+  return call()
+
 proc allFutureResult*[T](fut: seq[Future[T]]): Future[?!void] {.async.} =
   try:
     await allFuturesThrowing(fut)
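For context, a hedged usage sketch of the helper copied back above (it assumes pkg/chronos, where allFinished comes from, plus the allFuturesThrowing defined in the hunk): every future is allowed to settle, and the first collected failure is re-raised afterwards instead of aborting on the first error.

import pkg/chronos

proc work(fail: bool) {.async.} =
  if fail:
    raise newException(ValueError, "boom")

proc main() {.async.} =
  try:
    # both futures run to completion before the failure propagates
    await allFuturesThrowing(work(false), work(true))
  except ValueError as exc:
    echo "collected failure: ", exc.msg

waitFor main()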
@@ -47,6 +47,14 @@ type
   CodexProof* = ref object of ByteProof
     mcodec*: MultiCodec

+# CodeHashes is not exported from libp2p
+# So we need to recreate it instead of
+proc initMultiHashCodeTable(): Table[MultiCodec, MHash] {.compileTime.} =
+  for item in HashesList:
+    result[item.mcodec] = item
+
+const CodeHashes = initMultiHashCodeTable()
+
 func mhash*(mcodec: MultiCodec): ?!MHash =
   let
     mhash = CodeHashes.getOrDefault(mcodec)
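The same compile-time-table pattern, reduced to a self-contained sketch with std/tables (HashesList and MHash above come from libp2p; the squares table here is purely illustrative):

import std/tables

proc initSquareTable(): Table[int, int] {.compileTime.} =
  for i in 1 .. 4:
    result[i] = i * i

# evaluated once by the compiler; no table is built at runtime
const Squares = initSquareTable()

echo Squares.getOrDefault(3)  # 9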
@@ -24,10 +24,10 @@ import ./merkletree
 export merkletree, poseidon2

 const
-  KeyNoneF = F.fromhex("0x0")
-  KeyBottomLayerF = F.fromhex("0x1")
-  KeyOddF = F.fromhex("0x2")
-  KeyOddAndBottomLayerF = F.fromhex("0x3")
+  KeyNoneF = F.fromHex("0x0")
+  KeyBottomLayerF = F.fromHex("0x1")
+  KeyOddF = F.fromHex("0x2")
+  KeyOddAndBottomLayerF = F.fromHex("0x3")

   Poseidon2Zero* = zero
@@ -73,6 +73,8 @@ proc getExternalIP*(natStrategy: NatStrategy, quiet = false): Option[IpAddress]
       msg = "Internet Gateway Device found but it's not connected. Trying anyway."
     of NotAnIGD:
       msg = "Some device found, but it's not recognised as an Internet Gateway Device. Trying anyway."
+    of IGDIpNotRoutable:
+      msg = "Internet Gateway Device found and is connected, but with a reserved or non-routable IP. Trying anyway."
     if not quiet:
       debug "UPnP", msg
     if canContinue:
@@ -115,7 +117,7 @@ proc getExternalIP*(natStrategy: NatStrategy, quiet = false): Option[IpAddress]
 # default) is a public IP. That's a long shot, because code paths involving a
 # user-provided bind address are not supposed to get here.
 proc getRoutePrefSrc(
-    bindIp: ValidIpAddress): (Option[ValidIpAddress], PrefSrcStatus) =
+    bindIp: IpAddress): (Option[IpAddress], PrefSrcStatus) =
   let bindAddress = initTAddress(bindIp, Port(0))

   if bindAddress.isAnyLocal():
@@ -124,18 +126,18 @@ proc getRoutePrefSrc(
       # No route was found, log error and continue without IP.
       error "No routable IP address found, check your network connection",
         error = ip.error
-      return (none(ValidIpAddress), NoRoutingInfo)
+      return (none(IpAddress), NoRoutingInfo)
     elif ip.get().isGlobalUnicast():
       return (some(ip.get()), PrefSrcIsPublic)
     else:
-      return (none(ValidIpAddress), PrefSrcIsPrivate)
+      return (none(IpAddress), PrefSrcIsPrivate)
   elif bindAddress.isGlobalUnicast():
-    return (some(ValidIpAddress.init(bindIp)), BindAddressIsPublic)
+    return (some(bindIp), BindAddressIsPublic)
   else:
-    return (none(ValidIpAddress), BindAddressIsPrivate)
+    return (none(IpAddress), BindAddressIsPrivate)

 # Try to detect a public IP assigned to this host, before trying NAT traversal.
-proc getPublicRoutePrefSrcOrExternalIP*(natStrategy: NatStrategy, bindIp: ValidIpAddress, quiet = true): Option[ValidIpAddress] =
+proc getPublicRoutePrefSrcOrExternalIP*(natStrategy: NatStrategy, bindIp: IpAddress, quiet = true): Option[IpAddress] =
   let (prefSrcIp, prefSrcStatus) = getRoutePrefSrc(bindIp)

   case prefSrcStatus:
@@ -144,7 +146,7 @@ proc getPublicRoutePrefSrcOrExternalIP*(natStrategy: NatStrategy, bindIp: IpAddr
     of PrefSrcIsPrivate, BindAddressIsPrivate:
       let extIp = getExternalIP(natStrategy, quiet)
       if extIp.isSome:
-        return some(ValidIpAddress.init(extIp.get))
+        return some(extIp.get)

 proc doPortMapping(tcpPort, udpPort: Port, description: string): Option[(Port, Port)] {.gcsafe.} =
   var
@@ -294,14 +296,14 @@ proc redirectPorts*(tcpPort, udpPort: Port, description: string): Option[(Port,

 proc setupNat*(natStrategy: NatStrategy, tcpPort, udpPort: Port,
     clientId: string):
-    tuple[ip: Option[ValidIpAddress], tcpPort, udpPort: Option[Port]] =
+    tuple[ip: Option[IpAddress], tcpPort, udpPort: Option[Port]] =
   ## Setup NAT port mapping and get external IP address.
   ## If any of this fails, we don't return any IP address but do return the
   ## original ports as best effort.
   ## TODO: Allow for tcp or udp port mapping to be optional.
   let extIp = getExternalIP(natStrategy)
   if extIp.isSome:
-    let ip = ValidIpAddress.init(extIp.get)
+    let ip = extIp.get
     let extPorts = ({.gcsafe.}:
       redirectPorts(tcpPort = tcpPort,
                     udpPort = udpPort,
@@ -311,20 +313,20 @@ proc setupNat*(natStrategy: NatStrategy, tcpPort, udpPort: Port,
       (ip: some(ip), tcpPort: some(extTcpPort), udpPort: some(extUdpPort))
     else:
       warn "UPnP/NAT-PMP available but port forwarding failed"
-      (ip: none(ValidIpAddress), tcpPort: some(tcpPort), udpPort: some(udpPort))
+      (ip: none(IpAddress), tcpPort: some(tcpPort), udpPort: some(udpPort))
   else:
     warn "UPnP/NAT-PMP not available"
-    (ip: none(ValidIpAddress), tcpPort: some(tcpPort), udpPort: some(udpPort))
+    (ip: none(IpAddress), tcpPort: some(tcpPort), udpPort: some(udpPort))

 type
   NatConfig* = object
     case hasExtIp*: bool
-      of true: extIp*: ValidIpAddress
+      of true: extIp*: IpAddress
       of false: nat*: NatStrategy

-proc setupAddress*(natConfig: NatConfig, bindIp: ValidIpAddress,
+proc setupAddress*(natConfig: NatConfig, bindIp: IpAddress,
     tcpPort, udpPort: Port, clientId: string):
-    tuple[ip: Option[ValidIpAddress], tcpPort, udpPort: Option[Port]]
+    tuple[ip: Option[IpAddress], tcpPort, udpPort: Option[Port]]
     {.gcsafe.} =
   ## Set-up of the external address via any of the ways as configured in
   ## `NatConfig`. In case all fails an error is logged and the bind ports are
@@ -353,10 +355,10 @@ proc setupAddress*(natConfig: NatConfig, bindIp: IpAddress,
       return (prefSrcIp, some(tcpPort), some(udpPort))
     of PrefSrcIsPrivate:
       error "No public IP address found. Should not use --nat:none option"
-      return (none(ValidIpAddress), some(tcpPort), some(udpPort))
+      return (none(IpAddress), some(tcpPort), some(udpPort))
     of BindAddressIsPrivate:
       error "Bind IP is not a public IP address. Should not use --nat:none option"
-      return (none(ValidIpAddress), some(tcpPort), some(udpPort))
+      return (none(IpAddress), some(tcpPort), some(udpPort))
   of NatStrategy.NatUpnp, NatStrategy.NatPmp:
     return setupNat(natConfig.nat, tcpPort, udpPort, clientId)
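A minimal sketch of the return shape these NAT helpers now share: std/net's plain IpAddress wrapped in Option, with no ValidIpAddress wrapper (the lookup itself is faked here; 203.0.113.7 is a documentation address):

import std/[net, options]

proc externalIp(found: bool): Option[IpAddress] =
  # stand-in for the UPnP/NAT-PMP query
  if found: some(parseIpAddress("203.0.113.7"))
  else: none(IpAddress)

echo externalIp(true)   # prints the some(...) representation
echo externalIp(false)  # prints the none representation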
@@ -13,7 +13,6 @@ import std/options
 import std/sequtils
 import std/strformat
 import std/sugar
-import std/cpuinfo
 import times

 import pkg/questionable
@@ -27,7 +26,6 @@ import pkg/libp2p/stream/bufferstream
 # TODO: remove once exported by libp2p
 import pkg/libp2p/routing_record
 import pkg/libp2p/signed_envelope
-import pkg/taskpools

 import ./chunker
 import ./slots
@@ -71,7 +69,6 @@ type
     contracts*: Contracts
     clock*: Clock
     storage*: Contracts
-    taskpool*: Taskpool

   CodexNodeRef* = ref CodexNode

@@ -213,7 +210,7 @@ proc fetchBatched*(
 proc streamSingleBlock(
   self: CodexNodeRef,
   cid: Cid
-): Future[?!LPstream] {.async.} =
+): Future[?!LPStream] {.async.} =
   ## Streams the contents of a single block.
   ##
   trace "Streaming single block", cid = cid
@@ -253,8 +250,7 @@ proc streamEntireDataset(
     erasure = Erasure.new(
       self.networkStore,
       leoEncoderProvider,
-      leoDecoderProvider,
-      self.taskpool)
+      leoDecoderProvider)

   without _ =? (await erasure.decode(manifest)), error:
     error "Unable to erasure decode manifest", manifestCid, exc = error.msg
     return failure(error)
@@ -424,8 +420,7 @@ proc setupRequest(
     erasure = Erasure.new(
       self.networkStore.localStore,
       leoEncoderProvider,
-      leoDecoderProvider,
-      self.taskpool)
+      leoDecoderProvider)

   without encoded =? (await erasure.encode(manifest, ecK, ecM)), error:
     trace "Unable to erasure code dataset"
@@ -761,8 +756,7 @@ proc new*(
   engine: BlockExcEngine,
   discovery: Discovery,
   prover = Prover.none,
-  contracts = Contracts.default,
-  taskpool = Taskpool.new(num_threads = countProcessors())): CodexNodeRef =
+  contracts = Contracts.default): CodexNodeRef =
   ## Create new instance of a Codex self, call `start` to run it
   ##
@@ -772,5 +766,4 @@ proc new*(
     engine: engine,
     prover: prover,
     discovery: discovery,
-    contracts: contracts,
-    taskpool: taskpool)
+    contracts: contracts)
@@ -90,7 +90,7 @@ proc init*(_: type RestNode, node: dn.Node): RestNode =
     peerId: node.record.data.peerId,
     record: node.record,
     address: node.address,
-    seen: node.seen
+    seen: node.seen > 0.5
   )

 proc init*(_: type RestRoutingTable, routingTable: rt.RoutingTable): RestRoutingTable =
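Per the commit message ("Check the node value of seen instead of using alreadySeen"), the DHT node's seen field is evidently a numeric score in the updated codexdht rather than a bool, so the REST view thresholds it. A trivial illustration (the 0.5 cut-off is taken from the diff; the score value here is made up):

let seenScore = 0.8              # stand-in for node.seen from codexdht
let seen: bool = seenScore > 0.5
echo seen                        # true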
@@ -36,7 +36,7 @@ method run(state: SaleFilling, machine: Machine): Future[?State] {.async.} =
     slotIndex = data.slotIndex

   let slotState = await market.slotState(slotId(data.requestId, data.slotIndex))
-  var collateral: Uint256
+  var collateral: UInt256

   if slotState == SlotState.Repair:
     # When repairing the node gets "discount" on the collateral that it needs to
@@ -43,7 +43,7 @@ proc zkeyFilePath(config: CodexConf): string =

 proc initializeFromCircuitDirFiles(
   config: CodexConf,
-  utils: BackendUtils): ?!AnyBackend =
+  utils: BackendUtils): ?!AnyBackend {.gcsafe.} =
   if fileExists(config.r1csFilePath) and
     fileExists(config.wasmFilePath) and
     fileExists(config.zkeyFilePath):
@@ -76,7 +76,7 @@ proc release*(self: CircomCompat) =
   ##

   if not isNil(self.backendCfg):
-    self.backendCfg.unsafeAddr.releaseCfg()
+    self.backendCfg.unsafeAddr.release_cfg()

   if not isNil(self.vkp):
     self.vkp.unsafeAddr.release_key()
@@ -102,9 +102,9 @@ proc prove[H](

   defer:
     if ctx != nil:
-      ctx.addr.releaseCircomCompat()
+      ctx.addr.release_circom_compat()

-  if initCircomCompat(
+  if init_circom_compat(
     self.backendCfg,
     addr ctx) != ERR_OK or ctx == nil:
     raiseAssert("failed to initialize CircomCompat ctx")
@@ -114,27 +114,27 @@ proc prove[H](
     dataSetRoot = input.datasetRoot.toBytes
     slotRoot = input.slotRoot.toBytes

-  if ctx.pushInputU256Array(
+  if ctx.push_input_u256_array(
     "entropy".cstring, entropy[0].addr, entropy.len.uint32) != ERR_OK:
     return failure("Failed to push entropy")

-  if ctx.pushInputU256Array(
+  if ctx.push_input_u256_array(
     "dataSetRoot".cstring, dataSetRoot[0].addr, dataSetRoot.len.uint32) != ERR_OK:
     return failure("Failed to push data set root")

-  if ctx.pushInputU256Array(
+  if ctx.push_input_u256_array(
     "slotRoot".cstring, slotRoot[0].addr, slotRoot.len.uint32) != ERR_OK:
     return failure("Failed to push data set root")

-  if ctx.pushInputU32(
+  if ctx.push_input_u32(
     "nCellsPerSlot".cstring, input.nCellsPerSlot.uint32) != ERR_OK:
     return failure("Failed to push nCellsPerSlot")

-  if ctx.pushInputU32(
+  if ctx.push_input_u32(
     "nSlotsPerDataSet".cstring, input.nSlotsPerDataSet.uint32) != ERR_OK:
     return failure("Failed to push nSlotsPerDataSet")

-  if ctx.pushInputU32(
+  if ctx.push_input_u32(
     "slotIndex".cstring, input.slotIndex.uint32) != ERR_OK:
     return failure("Failed to push slotIndex")

@@ -143,7 +143,7 @@ proc prove[H](

   doAssert(slotProof.len == self.datasetDepth)
   # arrays are always flattened
-  if ctx.pushInputU256Array(
+  if ctx.push_input_u256_array(
     "slotProof".cstring,
     slotProof[0].addr,
     uint (slotProof[0].len * slotProof.len)) != ERR_OK:
@@ -154,13 +154,13 @@ proc prove[H](
       merklePaths = s.merklePaths.mapIt( it.toBytes )
       data = s.cellData.mapIt( @(it.toBytes) ).concat

-    if ctx.pushInputU256Array(
+    if ctx.push_input_u256_array(
       "merklePaths".cstring,
       merklePaths[0].addr,
       uint (merklePaths[0].len * merklePaths.len)) != ERR_OK:
       return failure("Failed to push merkle paths")

-    if ctx.pushInputU256Array(
+    if ctx.push_input_u256_array(
       "cellData".cstring,
       data[0].addr,
       data.len.uint) != ERR_OK:
@@ -172,7 +172,7 @@ proc prove[H](
   let proof =
     try:
       if (
-        let res = self.backendCfg.proveCircuit(ctx, proofPtr.addr);
+        let res = self.backendCfg.prove_circuit(ctx, proofPtr.addr);
         res != ERR_OK) or
         proofPtr == nil:
         return failure("Failed to prove - err code: " & $res)
@@ -180,7 +180,7 @@ proc prove[H](
       proofPtr[]
     finally:
       if proofPtr != nil:
-        proofPtr.addr.releaseProof()
+        proofPtr.addr.release_proof()

   success proof

@@ -202,7 +202,7 @@ proc verify*[H](
     inputs = inputs.toCircomInputs()

   try:
-    let res = verifyCircuit(proofPtr, inputs.addr, self.vkp)
+    let res = verify_circuit(proofPtr, inputs.addr, self.vkp)
     if res == ERR_OK:
       success true
     elif res == ERR_FAILED_TO_VERIFY_PROOF:
@@ -228,18 +228,18 @@ proc init*(
   var cfg: ptr CircomBn254Cfg
   var zkey = if zkeyPath.len > 0: zkeyPath.cstring else: nil

-  if initCircomConfig(
+  if init_circom_config(
     r1csPath.cstring,
     wasmPath.cstring,
     zkey, cfg.addr) != ERR_OK or cfg == nil:
-    if cfg != nil: cfg.addr.releaseCfg()
+    if cfg != nil: cfg.addr.release_cfg()
     raiseAssert("failed to initialize circom compat config")

   var
     vkpPtr: ptr VerifyingKey = nil

-  if cfg.getVerifyingKey(vkpPtr.addr) != ERR_OK or vkpPtr == nil:
-    if vkpPtr != nil: vkpPtr.addr.releaseKey()
+  if cfg.get_verifying_key(vkpPtr.addr) != ERR_OK or vkpPtr == nil:
+    if vkpPtr != nil: vkpPtr.addr.release_key()
     raiseAssert("Failed to get verifying key")

   CircomCompat(
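Worth noting: these call-site renames align spelling with the snake_case names declared in the circom-compat bindings ("Fix camelcase function names" in the commit message). Nim compares identifiers case- and underscore-insensitively after the first character, so both spellings already resolved to the same symbol and behaviour is unchanged. A self-contained illustration:

# both spellings below name the same proc: Nim normalises identifiers after
# the first character by dropping underscores and ignoring case
proc push_input_u32(name: string, value: uint32): bool =
  echo name, " = ", value
  true

discard pushInputU32("slotIndex", 3'u32)  # camelCase call, snake_case definition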
@@ -8,5 +8,5 @@ method initializeCircomBackend*(
   r1csFile: string,
   wasmFile: string,
   zKeyFile: string
-): AnyBackend {.base.} =
+): AnyBackend {.base, gcsafe.}=
   CircomCompat.init(r1csFile, wasmFile, zKeyFile)
@ -33,13 +33,13 @@ type
|
|||||||
BlockStore* = ref object of RootObj
|
BlockStore* = ref object of RootObj
|
||||||
onBlockStored*: ?CidCallback
|
onBlockStored*: ?CidCallback
|
||||||
|
|
||||||
method getBlock*(self: BlockStore, cid: Cid): Future[?!Block] {.base.} =
|
method getBlock*(self: BlockStore, cid: Cid): Future[?!Block] {.base, gcsafe.} =
|
||||||
## Get a block from the blockstore
|
## Get a block from the blockstore
|
||||||
##
|
##
|
||||||
|
|
||||||
raiseAssert("getBlock by cid not implemented!")
|
raiseAssert("getBlock by cid not implemented!")
|
||||||
|
|
||||||
method getBlock*(self: BlockStore, treeCid: Cid, index: Natural): Future[?!Block] {.base.} =
|
method getBlock*(self: BlockStore, treeCid: Cid, index: Natural): Future[?!Block] {.base, gcsafe.} =
|
||||||
## Get a block from the blockstore
|
## Get a block from the blockstore
|
||||||
##
|
##
|
||||||
|
|
||||||
@ -50,13 +50,13 @@ method getCid*(self: BlockStore, treeCid: Cid, index: Natural): Future[?!Cid] {.
|
|||||||
##
|
##
|
||||||
raiseAssert("getCid by treecid not implemented!")
|
raiseAssert("getCid by treecid not implemented!")
|
||||||
|
|
||||||
method getBlock*(self: BlockStore, address: BlockAddress): Future[?!Block] {.base.} =
|
method getBlock*(self: BlockStore, address: BlockAddress): Future[?!Block] {.base, gcsafe.} =
|
||||||
## Get a block from the blockstore
|
## Get a block from the blockstore
|
||||||
##
|
##
|
||||||
|
|
||||||
raiseAssert("getBlock by addr not implemented!")
|
raiseAssert("getBlock by addr not implemented!")
|
||||||
|
|
||||||
method getBlockAndProof*(self: BlockStore, treeCid: Cid, index: Natural): Future[?!(Block, CodexProof)] {.base.} =
|
method getBlockAndProof*(self: BlockStore, treeCid: Cid, index: Natural): Future[?!(Block, CodexProof)] {.base, gcsafe.} =
|
||||||
## Get a block and associated inclusion proof by Cid of a merkle tree and an index of a leaf in a tree
|
## Get a block and associated inclusion proof by Cid of a merkle tree and an index of a leaf in a tree
|
||||||
##
|
##
|
||||||
|
|
||||||
@ -65,7 +65,7 @@ method getBlockAndProof*(self: BlockStore, treeCid: Cid, index: Natural): Future
|
|||||||
method putBlock*(
|
method putBlock*(
|
||||||
self: BlockStore,
|
self: BlockStore,
|
||||||
blk: Block,
|
blk: Block,
|
||||||
ttl = Duration.none): Future[?!void] {.base.} =
|
ttl = Duration.none): Future[?!void] {.base, gcsafe.} =
|
||||||
## Put a block to the blockstore
|
## Put a block to the blockstore
|
||||||
##
|
##
|
||||||
|
|
||||||
@@ -76,7 +76,7 @@ method putCidAndProof*(
   treeCid: Cid,
   index: Natural,
   blockCid: Cid,
-  proof: CodexProof): Future[?!void] {.base.} =
+  proof: CodexProof): Future[?!void] {.base, gcsafe.} =
   ## Put a block proof to the blockstore
   ##

@@ -85,7 +85,7 @@ method putCidAndProof*(
 method getCidAndProof*(
   self: BlockStore,
   treeCid: Cid,
-  index: Natural): Future[?!(Cid, CodexProof)] {.base.} =
+  index: Natural): Future[?!(Cid, CodexProof)] {.base, gcsafe.} =
   ## Get a block proof from the blockstore
   ##

@@ -94,7 +94,7 @@ method getCidAndProof*(
 method ensureExpiry*(
   self: BlockStore,
   cid: Cid,
-  expiry: SecondsSince1970): Future[?!void] {.base.} =
+  expiry: SecondsSince1970): Future[?!void] {.base, gcsafe.} =
   ## Ensure that block's assosicated expiry is at least given timestamp
   ## If the current expiry is lower then it is updated to the given one, otherwise it is left intact
   ##
@@ -105,32 +105,32 @@ method ensureExpiry*(
   self: BlockStore,
   treeCid: Cid,
   index: Natural,
-  expiry: SecondsSince1970): Future[?!void] {.base.} =
+  expiry: SecondsSince1970): Future[?!void] {.base, gcsafe.} =
   ## Ensure that block's associated expiry is at least given timestamp
   ## If the current expiry is lower then it is updated to the given one, otherwise it is left intact
   ##

   raiseAssert("Not implemented!")

-method delBlock*(self: BlockStore, cid: Cid): Future[?!void] {.base.} =
+method delBlock*(self: BlockStore, cid: Cid): Future[?!void] {.base, gcsafe.} =
   ## Delete a block from the blockstore
   ##

   raiseAssert("delBlock not implemented!")

-method delBlock*(self: BlockStore, treeCid: Cid, index: Natural): Future[?!void] {.base.} =
+method delBlock*(self: BlockStore, treeCid: Cid, index: Natural): Future[?!void] {.base, gcsafe.} =
   ## Delete a block from the blockstore
   ##

   raiseAssert("delBlock not implemented!")

-method hasBlock*(self: BlockStore, cid: Cid): Future[?!bool] {.base.} =
+method hasBlock*(self: BlockStore, cid: Cid): Future[?!bool] {.base, gcsafe.} =
   ## Check if the block exists in the blockstore
   ##

   raiseAssert("hasBlock not implemented!")

-method hasBlock*(self: BlockStore, tree: Cid, index: Natural): Future[?!bool] {.base.} =
+method hasBlock*(self: BlockStore, tree: Cid, index: Natural): Future[?!bool] {.base, gcsafe.} =
   ## Check if the block exists in the blockstore
   ##

@@ -138,13 +138,13 @@ method hasBlock*(self: BlockStore, tree: Cid, index: Natural): Future[?!bool] {.

 method listBlocks*(
   self: BlockStore,
-  blockType = BlockType.Manifest): Future[?!AsyncIter[?Cid]] {.base.} =
+  blockType = BlockType.Manifest): Future[?!AsyncIter[?Cid]] {.base, gcsafe.} =
   ## Get the list of blocks in the BlockStore. This is an intensive operation
   ##

   raiseAssert("listBlocks not implemented!")

-method close*(self: BlockStore): Future[void] {.base.} =
+method close*(self: BlockStore): Future[void] {.base, gcsafe.} =
   ## Close the blockstore, cleaning up resources managed by it.
   ## For some implementations this may be a no-op
   ##
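Note on the {.gcsafe.} additions above: Nim 2 checks effect compatibility between a base method and its overrides more strictly, and marking the base declaration {.base, gcsafe.} lets these dispatchers be called from gcsafe code while requiring every override to stay gcsafe. A minimal sketch of the pattern, with illustrative names that are not from this repository:

type Store = ref object of RootObj
type MemStore = ref object of Store

method fetch(self: Store): string {.base, gcsafe.} =
  # Base dispatcher: concrete stores must override this.
  raiseAssert "not implemented"

method fetch(self: MemStore): string =
  # The override inherits the gcsafe requirement from the base method.
  "value"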
@@ -323,15 +323,16 @@ method getBlockExpirations*(
     return failure(err)

   let
-    filteredIter = await asyncQueryIter.filterSuccess()
-    blockExpIter = await mapFilter[KeyVal[BlockMetadata], BlockExpiration](filteredIter,
-      proc (kv: KeyVal[BlockMetadata]): Future[?BlockExpiration] {.async.} =
-        without cid =? Cid.init(kv.key.value).mapFailure, err:
-          error "Failed decoding cid", err = err.msg
-          return BlockExpiration.none
-
-        BlockExpiration(cid: cid, expiry: kv.value.expiry).some
-    )
+    filteredIter: AsyncIter[KeyVal[BlockMetadata]] = await asyncQueryIter.filterSuccess()
+
+  proc mapping (kv: KeyVal[BlockMetadata]): Future[?BlockExpiration] {.async.} =
+    without cid =? Cid.init(kv.key.value).mapFailure, err:
+      error "Failed decoding cid", err = err.msg
+      return BlockExpiration.none
+
+    BlockExpiration(cid: cid, expiry: kv.value.expiry).some
+
+  let blockExpIter = await mapFilter[KeyVal[BlockMetadata], BlockExpiration](filteredIter, mapping)

   success(blockExpIter)
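The rewrite above hoists an inline anonymous async closure into the named proc mapping before handing it to mapFilter, a shape Nim 2's generic inference handles more reliably. A reduced sketch of the same pattern under chronos; applyAll and the other names below are hypothetical stand-ins, not the repository's mapFilter:

import pkg/chronos

proc mapping(x: int): Future[int] {.async.} =
  return x * 2

proc applyAll(xs: seq[int],
              fn: proc (x: int): Future[int] {.gcsafe.}): Future[seq[int]] {.async.} =
  # Apply the named async proc to every element in order.
  var res: seq[int]
  for x in xs:
    res.add await fn(x)
  return res

assert waitFor(applyAll(@[1, 2, 3], mapping)) == @[2, 4, 6]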
@@ -64,7 +64,7 @@ method readOnce*(
   self: AsyncStreamWrapper,
   pbytes: pointer,
   nbytes: int
-): Future[int] {.async.} =
+): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =

   trace "Reading bytes from reader", bytes = nbytes
   if isNil(self.reader):
@@ -118,7 +118,7 @@ method closed*(self: AsyncStreamWrapper): bool =
 method atEof*(self: AsyncStreamWrapper): bool =
   self.reader.atEof()

-method closeImpl*(self: AsyncStreamWrapper) {.async.} =
+method closeImpl*(self: AsyncStreamWrapper) {.async: (raises: []).} =
   try:
     trace "Shutting down async chronos stream"
     if not self.closed():
@@ -130,7 +130,7 @@ method closeImpl*(self: AsyncStreamWrapper) {.async.} =

     trace "Shutdown async chronos stream"
   except CancelledError as exc:
-    raise exc
+    error "Error received cancelled error when closing chronos stream", msg = exc.msg
   except CatchableError as exc:
     trace "Error closing async chronos stream", msg = exc.msg

@@ -73,11 +73,20 @@ proc `size=`*(self: StoreStream, size: int)
 method atEof*(self: StoreStream): bool =
   self.offset >= self.size

+type LPStreamReadError* = object of LPStreamError
+  par*: ref CatchableError
+
+proc newLPStreamReadError*(p: ref CatchableError): ref LPStreamReadError =
+  var w = newException(LPStreamReadError, "Read stream failed")
+  w.msg = w.msg & ", originated from [" & $p.name & "] " & p.msg
+  w.par = p
+  result = w
+
 method readOnce*(
   self: StoreStream,
   pbytes: pointer,
   nbytes: int
-): Future[int] {.async.} =
+): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
   ## Read `nbytes` from current position in the StoreStream into output buffer pointed by `pbytes`.
   ## Return how many bytes were actually read before EOF was encountered.
   ## Raise exception if we are already at EOF.
@@ -100,8 +109,9 @@ method readOnce*(
       self.manifest.blockSize.int - blockOffset])
     address = BlockAddress(leaf: true, treeCid: self.manifest.treeCid, index: blockNum)

+
   # Read contents of block `blockNum`
-  without blk =? await self.store.getBlock(address), error:
+  without blk =? (await self.store.getBlock(address)).tryGet.catch, error:
     raise newLPStreamReadError(error)

   trace "Reading bytes from store stream", manifestCid = self.manifest.cid.get(), numBlocks = self.manifest.blocksCount, blockNum, blkCid = blk.cid, bytes = readBytes, blockOffset
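Since newLPStreamReadError was removed from nim-libp2p, the helper above is copied in locally; it wraps whatever error a failed read produced and keeps the original reachable through the par field. A hedged usage sketch, where riskyRead is a hypothetical failing call:

try:
  discard riskyRead()              # placeholder for a read that fails
except CatchableError as e:
  raise newLPStreamReadError(e)    # message carries e.name and e.msg; par keeps e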
@@ -19,7 +19,7 @@ import pkg/stew/endians2

 func remapAddr*(
   address: MultiAddress,
-  ip: Option[ValidIpAddress] = ValidIpAddress.none,
+  ip: Option[IpAddress] = IpAddress.none,
   port: Option[Port] = Port.none
 ): MultiAddress =
   ## Remap addresses to new IP and/or Port
@@ -41,7 +41,7 @@ func remapAddr*(
   MultiAddress.init(parts.join("/"))
     .expect("Should construct multiaddress")

-proc getMultiAddrWithIPAndUDPPort*(ip: ValidIpAddress, port: Port): MultiAddress =
+proc getMultiAddrWithIPAndUDPPort*(ip: IpAddress, port: Port): MultiAddress =
   ## Creates a MultiAddress with the specified IP address and UDP port
   ##
   ## Parameters:
@@ -54,7 +54,7 @@ proc getMultiAddrWithIPAndUDPPort*(ip: ValidIpAddress, port: Port): MultiAddress
   let ipFamily = if ip.family == IpAddressFamily.IPv4: "/ip4/" else: "/ip6/"
   return MultiAddress.init(ipFamily & $ip & "/udp/" & $port).expect("valid multiaddr")

-proc getAddressAndPort*(ma: MultiAddress): tuple[ip: Option[ValidIpAddress], port: Option[Port]] =
+proc getAddressAndPort*(ma: MultiAddress): tuple[ip: Option[IpAddress], port: Option[Port]] =
   try:
     # Try IPv4 first
     let ipv4Result = ma[multiCodec("ip4")]
@@ -63,7 +63,7 @@ proc getAddressAndPort*(ma: MultiAddress): tuple[ip: Option[ValidIpAddress], por
         .protoArgument()
         .expect("Invalid IPv4 format")
       let ipArray = [ipBytes[0], ipBytes[1], ipBytes[2], ipBytes[3]]
-      some(ipv4(ipArray))
+      some(IpAddress(family: IPv4, address_v4: ipArray))
     else:
       # Try IPv6 if IPv4 not found
       let ipv6Result = ma[multiCodec("ip6")]
@@ -74,9 +74,9 @@ proc getAddressAndPort*(ma: MultiAddress): tuple[ip: Option[ValidIpAddress], por
       var ipArray: array[16, byte]
       for i in 0..15:
         ipArray[i] = ipBytes[i]
-      some(ipv6(ipArray))
+      some(IpAddress(family: IPv6, address_v6: ipArray))
     else:
-      none(ValidIpAddress)
+      none(IpAddress)

   # Get TCP Port
   let portResult = ma[multiCodec("tcp")]
@@ -89,4 +89,4 @@ proc getAddressAndPort*(ma: MultiAddress): tuple[ip: Option[ValidIpAddress], por
       none(Port)
     (ip: ip, port: port)
   except Exception:
-    (ip: none(ValidIpAddress), port: none(Port))
+    (ip: none(IpAddress), port: none(Port))
|||||||
@ -22,7 +22,7 @@ logScope:
|
|||||||
proc new*[T: Machine](_: type T): T =
|
proc new*[T: Machine](_: type T): T =
|
||||||
T(trackedFutures: TrackedFutures.new())
|
T(trackedFutures: TrackedFutures.new())
|
||||||
|
|
||||||
method `$`*(state: State): string {.base.} =
|
method `$`*(state: State): string {.base, gcsafe.} =
|
||||||
raiseAssert "not implemented"
|
raiseAssert "not implemented"
|
||||||
|
|
||||||
proc transition(_: type Event, previous, next: State): Event =
|
proc transition(_: type Event, previous, next: State): Event =
|
||||||
|
|||||||
@@ -16,14 +16,14 @@ type
 type
   IpLimits* = object
     limit*: uint
-    ips: Table[ValidIpAddress, uint]
+    ips: Table[IpAddress, uint]

-func hash*(ip: ValidIpAddress): Hash =
+func hash*(ip: IpAddress): Hash =
   case ip.family
   of IpAddressFamily.IPv6: hash(ip.address_v6)
   of IpAddressFamily.IPv4: hash(ip.address_v4)

-func inc*(ipLimits: var IpLimits, ip: ValidIpAddress): bool =
+func inc*(ipLimits: var IpLimits, ip: IpAddress): bool =
   let val = ipLimits.ips.getOrDefault(ip, 0)
   if val < ipLimits.limit:
     ipLimits.ips[ip] = val + 1
@@ -31,7 +31,7 @@ func inc*(ipLimits: var IpLimits, ip: ValidIpAddress): bool =
   else:
     false

-func dec*(ipLimits: var IpLimits, ip: ValidIpAddress) =
+func dec*(ipLimits: var IpLimits, ip: IpAddress) =
   let val = ipLimits.ips.getOrDefault(ip, 0)
   if val == 1:
     ipLimits.ips.del(ip)
@@ -48,7 +48,7 @@ func isGlobalUnicast*(address: IpAddress): bool =
   let a = initTAddress(address, Port(0))
   a.isGlobalUnicast()

-proc getRouteIpv4*(): Result[ValidIpAddress, cstring] =
+proc getRouteIpv4*(): Result[IpAddress, cstring] =
   # Avoiding Exception with initTAddress and can't make it work with static.
   # Note: `publicAddress` is only used an "example" IP to find the best route,
   # no data is send over the network to this IP!
@@ -65,4 +65,4 @@ proc getRouteIpv4*(): Result[ValidIpAddress, cstring] =
     # This should not occur really.
     error "Address conversion error", exception = e.name, msg = e.msg
     return err("Invalid IP address")
-  ok(ValidIpAddress.init(ip))
+  ok(ip)
@@ -8,6 +8,14 @@ proc `as`*[T](value: T, U: type): ?U =
   ## Casts a value to another type, returns an Option.
   ## When the cast succeeds, the option will contain the casted value.
   ## When the cast fails, the option will have no value.
+
+  # In Nim 2.0.x, check 42.some as int == none(int)
+  # Maybe because some 42.some looks like Option[Option[int]]
+  # So we check first that the value is an option of the expected type.
+  # In that case, we do not need to do anything, just return the value as it is.
+  when value is Option[U]:
+    return value
+
   when value is U:
     return some value
   elif value is ref object:
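The new when value is Option[U] branch exists because, under Nim 2.0.x, feeding an already-wrapped option through `as` re-wrapped it, so 42.some as int compared equal to none(int). With the guard in place the option passes through untouched. A hedged check, assuming this module's `as` helper and questionable's some are in scope:

import pkg/questionable

let x = 42.some
doAssert (x as int) == 42.some   # before the fix this compared equal to none(int)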
@@ -40,7 +40,7 @@ proc timerLoop(timer: Timer) {.async: (raises: []).} =
   except CatchableError as exc:
     error "Timer caught unhandled exception: ", name=timer.name, msg=exc.msg

-method start*(timer: Timer, callback: TimerCallback, interval: Duration) {.base.} =
+method start*(timer: Timer, callback: TimerCallback, interval: Duration) {.gcsafe, base.} =
   if timer.loopFuture != nil:
     return
   trace "Timer starting: ", name=timer.name
@@ -41,7 +41,7 @@ when defined(windows):

   # The dynamic Chronicles output currently prevents us from using colors on Windows
   # because these require direct manipulations of the stdout File object.
-  switch("define", "chronicles_colors=off")
+  switch("define", "chronicles_colors=NoColors")

   # This helps especially for 32-bit x86, which sans SSE2 and newer instructions
   # requires quite roundabout code generation for cryptography, and other 64-bit
@@ -85,6 +85,8 @@ when (NimMajor, NimMinor) >= (1, 6):
   --warning:"DotLikeOps:off"
 when (NimMajor, NimMinor, NimPatch) >= (1, 6, 11):
   --warning:"BareExcept:off"
+when (NimMajor, NimMinor) >= (2, 0):
+  --mm:refc

 switch("define", "withoutPCRE")
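Nim 2 switched the default memory manager to ORC, so the added block pins the build back to the refc GC this codebase was written against. In NimScript config, the two added lines are equivalent to passing the flag on the command line for Nim >= 2.0:

when (NimMajor, NimMinor) >= (2, 0):
  --mm:refc   # same effect as: nim c --mm:refc ...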
@@ -46,7 +46,8 @@ proc generateNodes*(
     networkStore = NetworkStore.new(engine, localStore)

   switch.mount(network)
-  result.add((
+
+  let nc : NodesComponents = (
     switch,
     discovery,
     wallet,
@@ -56,7 +57,9 @@ proc generateNodes*(
     pendingBlocks,
     blockDiscovery,
     engine,
-    networkStore))
+    networkStore)

+  result.add(nc)
+
 proc connectNodes*(nodes: seq[Switch]) {.async.} =
   for dialer in nodes:
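Extracting the tuple into an explicitly typed local (let nc: NodesComponents = (...)) before result.add works around a Nim 2 inference problem with large anonymous tuple constructors passed directly to add ("Extract node components to a variable to make Nim 2 happy"). A reduced sketch of the same shape, with hypothetical types:

type Pair = tuple[id: int, label: string]

var acc: seq[Pair]
let nc: Pair = (1, "one")   # name the intermediate, then add it
acc.add(nc)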
@@ -5,7 +5,7 @@ import ../helpers

 export merkletree, helpers

-converter toBool*(x: CtBool): bool =
+converter toBool*(x: CTBool): bool =
   bool(x)

 proc `==`*(a, b: Poseidon2Tree): bool =
@@ -70,8 +70,8 @@ template setupAndTearDown*() {.dirty.} =
     network: BlockExcNetwork
     clock: Clock
     localStore: RepoStore
-    localStoreRepoDs: DataStore
-    localStoreMetaDs: DataStore
+    localStoreRepoDs: Datastore
+    localStoreMetaDs: Datastore
     engine: BlockExcEngine
     store: NetworkStore
     node: CodexNodeRef
@@ -80,7 +80,6 @@ template setupAndTearDown*() {.dirty.} =
     pendingBlocks: PendingBlocksManager
     discovery: DiscoveryEngine
     advertiser: Advertiser
-    taskpool: Taskpool

   let
     path = currentSourcePath().parentDir
@@ -110,14 +109,12 @@ template setupAndTearDown*() {.dirty.} =
     advertiser = Advertiser.new(localStore, blockDiscovery)
     engine = BlockExcEngine.new(localStore, wallet, network, discovery, advertiser, peerStore, pendingBlocks)
     store = NetworkStore.new(engine, localStore)
-    taskpool = Taskpool.new(num_threads = countProcessors())
     node = CodexNodeRef.new(
       switch = switch,
       networkStore = store,
       engine = engine,
       prover = Prover.none,
-      discovery = blockDiscovery,
-      taskpool = taskpool)
+      discovery = blockDiscovery)

   teardown:
     close(file)
@@ -2,7 +2,6 @@ import std/os
 import std/options
 import std/times
 import std/importutils
-import std/cpuinfo

 import pkg/chronos
 import pkg/datastore
@@ -76,7 +75,7 @@ asyncchecksuite "Test Node - Host contracts":
     manifestBlock = bt.Block.new(
       manifest.encode().tryGet(),
       codec = ManifestCodec).tryGet()
-    erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider, taskpool)
+    erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider)

     manifestCid = manifestBlock.cid
     manifestCidStr = $(manifestCid)
@@ -2,7 +2,6 @@ import std/os
 import std/options
 import std/math
 import std/importutils
-import std/cpuinfo

 import pkg/chronos
 import pkg/stew/byteutils
@@ -13,7 +12,6 @@ import pkg/questionable/results
 import pkg/stint
 import pkg/poseidon2
 import pkg/poseidon2/io
-import pkg/taskpools

 import pkg/nitro
 import pkg/codexdht/discv5/protocol as discv5
@@ -139,7 +137,7 @@ asyncchecksuite "Test Node - Basic":

   test "Setup purchase request":
     let
-      erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider, taskpool)
+      erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider)
       manifest = await storeDataGetManifest(localStore, chunker)
       manifestBlock = bt.Block.new(
         manifest.encode().tryGet(),
@@ -101,7 +101,7 @@ asyncchecksuite "Sales agent":
     clock.set(market.requestExpiry[request.id] + 1)
     check eventually onCancelCalled

-  for requestState in {RequestState.New, Started, Finished, Failed}:
+  for requestState in {RequestState.New, RequestState.Started, RequestState.Finished, RequestState.Failed}:
     test "onCancelled is not called when request state is " & $requestState:
       agent.start(MockState.new())
       await agent.subscribe()
@@ -110,7 +110,7 @@ asyncchecksuite "Sales agent":
       await sleepAsync(100.millis)
       check not onCancelCalled

-  for requestState in {RequestState.Started, Finished, Failed}:
+  for requestState in {RequestState.Started, RequestState.Finished, RequestState.Failed}:
    test "cancelled future is finished when request state is " & $requestState:
      agent.start(MockState.new())
      await agent.subscribe()
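Both loops now spell out every member because the mixed set-literal form, where later elements borrow the qualification of the first, stopped compiling under Nim 2 for this enum ("Fix enums in tests"). A minimal sketch of the fix; RequestState here is a local stand-in, and the pure annotation is an assumption for illustration:

type RequestState {.pure.} = enum
  New, Started, Finished, Failed

# {RequestState.New, Started, Finished, Failed} no longer resolves;
# each member must be fully qualified:
for s in {RequestState.New, RequestState.Started,
          RequestState.Finished, RequestState.Failed}:
  discard s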
@@ -96,11 +96,11 @@ proc createProtectedManifest*(
     protectedTreeCid = protectedTree.rootCid().tryGet()

   for index, cid in cids[0..<numDatasetBlocks]:
-    let proof = datasetTree.getProof(index).tryget()
+    let proof = datasetTree.getProof(index).tryGet()
     (await store.putCidAndProof(datasetTreeCid, index, cid, proof)).tryGet

   for index, cid in cids:
-    let proof = protectedTree.getProof(index).tryget()
+    let proof = protectedTree.getProof(index).tryGet()
     (await store.putCidAndProof(protectedTreeCid, index, cid, proof)).tryGet

   let
@@ -7,6 +7,7 @@ import pkg/codex/conf
 import pkg/codex/slots/proofs/backends
 import pkg/codex/slots/proofs/backendfactory
 import pkg/codex/slots/proofs/backendutils
+import pkg/codex/utils/natutils

 import ../helpers
 import ../examples
@@ -50,7 +51,7 @@ suite "Test BackendFactory":
       nat: NatConfig(
         hasExtIp: false,
         nat: NatNone),
-      metricsAddress: ValidIpAddress.init("127.0.0.1"),
+      metricsAddress: parseIpAddress("127.0.0.1"),
       persistenceCmd: PersistenceCmd.prover,
       marketplaceAddress: EthAddress.example.some,
       circomR1cs: InputFile("tests/circuits/fixtures/proof_main.r1cs"),
@@ -72,7 +73,7 @@ suite "Test BackendFactory":
       nat: NatConfig(
         hasExtIp: false,
         nat: NatNone),
-      metricsAddress: ValidIpAddress.init("127.0.0.1"),
+      metricsAddress: parseIpAddress("127.0.0.1"),
       persistenceCmd: PersistenceCmd.prover,
       marketplaceAddress: EthAddress.example.some,

@@ -95,7 +96,7 @@ suite "Test BackendFactory":
       nat: NatConfig(
         hasExtIp: false,
         nat: NatNone),
-      metricsAddress: ValidIpAddress.init("127.0.0.1"),
+      metricsAddress: parseIpAddress("127.0.0.1"),
       persistenceCmd: PersistenceCmd.prover,
       marketplaceAddress: EthAddress.example.some,
       circuitDir: OutDir(circuitDir)
@@ -13,6 +13,7 @@ import pkg/confutils/defs
 import pkg/poseidon2/io
 import pkg/codex/utils/poseidon2digest
 import pkg/codex/nat
+import pkg/codex/utils/natutils
 import ./helpers
 import ../helpers

@@ -38,7 +39,7 @@ suite "Test Prover":
       nat: NatConfig(
         hasExtIp: false,
         nat: NatNone),
-      metricsAddress: ValidIpAddress.init("127.0.0.1"),
+      metricsAddress: parseIpAddress("127.0.0.1"),
       persistenceCmd: PersistenceCmd.prover,
       circomR1cs: InputFile("tests/circuits/fixtures/proof_main.r1cs"),
       circomWasm: InputFile("tests/circuits/fixtures/proof_main.wasm"),
@@ -163,8 +163,9 @@ asyncchecksuite "Asynchronous Tests":

   test "Test update":
     var heap = newAsyncHeapQueue[Task](5)
+    let data = [("a", 4), ("b", 3), ("c", 2)]

-    for item in [("a", 4), ("b", 3), ("c", 2)]:
+    for item in data:
       check heap.pushNoWait(item).isOk

     check heap[0] == (name: "c", priority: 2)
@@ -173,8 +174,9 @@ asyncchecksuite "Asynchronous Tests":

   test "Test pushOrUpdate - update":
     var heap = newAsyncHeapQueue[Task](3)
+    let data = [("a", 4), ("b", 3), ("c", 2)]

-    for item in [("a", 4), ("b", 3), ("c", 2)]:
+    for item in data:
       check heap.pushNoWait(item).isOk

     check heap[0] == (name: "c", priority: 2)
@@ -183,8 +185,9 @@ asyncchecksuite "Asynchronous Tests":

   test "Test pushOrUpdate - push":
     var heap = newAsyncHeapQueue[Task](2)
+    let data = [("a", 4), ("b", 3)]

-    for item in [("a", 4), ("b", 3)]:
+    for item in data:
       check heap.pushNoWait(item).isOk

     check heap[0] == ("b", 3) # sanity check for order
@@ -6,16 +6,23 @@ import pkg/chronos
 import ../asynctest
 import ./helpers

+# Trying to use a CancelledError or LPStreamError value for toRaise
+# will produce a compilation error;
+# Error: only a 'ref object' can be raised
+# This is because they are not ref object but plain object.
+# CancelledError* = object of FutureError
+# LPStreamError* = object of LPError
+
 type
   CrashingStreamWrapper* = ref object of LPStream
-    toRaise*: ref CatchableError
+    toRaise*: proc(): void {.gcsafe, raises: [CancelledError, LPStreamError].}

 method readOnce*(
   self: CrashingStreamWrapper,
   pbytes: pointer,
   nbytes: int
-): Future[int] {.async.} =
-  raise self.toRaise
+): Future[int] {.gcsafe, async: (raises: [CancelledError, LPStreamError]).} =
+  self.toRaise()

 asyncchecksuite "Chunking":
   test "should return proper size chunks":
@@ -88,13 +95,14 @@ asyncchecksuite "Chunking":
       string.fromBytes(data) == readFile(path)
       fileChunker.offset == data.len

-proc raiseStreamException(exc: ref CatchableError) {.async.} =
+proc raiseStreamException(exc: ref CancelledError | ref LPStreamError) {.async.} =
   let stream = CrashingStreamWrapper.new()
   let chunker = LPStreamChunker.new(
     stream = stream,
     chunkSize = 2'nb)

-  stream.toRaise = exc
+  stream.toRaise = proc(): void {.raises: [CancelledError, LPStreamError].} =
+    raise exc
   discard (await chunker.getBytes())

 test "stream should forward LPStreamError":
@@ -110,8 +118,4 @@ asyncchecksuite "Chunking":

   test "stream should forward LPStreamError":
     expect LPStreamError:
       await raiseStreamException(newException(LPStreamError, "test error"))
-
-  test "stream should convert other exceptions to defect":
-    expect Defect:
-      await raiseStreamException(newException(CatchableError, "test error"))
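Because CancelledError and LPStreamError are plain objects rather than ref objects (as the comment added in the first chunker hunk notes, only a ref object can be raised), the test stub now stores a raising closure instead of an exception instance, and the closure's raises list matches readOnce's annotated contract. Mirroring the patched helper, setting it up looks like:

stream.toRaise = proc(): void {.raises: [CancelledError, LPStreamError].} =
  # newException builds the ref on the spot, so the raise is legal.
  raise newException(LPStreamError, "simulated read failure")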
@@ -1,6 +1,5 @@
 import std/sequtils
 import std/sugar
-import std/cpuinfo

 import pkg/chronos
 import pkg/questionable/results
@@ -12,7 +11,6 @@ import pkg/codex/blocktype as bt
 import pkg/codex/rng
 import pkg/codex/utils
 import pkg/codex/indexingstrategy
-import pkg/taskpools

 import ../asynctest
 import ./helpers
@@ -27,7 +25,6 @@ suite "Erasure encode/decode":
   var manifest: Manifest
   var store: BlockStore
   var erasure: Erasure
-  var taskpool: Taskpool
   let repoTmp = TempLevelDb.new()
   let metaTmp = TempLevelDb.new()

@@ -38,8 +35,7 @@ suite "Erasure encode/decode":
     rng = Rng.instance()
     chunker = RandomChunker.new(rng, size = dataSetSize, chunkSize = BlockSize)
     store = RepoStore.new(repoDs, metaDs)
-    taskpool = Taskpool.new(num_threads = countProcessors())
-    erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider, taskpool)
+    erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider)
     manifest = await storeDataGetManifest(store, chunker)

   teardown:
@@ -15,7 +15,7 @@ suite "NAT Address Tests":
     udpPort = Port(1234)
     natConfig = NatConfig(
       hasExtIp: true,
-      extIp:ValidIpAddress.init("8.8.8.8"))
+      extIp: parseIpAddress("8.8.8.8"))

     # Create test addresses
     localAddr = MultiAddress.init("/ip4/127.0.0.1/tcp/5000").expect("valid multiaddr")
@@ -2,6 +2,7 @@ import pkg/ethers
 import codex/contracts/deployment
 import codex/conf
 import codex/contracts
+import pkg/codex/utils/natutils

 import ../asynctest
 import ../checktest
@@ -18,7 +19,7 @@ proc configFactory(): CodexConf =
     nat: NatConfig(
       hasExtIp: false,
       nat: NatNone),
-    metricsAddress: ValidIpAddress.init("127.0.0.1"))
+    metricsAddress: parseIpAddress("127.0.0.1"))

 proc configFactory(marketplace: Option[EthAddress]): CodexConf =
   CodexConf(
@@ -26,7 +27,7 @@ proc configFactory(marketplace: Option[EthAddress]): CodexConf =
     nat: NatConfig(
       hasExtIp: false,
       nat: NatNone),
-    metricsAddress: ValidIpAddress.init("127.0.0.1"),
+    metricsAddress: parseIpAddress("127.0.0.1"),
     marketplaceAddress: marketplace)

 asyncchecksuite "Deployment":
@@ -14,9 +14,9 @@ var number = 0
 proc newDb*(self: TempLevelDb): Datastore =
   if self.currentPath.len > 0:
     raiseAssert("TempLevelDb already active.")
-  self.currentPath = getTempDir() / "templeveldb" / $number / $getmonotime()
+  self.currentPath = getTempDir() / "templeveldb" / $number / $getMonoTime()
   inc number
-  createdir(self.currentPath)
+  createDir(self.currentPath)
   self.ds = LevelDbDatastore.new(self.currentPath).tryGet()
   return self.ds

@@ -26,5 +26,5 @@ proc destroyDb*(self: TempLevelDb): Future[void] {.async.} =
   try:
     (await self.ds.close()).tryGet()
   finally:
-    removedir(self.currentPath)
+    removeDir(self.currentPath)
     self.currentPath = ""
@@ -26,22 +26,22 @@ type
     name*: string
   NodeProcessError* = object of CatchableError

-method workingDir(node: NodeProcess): string {.base.} =
+method workingDir(node: NodeProcess): string {.base, gcsafe.} =
   raiseAssert "not implemented"

-method executable(node: NodeProcess): string {.base.} =
+method executable(node: NodeProcess): string {.base, gcsafe.} =
   raiseAssert "not implemented"

-method startedOutput(node: NodeProcess): string {.base.} =
+method startedOutput(node: NodeProcess): string {.base, gcsafe.} =
   raiseAssert "not implemented"

-method processOptions(node: NodeProcess): set[AsyncProcessOption] {.base.} =
+method processOptions(node: NodeProcess): set[AsyncProcessOption] {.base, gcsafe.} =
   raiseAssert "not implemented"

-method outputLineEndings(node: NodeProcess): string {.base, raises: [].} =
+method outputLineEndings(node: NodeProcess): string {.base, gcsafe raises: [].} =
   raiseAssert "not implemented"

-method onOutputLineCaptured(node: NodeProcess, line: string) {.base, raises: [].} =
+method onOutputLineCaptured(node: NodeProcess, line: string) {.base, gcsafe, raises: [].} =
   raiseAssert "not implemented"

 method start*(node: NodeProcess) {.base, async.} =
vendor/asynctest (vendored)
@@ -1 +1 @@
-Subproject commit 8e2f4e73b97123be0f0041c129942b32df23ecb1
+Subproject commit 5154c0d79dd8bb086ab418cc659e923330ac24f2

vendor/codex-storage-proofs-circuits (vendored)
@@ -1 +1 @@
-Subproject commit c03b43221d68e34bd5015a4e4ee1a0ad3299f8ef
+Subproject commit ac8d3667526862458b162bee71dd5dcf6170c209

vendor/combparser (vendored)
@@ -1 +1 @@
-Subproject commit ba4464c005d7617c008e2ed2ebc1ba52feb469c6
+Subproject commit e582c436e8750b60253370fd77960509d36e3738

vendor/constantine (vendored)
@@ -1 +1 @@
-Subproject commit 8367d7d19cdbba874aab961b70d272e742184c37
+Subproject commit bc3845aa492b52f7fef047503b1592e830d1a774

vendor/nim-bearssl (vendored)
@@ -1 +1 @@
-Subproject commit 99fcb3405c55b27cfffbf60f5368c55da7346f23
+Subproject commit 667b40440a53a58e9f922e29e20818720c62d9ac

vendor/nim-blscurve (vendored)
@@ -1 +1 @@
-Subproject commit 48d8668c5a9a350d3a7ee0c3713ef9a11980a40d
+Subproject commit de2d3c79264bba18dbea469c8c5c4b3bb3c8bc55

vendor/nim-chronicles (vendored)
@@ -1 +1 @@
-Subproject commit c9c8e58ec3f89b655a046c485f622f9021c68b61
+Subproject commit 81a4a7a360c78be9c80c8f735c76b6d4a1517304

vendor/nim-chronos (vendored)
@@ -1 +1 @@
-Subproject commit 035ae11ba92369e7722e649db597e79134fd06b9
+Subproject commit c04576d829b8a0a1b12baaa8bc92037501b3a4a0

vendor/nim-codex-dht (vendored)
@@ -1 +1 @@
-Subproject commit 63822e83561ea1c6396d0f3eca583b038f5d44c6
+Subproject commit 4bd3a39e0030f8ee269ef217344b6b59ec2be6dc

vendor/nim-confutils (vendored)
@@ -1 +1 @@
-Subproject commit 2028b41602b3abf7c9bf450744efde7b296707a2
+Subproject commit cb858a27f4347be949d10ed74b58713d687936d2

vendor/nim-contract-abi (vendored)
@@ -1 +1 @@
-Subproject commit 61f8f59b3917d8e27c6eb4330a6d8cf428e98b2d
+Subproject commit 842f48910be4f388bcbf8abf1f02aba1d5e2ee64

vendor/nim-datastore (vendored)
@@ -1 +1 @@
-Subproject commit 3ab6b84a634a7b2ee8c0144f050bf5893cd47c17
+Subproject commit d67860add63fd23cdacde1d3da8f4739c2660c2d

vendor/nim-eth (vendored)
@@ -1 +1 @@
-Subproject commit 15a09fab737d08a2545284c727199c377bb0f4b7
+Subproject commit dcfbc4291d39b59563828c3e32be4d51a2f25931

vendor/nim-ethers (vendored)
@@ -1 +1 @@
-Subproject commit 2808a05488152c8b438d947dc871445164fa1278
+Subproject commit 1cfccb9695fa47860bf7ef3d75da9019096a3933

vendor/nim-faststreams (vendored)
@@ -1 +1 @@
-Subproject commit 720fc5e5c8e428d9d0af618e1e27c44b42350309
+Subproject commit cf8d4d22636b8e514caf17e49f9c786ac56b0e85

vendor/nim-http-utils (vendored)
@@ -1 +1 @@
-Subproject commit be57dbc902d36f37540897e98c69aa80f868cb45
+Subproject commit 8bb1acbaa4b86eb866145b0d468eff64a57d1897

vendor/nim-json-rpc (vendored)
@@ -1 +1 @@
-Subproject commit 0408795be95c00d75e96eaef6eae8a9c734014f5
+Subproject commit 274372132de497e6b7b793c9d5d5474b71bf80a2

vendor/nim-json-serialization (vendored)
@@ -1 +1 @@
-Subproject commit 5127b26ee58076e9369e7c126c196793c2b12e73
+Subproject commit 6eadb6e939ffa7882ff5437033c11a9464d3385c

vendor/nim-leopard (vendored)
@@ -1 +1 @@
-Subproject commit 895ff24ca6615d577acfb11811cdd5465f596c97
+Subproject commit 3e09d8113f874f3584c3fe93818541b2ff9fb9c3

vendor/nim-libbacktrace (vendored)
@@ -1 +1 @@
-Subproject commit b29c22ba0ef13de50b779c776830dbea1d50cd33
+Subproject commit 6da0cda88ab7780bd5fd342327adb91ab84692aa

vendor/nim-libp2p (vendored)
@@ -1 +1 @@
-Subproject commit b239791c568d9f9a76fd66d2322b2754700b6cc5
+Subproject commit 036e110a6080fba1a1662c58cfd8c21f9a548021

vendor/nim-metrics (vendored)
@@ -1 +1 @@
-Subproject commit 6142e433fc8ea9b73379770a788017ac528d46ff
+Subproject commit cacfdc12454a0804c65112b9f4f50d1375208dcd

vendor/nim-nat-traversal (vendored)
@@ -1 +1 @@
-Subproject commit 27d314d65c9078924b3239fe4e2f5af0c512b28c
+Subproject commit 5e4059746e9095e1731b02eeaecd62a70fbe664d

vendor/nim-nitro (vendored)
@@ -1 +1 @@
-Subproject commit 6b4c455bf4dad7449c1580055733a1738fcd5aec
+Subproject commit e3719433d5ace25947c468787c805969642b3913

vendor/nim-poseidon2 (vendored)
@@ -1 +1 @@
-Subproject commit 0346982f2c6891bcedd03d552af3a3bd57b2c1f9
+Subproject commit 4e2c6e619b2f2859aaa4b2aed2f346ea4d0c67a3

vendor/nim-presto (vendored)
@@ -1 +1 @@
-Subproject commit c17bfdda2c60cf5fadb043feb22e328b7659c719
+Subproject commit 92b1c7ff141e6920e1f8a98a14c35c1fa098e3be

vendor/nim-protobuf-serialization (vendored)
@@ -1 +1 @@
-Subproject commit 28214b3e40c755a9886d2ec8f261ec48fbb6bec6
+Subproject commit 5a31137a82c2b6a989c9ed979bb636c7a49f570e

vendor/nim-results (vendored)
@@ -1 +1 @@
-Subproject commit f3c666a272c69d70cb41e7245e7f6844797303ad
+Subproject commit df8113dda4c2d74d460a8fa98252b0b771bf1f27

vendor/nim-serde (vendored)
@@ -1 +1 @@
-Subproject commit 83e4a2ccf621d3040c6e7e0267393ca2d205988e
+Subproject commit 69a7a0111addaa4aad885dd4bd7b5ee4684a06de

vendor/nim-serialization (vendored)
@@ -1 +1 @@
-Subproject commit f709bd9e16b1b6870fe3e4401196479e014a2ef6
+Subproject commit 2086c99608b4bf472e1ef5fe063710f280243396

vendor/nim-sqlite3-abi (vendored)
@@ -1 +1 @@
-Subproject commit 362e1bd9f689ad9f5380d9d27f0705b3d4dfc7d3
+Subproject commit 05bbff1af4e8fe2d972ba4b0667b89ca94d3ebba

vendor/nim-stew (vendored)
@@ -1 +1 @@
-Subproject commit 7afe7e3c070758cac1f628e4330109f3ef6fc853
+Subproject commit a6e198132097fb544d04959aeb3b839e1408f942

vendor/nim-taskpools (vendored)
@@ -1 +1 @@
-Subproject commit b3673c7a7a959ccacb393bd9b47e997bbd177f5a
+Subproject commit 66585e2e960b7695e48ea60377fb3aeac96406e8

vendor/nim-testutils (vendored)
@@ -1 +1 @@
-Subproject commit b56a5953e37fc5117bd6ea6dfa18418c5e112815
+Subproject commit 4d37244f9f5e1acd8592a4ceb5c3fc47bc160181

vendor/nim-toml-serialization (vendored)
@@ -1 +1 @@
-Subproject commit 86d477136f105f04bfd0dd7c0e939593d81fc581
+Subproject commit fea85b27f0badcf617033ca1bc05444b5fd8aa7a

vendor/nim-unittest2 (vendored)
@@ -1 +1 @@
-Subproject commit b178f47527074964f76c395ad0dfc81cf118f379
+Subproject commit 845b6af28b9f68f02d320e03ad18eccccea7ddb9

vendor/nim-websock (vendored)
@@ -1 +1 @@
-Subproject commit 2c3ae3137f3c9cb48134285bd4a47186fa51f0e8
+Subproject commit ebe308a79a7b440a11dfbe74f352be86a3883508

vendor/nim-zlib (vendored)
@@ -1 +1 @@
-Subproject commit f34ca261efd90f118dc1647beefd2f7a69b05d93
+Subproject commit 91cf360b1aeb2e0c753ff8bac6de22a41c5ed8cd

vendor/nimbus-build-system (vendored)
@@ -1 +1 @@
-Subproject commit fe9bc3f3759ae1add6bf8c899db2e75327f03782
+Subproject commit 4c6ff070c116450bb2c285691724ac9e6202cb28

vendor/nimcrypto (vendored)
@@ -1 +1 @@
-Subproject commit 24e006df85927f64916e60511620583b11403178
+Subproject commit dc07e3058c6904eef965394493b6ea99aa2adefc

vendor/npeg (vendored)
@@ -1 +1 @@
-Subproject commit b15a10e388b91b898c581dbbcb6a718d46b27d2f
+Subproject commit 409f6796d0e880b3f0222c964d1da7de6e450811

vendor/stint (vendored)
@@ -1 +1 @@
-Subproject commit 86621eced1dcfb5e25903019ebcfc76ed9128ec5
+Subproject commit 5c5e01cef089a261474b7abfe246b37447aaa8ed

vendor/upraises (vendored)
@@ -1 +1 @@
-Subproject commit ff4f8108e44fba9b35cac535ab63d3927e8fd3c2
+Subproject commit bc2628989b63854d980e92dadbd58f83e34b6f25