diff --git a/.github/actions/nimbus-build-system/action.yml b/.github/actions/nimbus-build-system/action.yml index b80c05c7..c7bdb627 100644 --- a/.github/actions/nimbus-build-system/action.yml +++ b/.github/actions/nimbus-build-system/action.yml @@ -11,13 +11,16 @@ inputs: default: "amd64" nim_version: description: "Nim version" - default: "version-1-6" + default: "v2.0.14" rust_version: description: "Rust version" default: "1.79.0" shell: description: "Shell to run commands in" default: "bash --noprofile --norc -e -o pipefail" + coverage: + description: "True if the process is used for coverage" + default: false runs: using: "composite" steps: @@ -31,8 +34,8 @@ runs: if: inputs.os == 'linux' && (inputs.cpu == 'amd64' || inputs.cpu == 'arm64') shell: ${{ inputs.shell }} {0} run: | - sudo apt-fast update -qq - sudo DEBIAN_FRONTEND='noninteractive' apt-fast install \ + sudo apt-get update -qq + sudo DEBIAN_FRONTEND='noninteractive' apt-get install \ --no-install-recommends -yq lcov - name: APT (Linux i386) @@ -40,8 +43,8 @@ runs: shell: ${{ inputs.shell }} {0} run: | sudo dpkg --add-architecture i386 - sudo apt-fast update -qq - sudo DEBIAN_FRONTEND='noninteractive' apt-fast install \ + sudo apt-get update -qq + sudo DEBIAN_FRONTEND='noninteractive' apt-get install \ --no-install-recommends -yq gcc-multilib g++-multilib - name: Homebrew (macOS) @@ -78,11 +81,21 @@ runs: mingw-w64-i686-ntldd-git mingw-w64-i686-rust - - name: MSYS2 (Windows All) - Downgrade to gcc 13 + - name: MSYS2 (Windows All) - Update to gcc 14 if: inputs.os == 'windows' shell: ${{ inputs.shell }} {0} run: | - pacman -U --noconfirm https://repo.msys2.org/mingw/ucrt64/mingw-w64-ucrt-x86_64-gcc-13.2.0-6-any.pkg.tar.zst https://repo.msys2.org/mingw/ucrt64/mingw-w64-ucrt-x86_64-gcc-libs-13.2.0-6-any.pkg.tar.zst + pacman -U --noconfirm https://repo.msys2.org/mingw/ucrt64/mingw-w64-ucrt-x86_64-gcc-14.2.0-2-any.pkg.tar.zst https://repo.msys2.org/mingw/ucrt64/mingw-w64-ucrt-x86_64-gcc-libs-14.2.0-2-any.pkg.tar.zst + + - name: Install gcc 14 on Linux + # We don't want to install gcc 14 for coverage (Ubuntu 20.04) + if : ${{ inputs.os == 'linux' && !inputs.coverage }} + shell: ${{ inputs.shell }} {0} + run: | + # Add GCC-14 to alternatives + sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-14 14 + # Set GCC-14 as the default + sudo update-alternatives --set gcc /usr/bin/gcc-14 - name: Derive environment variables shell: ${{ inputs.shell }} {0} @@ -159,6 +172,7 @@ runs: - name: Restore Nim toolchain binaries from cache id: nim-cache uses: actions/cache@v4 + if : ${{ !inputs.coverage }} with: path: NimBinaries key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_version }}-cache-${{ env.cache_nonce }}-${{ github.run_id }} @@ -168,9 +182,16 @@ runs: shell: ${{ inputs.shell }} {0} run: echo "NIM_COMMIT=${{ inputs.nim_version }}" >> ${GITHUB_ENV} + - name: MSYS2 (Windows All) - Disable git symbolic links (since miniupnp 2.2.5) + if: inputs.os == 'windows' + shell: ${{ inputs.shell }} {0} + run: | + git config --global core.symlinks false + - name: Build Nim and Codex dependencies shell: ${{ inputs.shell }} {0} run: | + gcc --version make -j${ncpu} CI_CACHE=NimBinaries ${ARCH_OVERRIDE} QUICK_AND_DIRTY_COMPILER=1 update echo ./env.sh nim --version diff --git a/.github/workflows/ci-reusable.yml b/.github/workflows/ci-reusable.yml index 55846d89..ce66a9b6 100644 --- a/.github/workflows/ci-reusable.yml +++ b/.github/workflows/ci-reusable.yml @@ -40,6 +40,7 @@ jobs: os: ${{ matrix.os }} shell: ${{ matrix.shell }} 
nim_version: ${{ matrix.nim_version }} + coverage: false ## Part 1 Tests ## - name: Unit tests diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0d27077a..9b865de6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -9,7 +9,7 @@ on: env: cache_nonce: 0 # Allows for easily busting actions/cache caches - nim_version: pinned + nim_version: v2.0.14 concurrency: group: ${{ github.workflow }}-${{ github.ref || github.run_id }} @@ -27,10 +27,10 @@ jobs: uses: fabiocaccamo/create-matrix-action@v4 with: matrix: | - os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} - os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} - os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} - os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} + os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} + os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} + os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} + os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} os {macos}, cpu {amd64}, builder {macos-13}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} os {macos}, cpu {amd64}, builder {macos-13}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} os {macos}, cpu {amd64}, builder {macos-13}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail} @@ -48,6 +48,10 @@ jobs: cache_nonce: ${{ needs.matrix.outputs.cache_nonce }} coverage: + # Force to stick to ubuntu 20.04 for coverage because + # lcov was updated to 2.x version in ubuntu-latest + # and cause a lot of issues. + # See https://github.com/linux-test-project/lcov/issues/238 runs-on: ubuntu-20.04 steps: - name: Checkout sources @@ -61,6 +65,7 @@ jobs: with: os: linux nim_version: ${{ env.nim_version }} + coverage: true - name: Generate coverage data run: | diff --git a/Makefile b/Makefile index cd6e5a0a..22cb2b31 100644 --- a/Makefile +++ b/Makefile @@ -15,8 +15,8 @@ # # If NIM_COMMIT is set to "nimbusbuild", this will use the # version pinned by nimbus-build-system. 
-PINNED_NIM_VERSION := 38640664088251bbc88917b4bacfd86ec53014b8 # 1.6.21 - +#PINNED_NIM_VERSION := 38640664088251bbc88917b4bacfd86ec53014b8 # 1.6.21 +PINNED_NIM_VERSION := v2.0.14 ifeq ($(NIM_COMMIT),) NIM_COMMIT := $(PINNED_NIM_VERSION) else ifeq ($(NIM_COMMIT),pinned) diff --git a/codex.nim b/codex.nim index 0b295d92..e2c6033e 100644 --- a/codex.nim +++ b/codex.nim @@ -47,7 +47,7 @@ when isMainModule: let config = CodexConf.load( version = codexFullVersion, envVarsPrefix = "codex", - secondarySources = proc (config: CodexConf, sources: auto) = + secondarySources = proc (config: CodexConf, sources: auto) {.gcsafe, raises: [ConfigurationError].} = if configFile =? config.configFile: sources.addConfigFile(Toml, configFile) ) diff --git a/codex/clock.nim b/codex/clock.nim index f680ddec..933cd199 100644 --- a/codex/clock.nim +++ b/codex/clock.nim @@ -8,7 +8,7 @@ type SecondsSince1970* = int64 Timeout* = object of CatchableError -method now*(clock: Clock): SecondsSince1970 {.base, upraises: [].} = +method now*(clock: Clock): SecondsSince1970 {.base, gcsafe, upraises: [].} = raiseAssert "not implemented" method waitUntil*(clock: Clock, time: SecondsSince1970) {.base, async.} = diff --git a/codex/codex.nim b/codex/codex.nim index 8ecdd178..b22bf3d4 100644 --- a/codex/codex.nim +++ b/codex/codex.nim @@ -11,7 +11,6 @@ import std/sequtils import std/strutils import std/os import std/tables -import std/cpuinfo import pkg/chronos import pkg/presto @@ -24,7 +23,6 @@ import pkg/stew/shims/net as stewnet import pkg/datastore import pkg/ethers except Rng import pkg/stew/io2 -import pkg/taskpools import ./node import ./conf @@ -56,7 +54,6 @@ type codexNode: CodexNodeRef repoStore: RepoStore maintenance: BlockMaintainer - taskpool: Taskpool CodexPrivateKey* = libp2p.PrivateKey # alias EthWallet = ethers.Wallet @@ -174,10 +171,6 @@ proc start*(s: CodexServer) {.async.} = proc stop*(s: CodexServer) {.async.} = notice "Stopping codex node" - - s.taskpool.syncAll() - s.taskpool.shutdown() - await allFuturesThrowing( s.restServer.stop(), s.codexNode.switch.stop(), @@ -266,15 +259,12 @@ proc new*( else: none Prover - taskpool = Taskpool.new(num_threads = countProcessors()) - codexNode = CodexNodeRef.new( switch = switch, networkStore = store, engine = engine, - prover = prover, discovery = discovery, - taskpool = taskpool) + prover = prover) restServer = RestServerRef.new( codexNode.initRestApi(config, repoStore, config.apiCorsAllowedOrigin), @@ -290,5 +280,4 @@ proc new*( codexNode: codexNode, restServer: restServer, repoStore: repoStore, - maintenance: maintenance, - taskpool: taskpool) + maintenance: maintenance) diff --git a/codex/conf.nim b/codex/conf.nim index 3a232b9f..41ee628e 100644 --- a/codex/conf.nim +++ b/codex/conf.nim @@ -43,6 +43,7 @@ import ./units import ./utils import ./nat import ./utils/natutils + from ./validationconfig import MaxSlots, ValidationGroups export units, net, codextypes, logutils, completeCmdArg, parseCmdArg, NatConfig @@ -119,9 +120,9 @@ type metricsAddress* {. desc: "Listening address of the metrics server" - defaultValue: ValidIpAddress.init("127.0.0.1") + defaultValue: defaultAddress(config) defaultValueDesc: "127.0.0.1" - name: "metrics-address" }: ValidIpAddress + name: "metrics-address" }: IpAddress metricsPort* {. desc: "Listening HTTP port of the metrics server" @@ -147,7 +148,7 @@ type nat* {. desc: "Specify method to use for determining public address. 
" & "Must be one of: any, none, upnp, pmp, extip:" - defaultValue: NatConfig(hasExtIp: false, nat: NatAny) + defaultValue: defaultNatConfig() defaultValueDesc: "any" name: "nat" }: NatConfig @@ -410,6 +411,12 @@ type logutils.formatIt(LogFormat.textLines, EthAddress): it.short0xHexLog logutils.formatIt(LogFormat.json, EthAddress): %it +func defaultAddress*(conf: CodexConf): IpAddress = + result = static parseIpAddress("127.0.0.1") + +func defaultNatConfig*(): NatConfig = + result = NatConfig(hasExtIp: false, nat: NatStrategy.NatAny) + func persistence*(self: CodexConf): bool = self.cmd == StartUpCmd.persistence @@ -442,13 +449,17 @@ const proc parseCmdArg*(T: typedesc[MultiAddress], input: string): MultiAddress - {.upraises: [ValueError, LPError].} = + {.upraises: [ValueError] .} = var ma: MultiAddress - let res = MultiAddress.init(input) - if res.isOk: - ma = res.get() - else: - warn "Invalid MultiAddress", input=input, error = res.error() + try: + let res = MultiAddress.init(input) + if res.isOk: + ma = res.get() + else: + warn "Invalid MultiAddress", input=input, error = res.error() + quit QuitFailure + except LPError as exc: + warn "Invalid MultiAddress uri", uri = input, error = exc.msg quit QuitFailure ma @@ -458,6 +469,9 @@ proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T = if not res.fromURI(uri): warn "Invalid SignedPeerRecord uri", uri = uri quit QuitFailure + except LPError as exc: + warn "Invalid SignedPeerRecord uri", uri = uri, error = exc.msg + quit QuitFailure except CatchableError as exc: warn "Invalid SignedPeerRecord uri", uri = uri, error = exc.msg quit QuitFailure @@ -476,7 +490,7 @@ func parseCmdArg*(T: type NatConfig, p: string): T {.raises: [ValueError].} = else: if p.startsWith("extip:"): try: - let ip = ValidIpAddress.init(p[6..^1]) + let ip = parseIpAddress(p[6..^1]) NatConfig(hasExtIp: true, extIp: ip) except ValueError: let error = "Not a valid IP address: " & p[6..^1] @@ -516,7 +530,11 @@ proc readValue*(r: var TomlReader, val: var SignedPeerRecord) = error "invalid SignedPeerRecord configuration value", error = err.msg quit QuitFailure - val = SignedPeerRecord.parseCmdArg(uri) + try: + val = SignedPeerRecord.parseCmdArg(uri) + except LPError as err: + warn "Invalid SignedPeerRecord uri", uri = uri, error = err.msg + quit QuitFailure proc readValue*(r: var TomlReader, val: var MultiAddress) = without input =? 
r.readValue(string).catch, err: diff --git a/codex/discovery.nim b/codex/discovery.nim index d2ea1147..e3e37d61 100644 --- a/codex/discovery.nim +++ b/codex/discovery.nim @@ -17,6 +17,7 @@ import pkg/questionable/results import pkg/stew/shims/net import pkg/contractabi/address as ca import pkg/codexdht/discv5/[routing_table, protocol as discv5] +from pkg/nimcrypto import keccak256 import ./rng import ./errors @@ -124,7 +125,7 @@ method provide*(d: Discovery, host: ca.Address) {.async, base.} = method removeProvider*( d: Discovery, - peerId: PeerId): Future[void] {.base.} = + peerId: PeerId): Future[void] {.base, gcsafe.} = ## Remove provider from providers table ## @@ -169,7 +170,7 @@ proc stop*(d: Discovery) {.async.} = proc new*( T: type Discovery, key: PrivateKey, - bindIp = ValidIpAddress.init(IPv4_any()), + bindIp = IPv4_any(), bindPort = 0.Port, announceAddrs: openArray[MultiAddress], bootstrapNodes: openArray[SignedPeerRecord] = [], @@ -199,7 +200,7 @@ proc new*( self.protocol = newProtocol( key, - bindIp = bindIp.toNormalIp, + bindIp = bindIp, bindPort = bindPort, record = self.providerRecord.get, bootstrapRecords = bootstrapNodes, diff --git a/codex/erasure/backend.nim b/codex/erasure/backend.nim index 728c8bed..873d800e 100644 --- a/codex/erasure/backend.nim +++ b/codex/erasure/backend.nim @@ -22,7 +22,7 @@ type EncoderBackend* = ref object of ErasureBackend DecoderBackend* = ref object of ErasureBackend -method release*(self: ErasureBackend) {.base.} = +method release*(self: ErasureBackend) {.base, gcsafe.} = ## release the backend ## raiseAssert("not implemented!") @@ -31,7 +31,7 @@ method encode*( self: EncoderBackend, buffers, parity: var openArray[seq[byte]] -): Result[void, cstring] {.base.} = +): Result[void, cstring] {.base, gcsafe.} = ## encode buffers using a backend ## raiseAssert("not implemented!") @@ -41,7 +41,7 @@ method decode*( buffers, parity, recovered: var openArray[seq[byte]] -): Result[void, cstring] {.base.} = +): Result[void, cstring] {.base, gcsafe.} = ## decode buffers using a backend ## raiseAssert("not implemented!") diff --git a/codex/erasure/erasure.nim b/codex/erasure/erasure.nim index 56e3e1cf..d35fc18d 100644 --- a/codex/erasure/erasure.nim +++ b/codex/erasure/erasure.nim @@ -17,7 +17,6 @@ import std/sugar import pkg/chronos import pkg/libp2p/[multicodec, cid, multihash] import pkg/libp2p/protobuf/minprotobuf -import pkg/taskpools import ../logutils import ../manifest @@ -32,7 +31,6 @@ import ../errors import pkg/stew/byteutils import ./backend -import ./asyncbackend export backend @@ -73,7 +71,6 @@ type encoderProvider*: EncoderProvider decoderProvider*: DecoderProvider store*: BlockStore - taskpool: Taskpool EncodingParams = object ecK: Natural @@ -295,23 +292,30 @@ proc encodeData( # TODO: Don't allocate a new seq every time, allocate once and zero out var data = seq[seq[byte]].new() # number of blocks to encode + parityData = newSeqWith[seq[byte]](params.ecM, newSeq[byte](manifest.blockSize.int)) data[].setLen(params.ecK) + # TODO: this is a tight blocking loop so we sleep here to allow + # other events to be processed, this should be addressed + # by threading + await sleepAsync(10.millis) without resolved =? (await self.prepareEncodingData(manifest, params, step, data, cids, emptyBlock)), err: trace "Unable to prepare data", error = err.msg return failure(err) - trace "Erasure coding data", data = data[].len, parity = params.ecM + trace "Erasure coding data", data = data[].len, parity = parityData.len - without parity =? 
await asyncEncode(self.taskpool, encoder, data, manifest.blockSize.int, params.ecM), err: - trace "Error encoding data", err = err.msg - return failure(err) + if ( + let res = encoder.encode(data[], parityData); + res.isErr): + trace "Unable to encode manifest!", error = $res.error + return failure($res.error) var idx = params.rounded + step for j in 0.. 0.5 ) proc init*(_: type RestRoutingTable, routingTable: rt.RoutingTable): RestRoutingTable = diff --git a/codex/sales/states/filling.nim b/codex/sales/states/filling.nim index a5531e79..ce2e53f5 100644 --- a/codex/sales/states/filling.nim +++ b/codex/sales/states/filling.nim @@ -36,7 +36,7 @@ method run(state: SaleFilling, machine: Machine): Future[?State] {.async.} = slotIndex = data.slotIndex let slotState = await market.slotState(slotId(data.requestId, data.slotIndex)) - var collateral: Uint256 + var collateral: UInt256 if slotState == SlotState.Repair: # When repairing the node gets "discount" on the collateral that it needs to diff --git a/codex/slots/proofs/backendfactory.nim b/codex/slots/proofs/backendfactory.nim index 80dc1b8e..ac478e1a 100644 --- a/codex/slots/proofs/backendfactory.nim +++ b/codex/slots/proofs/backendfactory.nim @@ -43,7 +43,7 @@ proc zkeyFilePath(config: CodexConf): string = proc initializeFromCircuitDirFiles( config: CodexConf, - utils: BackendUtils): ?!AnyBackend = + utils: BackendUtils): ?!AnyBackend {.gcsafe.} = if fileExists(config.r1csFilePath) and fileExists(config.wasmFilePath) and fileExists(config.zkeyFilePath): diff --git a/codex/slots/proofs/backends/circomcompat.nim b/codex/slots/proofs/backends/circomcompat.nim index 8619457a..374b8151 100644 --- a/codex/slots/proofs/backends/circomcompat.nim +++ b/codex/slots/proofs/backends/circomcompat.nim @@ -76,7 +76,7 @@ proc release*(self: CircomCompat) = ## if not isNil(self.backendCfg): - self.backendCfg.unsafeAddr.releaseCfg() + self.backendCfg.unsafeAddr.release_cfg() if not isNil(self.vkp): self.vkp.unsafeAddr.release_key() @@ -102,9 +102,9 @@ proc prove[H]( defer: if ctx != nil: - ctx.addr.releaseCircomCompat() + ctx.addr.release_circom_compat() - if initCircomCompat( + if init_circom_compat( self.backendCfg, addr ctx) != ERR_OK or ctx == nil: raiseAssert("failed to initialize CircomCompat ctx") @@ -114,27 +114,27 @@ proc prove[H]( dataSetRoot = input.datasetRoot.toBytes slotRoot = input.slotRoot.toBytes - if ctx.pushInputU256Array( + if ctx.push_input_u256_array( "entropy".cstring, entropy[0].addr, entropy.len.uint32) != ERR_OK: return failure("Failed to push entropy") - if ctx.pushInputU256Array( + if ctx.push_input_u256_array( "dataSetRoot".cstring, dataSetRoot[0].addr, dataSetRoot.len.uint32) != ERR_OK: return failure("Failed to push data set root") - if ctx.pushInputU256Array( + if ctx.push_input_u256_array( "slotRoot".cstring, slotRoot[0].addr, slotRoot.len.uint32) != ERR_OK: return failure("Failed to push data set root") - if ctx.pushInputU32( + if ctx.push_input_u32( "nCellsPerSlot".cstring, input.nCellsPerSlot.uint32) != ERR_OK: return failure("Failed to push nCellsPerSlot") - if ctx.pushInputU32( + if ctx.push_input_u32( "nSlotsPerDataSet".cstring, input.nSlotsPerDataSet.uint32) != ERR_OK: return failure("Failed to push nSlotsPerDataSet") - if ctx.pushInputU32( + if ctx.push_input_u32( "slotIndex".cstring, input.slotIndex.uint32) != ERR_OK: return failure("Failed to push slotIndex") @@ -143,7 +143,7 @@ proc prove[H]( doAssert(slotProof.len == self.datasetDepth) # arrays are always flattened - if ctx.pushInputU256Array( + if 
ctx.push_input_u256_array( "slotProof".cstring, slotProof[0].addr, uint (slotProof[0].len * slotProof.len)) != ERR_OK: @@ -154,13 +154,13 @@ proc prove[H]( merklePaths = s.merklePaths.mapIt( it.toBytes ) data = s.cellData.mapIt( @(it.toBytes) ).concat - if ctx.pushInputU256Array( + if ctx.push_input_u256_array( "merklePaths".cstring, merklePaths[0].addr, uint (merklePaths[0].len * merklePaths.len)) != ERR_OK: return failure("Failed to push merkle paths") - if ctx.pushInputU256Array( + if ctx.push_input_u256_array( "cellData".cstring, data[0].addr, data.len.uint) != ERR_OK: @@ -172,7 +172,7 @@ proc prove[H]( let proof = try: if ( - let res = self.backendCfg.proveCircuit(ctx, proofPtr.addr); + let res = self.backendCfg.prove_circuit(ctx, proofPtr.addr); res != ERR_OK) or proofPtr == nil: return failure("Failed to prove - err code: " & $res) @@ -180,7 +180,7 @@ proc prove[H]( proofPtr[] finally: if proofPtr != nil: - proofPtr.addr.releaseProof() + proofPtr.addr.release_proof() success proof @@ -202,7 +202,7 @@ proc verify*[H]( inputs = inputs.toCircomInputs() try: - let res = verifyCircuit(proofPtr, inputs.addr, self.vkp) + let res = verify_circuit(proofPtr, inputs.addr, self.vkp) if res == ERR_OK: success true elif res == ERR_FAILED_TO_VERIFY_PROOF: @@ -228,18 +228,18 @@ proc init*( var cfg: ptr CircomBn254Cfg var zkey = if zkeyPath.len > 0: zkeyPath.cstring else: nil - if initCircomConfig( + if init_circom_config( r1csPath.cstring, wasmPath.cstring, zkey, cfg.addr) != ERR_OK or cfg == nil: - if cfg != nil: cfg.addr.releaseCfg() + if cfg != nil: cfg.addr.release_cfg() raiseAssert("failed to initialize circom compat config") var vkpPtr: ptr VerifyingKey = nil - if cfg.getVerifyingKey(vkpPtr.addr) != ERR_OK or vkpPtr == nil: - if vkpPtr != nil: vkpPtr.addr.releaseKey() + if cfg.get_verifying_key(vkpPtr.addr) != ERR_OK or vkpPtr == nil: + if vkpPtr != nil: vkpPtr.addr.release_key() raiseAssert("Failed to get verifying key") CircomCompat( diff --git a/codex/slots/proofs/backendutils.nim b/codex/slots/proofs/backendutils.nim index 19740acb..f7e6e2e1 100644 --- a/codex/slots/proofs/backendutils.nim +++ b/codex/slots/proofs/backendutils.nim @@ -8,5 +8,5 @@ method initializeCircomBackend*( r1csFile: string, wasmFile: string, zKeyFile: string -): AnyBackend {.base.} = +): AnyBackend {.base, gcsafe.}= CircomCompat.init(r1csFile, wasmFile, zKeyFile) diff --git a/codex/stores/blockstore.nim b/codex/stores/blockstore.nim index 52f37517..791e7d5b 100644 --- a/codex/stores/blockstore.nim +++ b/codex/stores/blockstore.nim @@ -33,13 +33,13 @@ type BlockStore* = ref object of RootObj onBlockStored*: ?CidCallback -method getBlock*(self: BlockStore, cid: Cid): Future[?!Block] {.base.} = +method getBlock*(self: BlockStore, cid: Cid): Future[?!Block] {.base, gcsafe.} = ## Get a block from the blockstore ## raiseAssert("getBlock by cid not implemented!") -method getBlock*(self: BlockStore, treeCid: Cid, index: Natural): Future[?!Block] {.base.} = +method getBlock*(self: BlockStore, treeCid: Cid, index: Natural): Future[?!Block] {.base, gcsafe.} = ## Get a block from the blockstore ## @@ -50,13 +50,13 @@ method getCid*(self: BlockStore, treeCid: Cid, index: Natural): Future[?!Cid] {. 
## raiseAssert("getCid by treecid not implemented!") -method getBlock*(self: BlockStore, address: BlockAddress): Future[?!Block] {.base.} = +method getBlock*(self: BlockStore, address: BlockAddress): Future[?!Block] {.base, gcsafe.} = ## Get a block from the blockstore ## raiseAssert("getBlock by addr not implemented!") -method getBlockAndProof*(self: BlockStore, treeCid: Cid, index: Natural): Future[?!(Block, CodexProof)] {.base.} = +method getBlockAndProof*(self: BlockStore, treeCid: Cid, index: Natural): Future[?!(Block, CodexProof)] {.base, gcsafe.} = ## Get a block and associated inclusion proof by Cid of a merkle tree and an index of a leaf in a tree ## @@ -65,7 +65,7 @@ method getBlockAndProof*(self: BlockStore, treeCid: Cid, index: Natural): Future method putBlock*( self: BlockStore, blk: Block, - ttl = Duration.none): Future[?!void] {.base.} = + ttl = Duration.none): Future[?!void] {.base, gcsafe.} = ## Put a block to the blockstore ## @@ -76,7 +76,7 @@ method putCidAndProof*( treeCid: Cid, index: Natural, blockCid: Cid, - proof: CodexProof): Future[?!void] {.base.} = + proof: CodexProof): Future[?!void] {.base, gcsafe.} = ## Put a block proof to the blockstore ## @@ -85,7 +85,7 @@ method putCidAndProof*( method getCidAndProof*( self: BlockStore, treeCid: Cid, - index: Natural): Future[?!(Cid, CodexProof)] {.base.} = + index: Natural): Future[?!(Cid, CodexProof)] {.base, gcsafe.} = ## Get a block proof from the blockstore ## @@ -94,7 +94,7 @@ method getCidAndProof*( method ensureExpiry*( self: BlockStore, cid: Cid, - expiry: SecondsSince1970): Future[?!void] {.base.} = + expiry: SecondsSince1970): Future[?!void] {.base, gcsafe.} = ## Ensure that block's assosicated expiry is at least given timestamp ## If the current expiry is lower then it is updated to the given one, otherwise it is left intact ## @@ -105,32 +105,32 @@ method ensureExpiry*( self: BlockStore, treeCid: Cid, index: Natural, - expiry: SecondsSince1970): Future[?!void] {.base.} = + expiry: SecondsSince1970): Future[?!void] {.base, gcsafe.} = ## Ensure that block's associated expiry is at least given timestamp ## If the current expiry is lower then it is updated to the given one, otherwise it is left intact ## raiseAssert("Not implemented!") -method delBlock*(self: BlockStore, cid: Cid): Future[?!void] {.base.} = +method delBlock*(self: BlockStore, cid: Cid): Future[?!void] {.base, gcsafe.} = ## Delete a block from the blockstore ## raiseAssert("delBlock not implemented!") -method delBlock*(self: BlockStore, treeCid: Cid, index: Natural): Future[?!void] {.base.} = +method delBlock*(self: BlockStore, treeCid: Cid, index: Natural): Future[?!void] {.base, gcsafe.} = ## Delete a block from the blockstore ## raiseAssert("delBlock not implemented!") -method hasBlock*(self: BlockStore, cid: Cid): Future[?!bool] {.base.} = +method hasBlock*(self: BlockStore, cid: Cid): Future[?!bool] {.base, gcsafe.} = ## Check if the block exists in the blockstore ## raiseAssert("hasBlock not implemented!") -method hasBlock*(self: BlockStore, tree: Cid, index: Natural): Future[?!bool] {.base.} = +method hasBlock*(self: BlockStore, tree: Cid, index: Natural): Future[?!bool] {.base, gcsafe.} = ## Check if the block exists in the blockstore ## @@ -138,13 +138,13 @@ method hasBlock*(self: BlockStore, tree: Cid, index: Natural): Future[?!bool] {. 
method listBlocks*( self: BlockStore, - blockType = BlockType.Manifest): Future[?!AsyncIter[?Cid]] {.base.} = + blockType = BlockType.Manifest): Future[?!AsyncIter[?Cid]] {.base, gcsafe.} = ## Get the list of blocks in the BlockStore. This is an intensive operation ## raiseAssert("listBlocks not implemented!") -method close*(self: BlockStore): Future[void] {.base.} = +method close*(self: BlockStore): Future[void] {.base, gcsafe.} = ## Close the blockstore, cleaning up resources managed by it. ## For some implementations this may be a no-op ## diff --git a/codex/stores/repostore/store.nim b/codex/stores/repostore/store.nim index 5ff99e64..63c59d2b 100644 --- a/codex/stores/repostore/store.nim +++ b/codex/stores/repostore/store.nim @@ -323,15 +323,16 @@ method getBlockExpirations*( return failure(err) let - filteredIter = await asyncQueryIter.filterSuccess() - blockExpIter = await mapFilter[KeyVal[BlockMetadata], BlockExpiration](filteredIter, - proc (kv: KeyVal[BlockMetadata]): Future[?BlockExpiration] {.async.} = - without cid =? Cid.init(kv.key.value).mapFailure, err: - error "Failed decoding cid", err = err.msg - return BlockExpiration.none + filteredIter: AsyncIter[KeyVal[BlockMetadata]] = await asyncQueryIter.filterSuccess() - BlockExpiration(cid: cid, expiry: kv.value.expiry).some - ) + proc mapping (kv: KeyVal[BlockMetadata]): Future[?BlockExpiration] {.async.} = + without cid =? Cid.init(kv.key.value).mapFailure, err: + error "Failed decoding cid", err = err.msg + return BlockExpiration.none + + BlockExpiration(cid: cid, expiry: kv.value.expiry).some + + let blockExpIter = await mapFilter[KeyVal[BlockMetadata], BlockExpiration](filteredIter, mapping) success(blockExpIter) diff --git a/codex/streams/asyncstreamwrapper.nim b/codex/streams/asyncstreamwrapper.nim index a8a55955..327643b4 100644 --- a/codex/streams/asyncstreamwrapper.nim +++ b/codex/streams/asyncstreamwrapper.nim @@ -64,7 +64,7 @@ method readOnce*( self: AsyncStreamWrapper, pbytes: pointer, nbytes: int -): Future[int] {.async.} = +): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} = trace "Reading bytes from reader", bytes = nbytes if isNil(self.reader): @@ -118,7 +118,7 @@ method closed*(self: AsyncStreamWrapper): bool = method atEof*(self: AsyncStreamWrapper): bool = self.reader.atEof() -method closeImpl*(self: AsyncStreamWrapper) {.async.} = +method closeImpl*(self: AsyncStreamWrapper) {.async: (raises: []).} = try: trace "Shutting down async chronos stream" if not self.closed(): @@ -130,7 +130,7 @@ method closeImpl*(self: AsyncStreamWrapper) {.async.} = trace "Shutdown async chronos stream" except CancelledError as exc: - raise exc + error "Error received cancelled error when closing chronos stream", msg = exc.msg except CatchableError as exc: trace "Error closing async chronos stream", msg = exc.msg diff --git a/codex/streams/storestream.nim b/codex/streams/storestream.nim index 8a3b1a3c..bdfcc439 100644 --- a/codex/streams/storestream.nim +++ b/codex/streams/storestream.nim @@ -73,11 +73,20 @@ proc `size=`*(self: StoreStream, size: int) method atEof*(self: StoreStream): bool = self.offset >= self.size +type LPStreamReadError* = object of LPStreamError + par*: ref CatchableError + +proc newLPStreamReadError*(p: ref CatchableError): ref LPStreamReadError = + var w = newException(LPStreamReadError, "Read stream failed") + w.msg = w.msg & ", originated from [" & $p.name & "] " & p.msg + w.par = p + result = w + method readOnce*( self: StoreStream, pbytes: pointer, nbytes: int -): Future[int] {.async.} 
= +): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} = ## Read `nbytes` from current position in the StoreStream into output buffer pointed by `pbytes`. ## Return how many bytes were actually read before EOF was encountered. ## Raise exception if we are already at EOF. @@ -100,8 +109,9 @@ method readOnce*( self.manifest.blockSize.int - blockOffset]) address = BlockAddress(leaf: true, treeCid: self.manifest.treeCid, index: blockNum) + # Read contents of block `blockNum` - without blk =? await self.store.getBlock(address), error: + without blk =? (await self.store.getBlock(address)).tryGet.catch, error: raise newLPStreamReadError(error) trace "Reading bytes from store stream", manifestCid = self.manifest.cid.get(), numBlocks = self.manifest.blocksCount, blockNum, blkCid = blk.cid, bytes = readBytes, blockOffset diff --git a/codex/utils/addrutils.nim b/codex/utils/addrutils.nim index ec8d480a..3eec3015 100644 --- a/codex/utils/addrutils.nim +++ b/codex/utils/addrutils.nim @@ -19,7 +19,7 @@ import pkg/stew/endians2 func remapAddr*( address: MultiAddress, - ip: Option[ValidIpAddress] = ValidIpAddress.none, + ip: Option[IpAddress] = IpAddress.none, port: Option[Port] = Port.none ): MultiAddress = ## Remap addresses to new IP and/or Port @@ -41,7 +41,7 @@ func remapAddr*( MultiAddress.init(parts.join("/")) .expect("Should construct multiaddress") -proc getMultiAddrWithIPAndUDPPort*(ip: ValidIpAddress, port: Port): MultiAddress = +proc getMultiAddrWithIPAndUDPPort*(ip: IpAddress, port: Port): MultiAddress = ## Creates a MultiAddress with the specified IP address and UDP port ## ## Parameters: @@ -54,7 +54,7 @@ proc getMultiAddrWithIPAndUDPPort*(ip: ValidIpAddress, port: Port): MultiAddress let ipFamily = if ip.family == IpAddressFamily.IPv4: "/ip4/" else: "/ip6/" return MultiAddress.init(ipFamily & $ip & "/udp/" & $port).expect("valid multiaddr") -proc getAddressAndPort*(ma: MultiAddress): tuple[ip: Option[ValidIpAddress], port: Option[Port]] = +proc getAddressAndPort*(ma: MultiAddress): tuple[ip: Option[IpAddress], port: Option[Port]] = try: # Try IPv4 first let ipv4Result = ma[multiCodec("ip4")] @@ -63,7 +63,7 @@ proc getAddressAndPort*(ma: MultiAddress): tuple[ip: Option[ValidIpAddress], por .protoArgument() .expect("Invalid IPv4 format") let ipArray = [ipBytes[0], ipBytes[1], ipBytes[2], ipBytes[3]] - some(ipv4(ipArray)) + some(IpAddress(family: IPv4, address_v4: ipArray)) else: # Try IPv6 if IPv4 not found let ipv6Result = ma[multiCodec("ip6")] @@ -74,9 +74,9 @@ proc getAddressAndPort*(ma: MultiAddress): tuple[ip: Option[ValidIpAddress], por var ipArray: array[16, byte] for i in 0..15: ipArray[i] = ipBytes[i] - some(ipv6(ipArray)) + some(IpAddress(family: IPv6, address_v6: ipArray)) else: - none(ValidIpAddress) + none(IpAddress) # Get TCP Port let portResult = ma[multiCodec("tcp")] @@ -89,4 +89,4 @@ proc getAddressAndPort*(ma: MultiAddress): tuple[ip: Option[ValidIpAddress], por none(Port) (ip: ip, port: port) except Exception: - (ip: none(ValidIpAddress), port: none(Port)) + (ip: none(IpAddress), port: none(Port)) diff --git a/codex/utils/asyncstatemachine.nim b/codex/utils/asyncstatemachine.nim index bb7f198c..6bddc24e 100644 --- a/codex/utils/asyncstatemachine.nim +++ b/codex/utils/asyncstatemachine.nim @@ -22,7 +22,7 @@ logScope: proc new*[T: Machine](_: type T): T = T(trackedFutures: TrackedFutures.new()) -method `$`*(state: State): string {.base.} = +method `$`*(state: State): string {.base, gcsafe.} = raiseAssert "not implemented" proc transition(_: type Event, 
previous, next: State): Event = diff --git a/codex/utils/natutils.nim b/codex/utils/natutils.nim index 25f083bd..86497e12 100644 --- a/codex/utils/natutils.nim +++ b/codex/utils/natutils.nim @@ -16,14 +16,14 @@ type type IpLimits* = object limit*: uint - ips: Table[ValidIpAddress, uint] + ips: Table[IpAddress, uint] -func hash*(ip: ValidIpAddress): Hash = +func hash*(ip: IpAddress): Hash = case ip.family of IpAddressFamily.IPv6: hash(ip.address_v6) of IpAddressFamily.IPv4: hash(ip.address_v4) -func inc*(ipLimits: var IpLimits, ip: ValidIpAddress): bool = +func inc*(ipLimits: var IpLimits, ip: IpAddress): bool = let val = ipLimits.ips.getOrDefault(ip, 0) if val < ipLimits.limit: ipLimits.ips[ip] = val + 1 @@ -31,7 +31,7 @@ func inc*(ipLimits: var IpLimits, ip: ValidIpAddress): bool = else: false -func dec*(ipLimits: var IpLimits, ip: ValidIpAddress) = +func dec*(ipLimits: var IpLimits, ip: IpAddress) = let val = ipLimits.ips.getOrDefault(ip, 0) if val == 1: ipLimits.ips.del(ip) @@ -48,7 +48,7 @@ func isGlobalUnicast*(address: IpAddress): bool = let a = initTAddress(address, Port(0)) a.isGlobalUnicast() -proc getRouteIpv4*(): Result[ValidIpAddress, cstring] = +proc getRouteIpv4*(): Result[IpAddress, cstring] = # Avoiding Exception with initTAddress and can't make it work with static. # Note: `publicAddress` is only used an "example" IP to find the best route, # no data is send over the network to this IP! @@ -65,4 +65,4 @@ proc getRouteIpv4*(): Result[ValidIpAddress, cstring] = # This should not occur really. error "Address conversion error", exception = e.name, msg = e.msg return err("Invalid IP address") - ok(ValidIpAddress.init(ip)) \ No newline at end of file + ok(ip) \ No newline at end of file diff --git a/codex/utils/options.nim b/codex/utils/options.nim index 45225e0d..0362eebf 100644 --- a/codex/utils/options.nim +++ b/codex/utils/options.nim @@ -8,6 +8,14 @@ proc `as`*[T](value: T, U: type): ?U = ## Casts a value to another type, returns an Option. ## When the cast succeeds, the option will contain the casted value. ## When the cast fails, the option will have no value. + + # In Nim 2.0.x, check 42.some as int == none(int) + # Maybe because some 42.some looks like Option[Option[int]] + # So we check first that the value is an option of the expected type. + # In that case, we do not need to do anything, just return the value as it is. + when value is Option[U]: + return value + when value is U: return some value elif value is ref object: diff --git a/codex/utils/timer.nim b/codex/utils/timer.nim index b01d95c6..9cf59489 100644 --- a/codex/utils/timer.nim +++ b/codex/utils/timer.nim @@ -40,7 +40,7 @@ proc timerLoop(timer: Timer) {.async: (raises: []).} = except CatchableError as exc: error "Timer caught unhandled exception: ", name=timer.name, msg=exc.msg -method start*(timer: Timer, callback: TimerCallback, interval: Duration) {.base.} = +method start*(timer: Timer, callback: TimerCallback, interval: Duration) {.gcsafe, base.} = if timer.loopFuture != nil: return trace "Timer starting: ", name=timer.name diff --git a/config.nims b/config.nims index b64aacbd..6a4767ad 100644 --- a/config.nims +++ b/config.nims @@ -41,7 +41,7 @@ when defined(windows): # The dynamic Chronicles output currently prevents us from using colors on Windows # because these require direct manipulations of the stdout File object. 
- switch("define", "chronicles_colors=off") + switch("define", "chronicles_colors=NoColors") # This helps especially for 32-bit x86, which sans SSE2 and newer instructions # requires quite roundabout code generation for cryptography, and other 64-bit @@ -85,6 +85,8 @@ when (NimMajor, NimMinor) >= (1, 6): --warning:"DotLikeOps:off" when (NimMajor, NimMinor, NimPatch) >= (1, 6, 11): --warning:"BareExcept:off" +when (NimMajor, NimMinor) >= (2, 0): + --mm:refc switch("define", "withoutPCRE") diff --git a/tests/codex/helpers/nodeutils.nim b/tests/codex/helpers/nodeutils.nim index 8c624bb2..42449ece 100644 --- a/tests/codex/helpers/nodeutils.nim +++ b/tests/codex/helpers/nodeutils.nim @@ -46,7 +46,8 @@ proc generateNodes*( networkStore = NetworkStore.new(engine, localStore) switch.mount(network) - result.add(( + + let nc : NodesComponents = ( switch, discovery, wallet, @@ -56,7 +57,9 @@ proc generateNodes*( pendingBlocks, blockDiscovery, engine, - networkStore)) + networkStore) + + result.add(nc) proc connectNodes*(nodes: seq[Switch]) {.async.} = for dialer in nodes: diff --git a/tests/codex/merkletree/helpers.nim b/tests/codex/merkletree/helpers.nim index 5816a12a..848b8469 100644 --- a/tests/codex/merkletree/helpers.nim +++ b/tests/codex/merkletree/helpers.nim @@ -5,7 +5,7 @@ import ../helpers export merkletree, helpers -converter toBool*(x: CtBool): bool = +converter toBool*(x: CTBool): bool = bool(x) proc `==`*(a, b: Poseidon2Tree): bool = diff --git a/tests/codex/node/helpers.nim b/tests/codex/node/helpers.nim index 3bfb3e31..5394354a 100644 --- a/tests/codex/node/helpers.nim +++ b/tests/codex/node/helpers.nim @@ -70,8 +70,8 @@ template setupAndTearDown*() {.dirty.} = network: BlockExcNetwork clock: Clock localStore: RepoStore - localStoreRepoDs: DataStore - localStoreMetaDs: DataStore + localStoreRepoDs: Datastore + localStoreMetaDs: Datastore engine: BlockExcEngine store: NetworkStore node: CodexNodeRef @@ -80,7 +80,6 @@ template setupAndTearDown*() {.dirty.} = pendingBlocks: PendingBlocksManager discovery: DiscoveryEngine advertiser: Advertiser - taskpool: Taskpool let path = currentSourcePath().parentDir @@ -110,14 +109,12 @@ template setupAndTearDown*() {.dirty.} = advertiser = Advertiser.new(localStore, blockDiscovery) engine = BlockExcEngine.new(localStore, wallet, network, discovery, advertiser, peerStore, pendingBlocks) store = NetworkStore.new(engine, localStore) - taskpool = Taskpool.new(num_threads = countProcessors()) node = CodexNodeRef.new( switch = switch, networkStore = store, engine = engine, prover = Prover.none, - discovery = blockDiscovery, - taskpool = taskpool) + discovery = blockDiscovery) teardown: close(file) diff --git a/tests/codex/node/testcontracts.nim b/tests/codex/node/testcontracts.nim index 18ceac05..83d1ee98 100644 --- a/tests/codex/node/testcontracts.nim +++ b/tests/codex/node/testcontracts.nim @@ -2,7 +2,6 @@ import std/os import std/options import std/times import std/importutils -import std/cpuinfo import pkg/chronos import pkg/datastore @@ -76,7 +75,7 @@ asyncchecksuite "Test Node - Host contracts": manifestBlock = bt.Block.new( manifest.encode().tryGet(), codec = ManifestCodec).tryGet() - erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider, taskpool) + erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider) manifestCid = manifestBlock.cid manifestCidStr = $(manifestCid) diff --git a/tests/codex/node/testnode.nim b/tests/codex/node/testnode.nim index 765136a3..e750ecc3 100644 --- a/tests/codex/node/testnode.nim +++ 
b/tests/codex/node/testnode.nim @@ -2,7 +2,6 @@ import std/os import std/options import std/math import std/importutils -import std/cpuinfo import pkg/chronos import pkg/stew/byteutils @@ -13,7 +12,6 @@ import pkg/questionable/results import pkg/stint import pkg/poseidon2 import pkg/poseidon2/io -import pkg/taskpools import pkg/nitro import pkg/codexdht/discv5/protocol as discv5 @@ -139,7 +137,7 @@ asyncchecksuite "Test Node - Basic": test "Setup purchase request": let - erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider, taskpool) + erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider) manifest = await storeDataGetManifest(localStore, chunker) manifestBlock = bt.Block.new( manifest.encode().tryGet(), diff --git a/tests/codex/sales/testsalesagent.nim b/tests/codex/sales/testsalesagent.nim index 215f8bb4..fe19ecb0 100644 --- a/tests/codex/sales/testsalesagent.nim +++ b/tests/codex/sales/testsalesagent.nim @@ -101,7 +101,7 @@ asyncchecksuite "Sales agent": clock.set(market.requestExpiry[request.id] + 1) check eventually onCancelCalled - for requestState in {RequestState.New, Started, Finished, Failed}: + for requestState in {RequestState.New, RequestState.Started, RequestState.Finished, RequestState.Failed}: test "onCancelled is not called when request state is " & $requestState: agent.start(MockState.new()) await agent.subscribe() @@ -110,7 +110,7 @@ asyncchecksuite "Sales agent": await sleepAsync(100.millis) check not onCancelCalled - for requestState in {RequestState.Started, Finished, Failed}: + for requestState in {RequestState.Started, RequestState.Finished, RequestState.Failed}: test "cancelled future is finished when request state is " & $requestState: agent.start(MockState.new()) await agent.subscribe() diff --git a/tests/codex/slots/helpers.nim b/tests/codex/slots/helpers.nim index 54f4802d..d3310462 100644 --- a/tests/codex/slots/helpers.nim +++ b/tests/codex/slots/helpers.nim @@ -96,11 +96,11 @@ proc createProtectedManifest*( protectedTreeCid = protectedTree.rootCid().tryGet() for index, cid in cids[0.. 
0: raiseAssert("TempLevelDb already active.") - self.currentPath = getTempDir() / "templeveldb" / $number / $getmonotime() + self.currentPath = getTempDir() / "templeveldb" / $number / $getMonoTime() inc number - createdir(self.currentPath) + createDir(self.currentPath) self.ds = LevelDbDatastore.new(self.currentPath).tryGet() return self.ds @@ -26,5 +26,5 @@ proc destroyDb*(self: TempLevelDb): Future[void] {.async.} = try: (await self.ds.close()).tryGet() finally: - removedir(self.currentPath) + removeDir(self.currentPath) self.currentPath = "" diff --git a/tests/integration/nodeprocess.nim b/tests/integration/nodeprocess.nim index 88613d2e..a08b4fe1 100644 --- a/tests/integration/nodeprocess.nim +++ b/tests/integration/nodeprocess.nim @@ -26,22 +26,22 @@ type name*: string NodeProcessError* = object of CatchableError -method workingDir(node: NodeProcess): string {.base.} = +method workingDir(node: NodeProcess): string {.base, gcsafe.} = raiseAssert "not implemented" -method executable(node: NodeProcess): string {.base.} = +method executable(node: NodeProcess): string {.base, gcsafe.} = raiseAssert "not implemented" -method startedOutput(node: NodeProcess): string {.base.} = +method startedOutput(node: NodeProcess): string {.base, gcsafe.} = raiseAssert "not implemented" -method processOptions(node: NodeProcess): set[AsyncProcessOption] {.base.} = +method processOptions(node: NodeProcess): set[AsyncProcessOption] {.base, gcsafe.} = raiseAssert "not implemented" -method outputLineEndings(node: NodeProcess): string {.base, raises: [].} = +method outputLineEndings(node: NodeProcess): string {.base, gcsafe raises: [].} = raiseAssert "not implemented" -method onOutputLineCaptured(node: NodeProcess, line: string) {.base, raises: [].} = +method onOutputLineCaptured(node: NodeProcess, line: string) {.base, gcsafe, raises: [].} = raiseAssert "not implemented" method start*(node: NodeProcess) {.base, async.} = diff --git a/vendor/asynctest b/vendor/asynctest index 8e2f4e73..5154c0d7 160000 --- a/vendor/asynctest +++ b/vendor/asynctest @@ -1 +1 @@ -Subproject commit 8e2f4e73b97123be0f0041c129942b32df23ecb1 +Subproject commit 5154c0d79dd8bb086ab418cc659e923330ac24f2 diff --git a/vendor/codex-storage-proofs-circuits b/vendor/codex-storage-proofs-circuits index c03b4322..ac8d3667 160000 --- a/vendor/codex-storage-proofs-circuits +++ b/vendor/codex-storage-proofs-circuits @@ -1 +1 @@ -Subproject commit c03b43221d68e34bd5015a4e4ee1a0ad3299f8ef +Subproject commit ac8d3667526862458b162bee71dd5dcf6170c209 diff --git a/vendor/combparser b/vendor/combparser index ba4464c0..e582c436 160000 --- a/vendor/combparser +++ b/vendor/combparser @@ -1 +1 @@ -Subproject commit ba4464c005d7617c008e2ed2ebc1ba52feb469c6 +Subproject commit e582c436e8750b60253370fd77960509d36e3738 diff --git a/vendor/constantine b/vendor/constantine index 8367d7d1..bc3845aa 160000 --- a/vendor/constantine +++ b/vendor/constantine @@ -1 +1 @@ -Subproject commit 8367d7d19cdbba874aab961b70d272e742184c37 +Subproject commit bc3845aa492b52f7fef047503b1592e830d1a774 diff --git a/vendor/nim-bearssl b/vendor/nim-bearssl index 99fcb340..667b4044 160000 --- a/vendor/nim-bearssl +++ b/vendor/nim-bearssl @@ -1 +1 @@ -Subproject commit 99fcb3405c55b27cfffbf60f5368c55da7346f23 +Subproject commit 667b40440a53a58e9f922e29e20818720c62d9ac diff --git a/vendor/nim-blscurve b/vendor/nim-blscurve index 48d8668c..de2d3c79 160000 --- a/vendor/nim-blscurve +++ b/vendor/nim-blscurve @@ -1 +1 @@ -Subproject commit 48d8668c5a9a350d3a7ee0c3713ef9a11980a40d +Subproject 
commit de2d3c79264bba18dbea469c8c5c4b3bb3c8bc55 diff --git a/vendor/nim-chronicles b/vendor/nim-chronicles index c9c8e58e..81a4a7a3 160000 --- a/vendor/nim-chronicles +++ b/vendor/nim-chronicles @@ -1 +1 @@ -Subproject commit c9c8e58ec3f89b655a046c485f622f9021c68b61 +Subproject commit 81a4a7a360c78be9c80c8f735c76b6d4a1517304 diff --git a/vendor/nim-chronos b/vendor/nim-chronos index 035ae11b..c04576d8 160000 --- a/vendor/nim-chronos +++ b/vendor/nim-chronos @@ -1 +1 @@ -Subproject commit 035ae11ba92369e7722e649db597e79134fd06b9 +Subproject commit c04576d829b8a0a1b12baaa8bc92037501b3a4a0 diff --git a/vendor/nim-codex-dht b/vendor/nim-codex-dht index 63822e83..4bd3a39e 160000 --- a/vendor/nim-codex-dht +++ b/vendor/nim-codex-dht @@ -1 +1 @@ -Subproject commit 63822e83561ea1c6396d0f3eca583b038f5d44c6 +Subproject commit 4bd3a39e0030f8ee269ef217344b6b59ec2be6dc diff --git a/vendor/nim-confutils b/vendor/nim-confutils index 2028b416..cb858a27 160000 --- a/vendor/nim-confutils +++ b/vendor/nim-confutils @@ -1 +1 @@ -Subproject commit 2028b41602b3abf7c9bf450744efde7b296707a2 +Subproject commit cb858a27f4347be949d10ed74b58713d687936d2 diff --git a/vendor/nim-contract-abi b/vendor/nim-contract-abi index 61f8f59b..842f4891 160000 --- a/vendor/nim-contract-abi +++ b/vendor/nim-contract-abi @@ -1 +1 @@ -Subproject commit 61f8f59b3917d8e27c6eb4330a6d8cf428e98b2d +Subproject commit 842f48910be4f388bcbf8abf1f02aba1d5e2ee64 diff --git a/vendor/nim-datastore b/vendor/nim-datastore index 3ab6b84a..d67860ad 160000 --- a/vendor/nim-datastore +++ b/vendor/nim-datastore @@ -1 +1 @@ -Subproject commit 3ab6b84a634a7b2ee8c0144f050bf5893cd47c17 +Subproject commit d67860add63fd23cdacde1d3da8f4739c2660c2d diff --git a/vendor/nim-eth b/vendor/nim-eth index 15a09fab..dcfbc429 160000 --- a/vendor/nim-eth +++ b/vendor/nim-eth @@ -1 +1 @@ -Subproject commit 15a09fab737d08a2545284c727199c377bb0f4b7 +Subproject commit dcfbc4291d39b59563828c3e32be4d51a2f25931 diff --git a/vendor/nim-ethers b/vendor/nim-ethers index 2808a054..1cfccb96 160000 --- a/vendor/nim-ethers +++ b/vendor/nim-ethers @@ -1 +1 @@ -Subproject commit 2808a05488152c8b438d947dc871445164fa1278 +Subproject commit 1cfccb9695fa47860bf7ef3d75da9019096a3933 diff --git a/vendor/nim-faststreams b/vendor/nim-faststreams index 720fc5e5..cf8d4d22 160000 --- a/vendor/nim-faststreams +++ b/vendor/nim-faststreams @@ -1 +1 @@ -Subproject commit 720fc5e5c8e428d9d0af618e1e27c44b42350309 +Subproject commit cf8d4d22636b8e514caf17e49f9c786ac56b0e85 diff --git a/vendor/nim-http-utils b/vendor/nim-http-utils index be57dbc9..8bb1acba 160000 --- a/vendor/nim-http-utils +++ b/vendor/nim-http-utils @@ -1 +1 @@ -Subproject commit be57dbc902d36f37540897e98c69aa80f868cb45 +Subproject commit 8bb1acbaa4b86eb866145b0d468eff64a57d1897 diff --git a/vendor/nim-json-rpc b/vendor/nim-json-rpc index 0408795b..27437213 160000 --- a/vendor/nim-json-rpc +++ b/vendor/nim-json-rpc @@ -1 +1 @@ -Subproject commit 0408795be95c00d75e96eaef6eae8a9c734014f5 +Subproject commit 274372132de497e6b7b793c9d5d5474b71bf80a2 diff --git a/vendor/nim-json-serialization b/vendor/nim-json-serialization index 5127b26e..6eadb6e9 160000 --- a/vendor/nim-json-serialization +++ b/vendor/nim-json-serialization @@ -1 +1 @@ -Subproject commit 5127b26ee58076e9369e7c126c196793c2b12e73 +Subproject commit 6eadb6e939ffa7882ff5437033c11a9464d3385c diff --git a/vendor/nim-leopard b/vendor/nim-leopard index 895ff24c..3e09d811 160000 --- a/vendor/nim-leopard +++ b/vendor/nim-leopard @@ -1 +1 @@ -Subproject commit 
895ff24ca6615d577acfb11811cdd5465f596c97 +Subproject commit 3e09d8113f874f3584c3fe93818541b2ff9fb9c3 diff --git a/vendor/nim-libbacktrace b/vendor/nim-libbacktrace index b29c22ba..6da0cda8 160000 --- a/vendor/nim-libbacktrace +++ b/vendor/nim-libbacktrace @@ -1 +1 @@ -Subproject commit b29c22ba0ef13de50b779c776830dbea1d50cd33 +Subproject commit 6da0cda88ab7780bd5fd342327adb91ab84692aa diff --git a/vendor/nim-libp2p b/vendor/nim-libp2p index b239791c..036e110a 160000 --- a/vendor/nim-libp2p +++ b/vendor/nim-libp2p @@ -1 +1 @@ -Subproject commit b239791c568d9f9a76fd66d2322b2754700b6cc5 +Subproject commit 036e110a6080fba1a1662c58cfd8c21f9a548021 diff --git a/vendor/nim-metrics b/vendor/nim-metrics index 6142e433..cacfdc12 160000 --- a/vendor/nim-metrics +++ b/vendor/nim-metrics @@ -1 +1 @@ -Subproject commit 6142e433fc8ea9b73379770a788017ac528d46ff +Subproject commit cacfdc12454a0804c65112b9f4f50d1375208dcd diff --git a/vendor/nim-nat-traversal b/vendor/nim-nat-traversal index 27d314d6..5e405974 160000 --- a/vendor/nim-nat-traversal +++ b/vendor/nim-nat-traversal @@ -1 +1 @@ -Subproject commit 27d314d65c9078924b3239fe4e2f5af0c512b28c +Subproject commit 5e4059746e9095e1731b02eeaecd62a70fbe664d diff --git a/vendor/nim-nitro b/vendor/nim-nitro index 6b4c455b..e3719433 160000 --- a/vendor/nim-nitro +++ b/vendor/nim-nitro @@ -1 +1 @@ -Subproject commit 6b4c455bf4dad7449c1580055733a1738fcd5aec +Subproject commit e3719433d5ace25947c468787c805969642b3913 diff --git a/vendor/nim-poseidon2 b/vendor/nim-poseidon2 index 0346982f..4e2c6e61 160000 --- a/vendor/nim-poseidon2 +++ b/vendor/nim-poseidon2 @@ -1 +1 @@ -Subproject commit 0346982f2c6891bcedd03d552af3a3bd57b2c1f9 +Subproject commit 4e2c6e619b2f2859aaa4b2aed2f346ea4d0c67a3 diff --git a/vendor/nim-presto b/vendor/nim-presto index c17bfdda..92b1c7ff 160000 --- a/vendor/nim-presto +++ b/vendor/nim-presto @@ -1 +1 @@ -Subproject commit c17bfdda2c60cf5fadb043feb22e328b7659c719 +Subproject commit 92b1c7ff141e6920e1f8a98a14c35c1fa098e3be diff --git a/vendor/nim-protobuf-serialization b/vendor/nim-protobuf-serialization index 28214b3e..5a31137a 160000 --- a/vendor/nim-protobuf-serialization +++ b/vendor/nim-protobuf-serialization @@ -1 +1 @@ -Subproject commit 28214b3e40c755a9886d2ec8f261ec48fbb6bec6 +Subproject commit 5a31137a82c2b6a989c9ed979bb636c7a49f570e diff --git a/vendor/nim-results b/vendor/nim-results index f3c666a2..df8113dd 160000 --- a/vendor/nim-results +++ b/vendor/nim-results @@ -1 +1 @@ -Subproject commit f3c666a272c69d70cb41e7245e7f6844797303ad +Subproject commit df8113dda4c2d74d460a8fa98252b0b771bf1f27 diff --git a/vendor/nim-serde b/vendor/nim-serde index 83e4a2cc..69a7a011 160000 --- a/vendor/nim-serde +++ b/vendor/nim-serde @@ -1 +1 @@ -Subproject commit 83e4a2ccf621d3040c6e7e0267393ca2d205988e +Subproject commit 69a7a0111addaa4aad885dd4bd7b5ee4684a06de diff --git a/vendor/nim-serialization b/vendor/nim-serialization index f709bd9e..2086c996 160000 --- a/vendor/nim-serialization +++ b/vendor/nim-serialization @@ -1 +1 @@ -Subproject commit f709bd9e16b1b6870fe3e4401196479e014a2ef6 +Subproject commit 2086c99608b4bf472e1ef5fe063710f280243396 diff --git a/vendor/nim-sqlite3-abi b/vendor/nim-sqlite3-abi index 362e1bd9..05bbff1a 160000 --- a/vendor/nim-sqlite3-abi +++ b/vendor/nim-sqlite3-abi @@ -1 +1 @@ -Subproject commit 362e1bd9f689ad9f5380d9d27f0705b3d4dfc7d3 +Subproject commit 05bbff1af4e8fe2d972ba4b0667b89ca94d3ebba diff --git a/vendor/nim-stew b/vendor/nim-stew index 7afe7e3c..a6e19813 160000 --- a/vendor/nim-stew +++ 
b/vendor/nim-stew @@ -1 +1 @@ -Subproject commit 7afe7e3c070758cac1f628e4330109f3ef6fc853 +Subproject commit a6e198132097fb544d04959aeb3b839e1408f942 diff --git a/vendor/nim-taskpools b/vendor/nim-taskpools index b3673c7a..66585e2e 160000 --- a/vendor/nim-taskpools +++ b/vendor/nim-taskpools @@ -1 +1 @@ -Subproject commit b3673c7a7a959ccacb393bd9b47e997bbd177f5a +Subproject commit 66585e2e960b7695e48ea60377fb3aeac96406e8 diff --git a/vendor/nim-testutils b/vendor/nim-testutils index b56a5953..4d37244f 160000 --- a/vendor/nim-testutils +++ b/vendor/nim-testutils @@ -1 +1 @@ -Subproject commit b56a5953e37fc5117bd6ea6dfa18418c5e112815 +Subproject commit 4d37244f9f5e1acd8592a4ceb5c3fc47bc160181 diff --git a/vendor/nim-toml-serialization b/vendor/nim-toml-serialization index 86d47713..fea85b27 160000 --- a/vendor/nim-toml-serialization +++ b/vendor/nim-toml-serialization @@ -1 +1 @@ -Subproject commit 86d477136f105f04bfd0dd7c0e939593d81fc581 +Subproject commit fea85b27f0badcf617033ca1bc05444b5fd8aa7a diff --git a/vendor/nim-unittest2 b/vendor/nim-unittest2 index b178f475..845b6af2 160000 --- a/vendor/nim-unittest2 +++ b/vendor/nim-unittest2 @@ -1 +1 @@ -Subproject commit b178f47527074964f76c395ad0dfc81cf118f379 +Subproject commit 845b6af28b9f68f02d320e03ad18eccccea7ddb9 diff --git a/vendor/nim-websock b/vendor/nim-websock index 2c3ae313..ebe308a7 160000 --- a/vendor/nim-websock +++ b/vendor/nim-websock @@ -1 +1 @@ -Subproject commit 2c3ae3137f3c9cb48134285bd4a47186fa51f0e8 +Subproject commit ebe308a79a7b440a11dfbe74f352be86a3883508 diff --git a/vendor/nim-zlib b/vendor/nim-zlib index f34ca261..91cf360b 160000 --- a/vendor/nim-zlib +++ b/vendor/nim-zlib @@ -1 +1 @@ -Subproject commit f34ca261efd90f118dc1647beefd2f7a69b05d93 +Subproject commit 91cf360b1aeb2e0c753ff8bac6de22a41c5ed8cd diff --git a/vendor/nimbus-build-system b/vendor/nimbus-build-system index fe9bc3f3..4c6ff070 160000 --- a/vendor/nimbus-build-system +++ b/vendor/nimbus-build-system @@ -1 +1 @@ -Subproject commit fe9bc3f3759ae1add6bf8c899db2e75327f03782 +Subproject commit 4c6ff070c116450bb2c285691724ac9e6202cb28 diff --git a/vendor/nimcrypto b/vendor/nimcrypto index 24e006df..dc07e305 160000 --- a/vendor/nimcrypto +++ b/vendor/nimcrypto @@ -1 +1 @@ -Subproject commit 24e006df85927f64916e60511620583b11403178 +Subproject commit dc07e3058c6904eef965394493b6ea99aa2adefc diff --git a/vendor/npeg b/vendor/npeg index b15a10e3..409f6796 160000 --- a/vendor/npeg +++ b/vendor/npeg @@ -1 +1 @@ -Subproject commit b15a10e388b91b898c581dbbcb6a718d46b27d2f +Subproject commit 409f6796d0e880b3f0222c964d1da7de6e450811 diff --git a/vendor/stint b/vendor/stint index 86621ece..5c5e01ce 160000 --- a/vendor/stint +++ b/vendor/stint @@ -1 +1 @@ -Subproject commit 86621eced1dcfb5e25903019ebcfc76ed9128ec5 +Subproject commit 5c5e01cef089a261474b7abfe246b37447aaa8ed diff --git a/vendor/upraises b/vendor/upraises index ff4f8108..bc262898 160000 --- a/vendor/upraises +++ b/vendor/upraises @@ -1 +1 @@ -Subproject commit ff4f8108e44fba9b35cac535ab63d3927e8fd3c2 +Subproject commit bc2628989b63854d980e92dadbd58f83e34b6f25