Scheduling erasure coding on another thread (#716)
* Scheduling erasure coding on another thread
* Code review fixes
* Fix for review comments
* Fix missing import

---------

Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
parent de1714ed06, commit 59d9439ae9
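The change follows the usual chronos + taskpools offload pattern: the CPU-heavy erasure coding runs on a Taskpool worker thread, which fires a ThreadSignalPtr when it is done so the async side can wait without blocking the event loop. A minimal sketch of that pattern, independent of the Codex types (the proc names here are illustrative, not part of the commit):

import pkg/taskpools
import pkg/chronos
import pkg/chronos/threadsync
import pkg/results

proc heavyWork(signal: ThreadSignalPtr, n: int): int =
  # Stand-in for the CPU-bound erasure coding; runs on a taskpool thread.
  result = n * n
  # Wake the awaiting async proc once the result is ready.
  discard signal.fireSync()

proc runOffloaded(tp: Taskpool, n: int): Future[int] {.async.} =
  let signal = ThreadSignalPtr.new().expect("could not create signal")
  let handle = tp.spawn heavyWork(signal, n)  # schedule on another thread
  await signal.wait()                         # suspend; the event loop stays free
  result = sync(handle)                       # collect the Flowvar result
  discard signal.close()

The new asyncbackend module introduced below does the same thing, but additionally copies results into shared memory and, after the signal fires, polls the Flowvar with tryComplete for up to CompletitionTimeout, since the signal can arrive slightly before the Flowvar is marked complete.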
@@ -11,6 +11,7 @@ import std/sequtils
 import std/strutils
 import std/os
 import std/tables
+import std/cpuinfo

 import pkg/chronos
 import pkg/presto
@@ -23,6 +24,7 @@ import pkg/stew/shims/net as stewnet
 import pkg/datastore
 import pkg/ethers except Rng
 import pkg/stew/io2
+import pkg/taskpools

 import ./node
 import ./conf
@@ -53,6 +55,7 @@ type
     codexNode: CodexNodeRef
     repoStore: RepoStore
    maintenance: BlockMaintainer
+    taskpool: Taskpool

  CodexPrivateKey* = libp2p.PrivateKey # alias
  EthWallet = ethers.Wallet
@@ -180,6 +183,10 @@ proc start*(s: CodexServer) {.async.} =
 proc stop*(s: CodexServer) {.async.} =
   notice "Stopping codex node"

+
+  s.taskpool.syncAll()
+  s.taskpool.shutdown()
+
   await allFuturesThrowing(
     s.restServer.stop(),
     s.codexNode.switch.stop(),
@@ -290,12 +297,15 @@ proc new*(
      else:
        none Prover

+    taskpool = Taskpool.new(num_threads = countProcessors())
+
    codexNode = CodexNodeRef.new(
      switch = switch,
      networkStore = store,
      engine = engine,
      prover = prover,
-      discovery = discovery)
+      discovery = discovery,
+      taskpool = taskpool)

    restServer = RestServerRef.new(
      codexNode.initRestApi(config, repoStore),
@@ -311,4 +321,5 @@ proc new*(
    codexNode: codexNode,
    restServer: restServer,
    repoStore: repoStore,
-    maintenance: maintenance)
+    maintenance: maintenance,
+    taskpool: taskpool)
@@ -0,0 +1,211 @@
+## Nim-Codex
+## Copyright (c) 2024 Status Research & Development GmbH
+## Licensed under either of
+## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
+## * MIT license ([LICENSE-MIT](LICENSE-MIT))
+## at your option.
+## This file may not be copied, modified, or distributed except according to
+## those terms.
+
+import std/sequtils
+
+import pkg/taskpools
+import pkg/taskpools/flowvars
+import pkg/chronos
+import pkg/chronos/threadsync
+import pkg/questionable/results
+
+import ./backend
+import ../errors
+import ../logutils
+
+logScope:
+  topics = "codex asyncerasure"
+
+const
+  CompletitionTimeout = 1.seconds # Maximum await time for completition after receiving a signal
+  CompletitionRetryDelay = 10.millis
+
+type
+  EncoderBackendPtr = ptr EncoderBackend
+  DecoderBackendPtr = ptr DecoderBackend
+
+  # Args objects are missing seq[seq[byte]] field, to avoid unnecessary data copy
+  EncodeTaskArgs = object
+    signal: ThreadSignalPtr
+    backend: EncoderBackendPtr
+    blockSize: int
+    ecM: int
+
+  DecodeTaskArgs = object
+    signal: ThreadSignalPtr
+    backend: DecoderBackendPtr
+    blockSize: int
+    ecK: int
+
+  SharedArrayHolder*[T] = object
+    data: ptr UncheckedArray[T]
+    size: int
+
+  EncodeTaskResult = Result[SharedArrayHolder[byte], cstring]
+  DecodeTaskResult = Result[SharedArrayHolder[byte], cstring]
+
+proc encodeTask(args: EncodeTaskArgs, data: seq[seq[byte]]): EncodeTaskResult =
+  var
+    data = data.unsafeAddr
+    parity = newSeqWith[seq[byte]](args.ecM, newSeq[byte](args.blockSize))
+
+  try:
+    let res = args.backend[].encode(data[], parity)
+
+    if res.isOk:
+      let
+        resDataSize = parity.len * args.blockSize
+        resData = cast[ptr UncheckedArray[byte]](allocShared0(resDataSize))
+        arrHolder = SharedArrayHolder[byte](
+          data: resData,
+          size: resDataSize
+        )
+
+      for i in 0..<parity.len:
+        copyMem(addr resData[i * args.blockSize], addr parity[i][0], args.blockSize)
+
+      return ok(arrHolder)
+    else:
+      return err(res.error)
+  except CatchableError as exception:
+    return err(exception.msg.cstring)
+  finally:
+    if err =? args.signal.fireSync().mapFailure.errorOption():
+      error "Error firing signal", msg = err.msg
+
+proc decodeTask(args: DecodeTaskArgs, data: seq[seq[byte]], parity: seq[seq[byte]]): DecodeTaskResult =
+  var
+    data = data.unsafeAddr
+    parity = parity.unsafeAddr
+    recovered = newSeqWith[seq[byte]](args.ecK, newSeq[byte](args.blockSize))
+
+  try:
+    let res = args.backend[].decode(data[], parity[], recovered)
+
+    if res.isOk:
+      let
+        resDataSize = recovered.len * args.blockSize
+        resData = cast[ptr UncheckedArray[byte]](allocShared0(resDataSize))
+        arrHolder = SharedArrayHolder[byte](
+          data: resData,
+          size: resDataSize
+        )
+
+      for i in 0..<recovered.len:
+        copyMem(addr resData[i * args.blockSize], addr recovered[i][0], args.blockSize)
+
+      return ok(arrHolder)
+    else:
+      return err(res.error)
+  except CatchableError as exception:
+    return err(exception.msg.cstring)
+  finally:
+    if err =? args.signal.fireSync().mapFailure.errorOption():
+      error "Error firing signal", msg = err.msg
+
+proc proxySpawnEncodeTask(
+  tp: Taskpool,
+  args: EncodeTaskArgs,
+  data: ref seq[seq[byte]]
+): Flowvar[EncodeTaskResult] =
+  tp.spawn encodeTask(args, data[])
+
+proc proxySpawnDecodeTask(
+  tp: Taskpool,
+  args: DecodeTaskArgs,
+  data: ref seq[seq[byte]],
+  parity: ref seq[seq[byte]]
+): Flowvar[DecodeTaskResult] =
+  tp.spawn decodeTask(args, data[], parity[])
+
+proc awaitResult[T](signal: ThreadSignalPtr, handle: Flowvar[T]): Future[?!T] {.async.} =
+  await wait(signal)
+
+  var
+    res: T
+    awaitTotal: Duration
+  while awaitTotal < CompletitionTimeout:
+    if handle.tryComplete(res):
+      return success(res)
+    else:
+      awaitTotal += CompletitionRetryDelay
+      await sleepAsync(CompletitionRetryDelay)
+
+  return failure("Task signaled finish but didn't return any result within " & $CompletitionRetryDelay)
+
+proc asyncEncode*(
+  tp: Taskpool,
+  backend: EncoderBackend,
+  data: ref seq[seq[byte]],
+  blockSize: int,
+  ecM: int
+): Future[?!ref seq[seq[byte]]] {.async.} =
+  without signal =? ThreadSignalPtr.new().mapFailure, err:
+    return failure(err)
+
+  try:
+    let
+      blockSize = data[0].len
+      args = EncodeTaskArgs(signal: signal, backend: unsafeAddr backend, blockSize: blockSize, ecM: ecM)
+      handle = proxySpawnEncodeTask(tp, args, data)
+
+    without res =? await awaitResult(signal, handle), err:
+      return failure(err)
+
+    if res.isOk:
+      var parity = seq[seq[byte]].new()
+      parity[].setLen(ecM)
+
+      for i in 0..<parity[].len:
+        parity[i] = newSeq[byte](blockSize)
+        copyMem(addr parity[i][0], addr res.value.data[i * blockSize], blockSize)
+
+      deallocShared(res.value.data)
+
+      return success(parity)
+    else:
+      return failure($res.error)
+  finally:
+    if err =? signal.close().mapFailure.errorOption():
+      error "Error closing signal", msg = $err.msg
+
+proc asyncDecode*(
+  tp: Taskpool,
+  backend: DecoderBackend,
+  data, parity: ref seq[seq[byte]],
+  blockSize: int
+): Future[?!ref seq[seq[byte]]] {.async.} =
+  without signal =? ThreadSignalPtr.new().mapFailure, err:
+    return failure(err)
+
+  try:
+    let
+      ecK = data[].len
+      args = DecodeTaskArgs(signal: signal, backend: unsafeAddr backend, blockSize: blockSize, ecK: ecK)
+      handle = proxySpawnDecodeTask(tp, args, data, parity)
+
+    without res =? await awaitResult(signal, handle), err:
+      return failure(err)
+
+    if res.isOk:
+      var recovered = seq[seq[byte]].new()
+      recovered[].setLen(ecK)
+
+      for i in 0..<recovered[].len:
+        recovered[i] = newSeq[byte](blockSize)
+        copyMem(addr recovered[i][0], addr res.value.data[i * blockSize], blockSize)
+
+      deallocShared(res.value.data)
+
+      return success(recovered)
+    else:
+      return failure($res.error)
+  finally:
+    if err =? signal.close().mapFailure.errorOption():
+      error "Error closing signal", msg = $err.msg
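The worker procs cannot hand GC-managed seqs back across threads directly, so results are flattened into shared memory (SharedArrayHolder, allocShared0, copyMem) and copied back into ordinary seqs on the async side before the shared block is freed. A standalone sketch of just that handoff, outside the taskpool machinery (the SharedBytes type and pack/unpack procs are illustrative, not part of the commit):

import std/sequtils

type SharedBytes = object
  data: ptr UncheckedArray[byte]
  size: int

proc pack(rows: seq[seq[byte]], rowSize: int): SharedBytes =
  # Worker side: flatten the rows into one shared allocation.
  result.size = rows.len * rowSize
  result.data = cast[ptr UncheckedArray[byte]](allocShared0(result.size))
  for i in 0..<rows.len:
    copyMem(addr result.data[i * rowSize], unsafeAddr rows[i][0], rowSize)

proc unpack(shared: SharedBytes, rowSize: int): seq[seq[byte]] =
  # Async side: copy back into GC-managed seqs, then free the shared block.
  result = newSeqWith(shared.size div rowSize, newSeq[byte](rowSize))
  for i in 0..<result.len:
    copyMem(addr result[i][0], addr shared.data[i * rowSize], rowSize)
  deallocShared(shared.data)

when isMainModule:
  let rows = @[@[1.byte, 2, 3], @[4.byte, 5, 6]]
  doAssert unpack(pack(rows, 3), 3) == rows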
@@ -17,6 +17,7 @@ import std/sugar
 import pkg/chronos
 import pkg/libp2p/[multicodec, cid, multihash]
 import pkg/libp2p/protobuf/minprotobuf
+import pkg/taskpools

 import ../logutils
 import ../manifest
@@ -30,6 +31,7 @@ import ../indexingstrategy
 import pkg/stew/byteutils

 import ./backend
+import ./asyncbackend

 export backend

@@ -70,6 +72,7 @@ type
    encoderProvider*: EncoderProvider
    decoderProvider*: DecoderProvider
    store*: BlockStore
+    taskpool: Taskpool

  EncodingParams = object
    ecK: Natural
@@ -282,30 +285,23 @@ proc encodeData(
    # TODO: Don't allocate a new seq every time, allocate once and zero out
    var
      data = seq[seq[byte]].new() # number of blocks to encode
-      parityData = newSeqWith[seq[byte]](params.ecM, newSeq[byte](manifest.blockSize.int))

    data[].setLen(params.ecK)
-    # TODO: this is a tight blocking loop so we sleep here to allow
-    # other events to be processed, this should be addressed
-    # by threading
-    await sleepAsync(10.millis)

    without resolved =?
      (await self.prepareEncodingData(manifest, params, step, data, cids, emptyBlock)), err:
      trace "Unable to prepare data", error = err.msg
      return failure(err)

-    trace "Erasure coding data", data = data[].len, parity = parityData.len
+    trace "Erasure coding data", data = data[].len, parity = params.ecM

-    if (
-      let res = encoder.encode(data[], parityData);
-      res.isErr):
-      trace "Unable to encode manifest!", error = $res.error
-      return failure($res.error)
+    without parity =? await asyncEncode(self.taskpool, encoder, data, manifest.blockSize.int, params.ecM), err:
+      trace "Error encoding data", err = err.msg
+      return failure(err)

    var idx = params.rounded + step
    for j in 0..<params.ecM:
-      without blk =? bt.Block.new(parityData[j]), error:
+      without blk =? bt.Block.new(parity[j]), error:
        trace "Unable to create parity block", err = error.msg
        return failure(error)
@@ -390,21 +386,15 @@ proc decode*(
  cids[].setLen(encoded.blocksCount)
  try:
    for step in 0..<encoded.steps:
-      # TODO: this is a tight blocking loop so we sleep here to allow
-      # other events to be processed, this should be addressed
-      # by threading
-      await sleepAsync(10.millis)
-
      var
        data = seq[seq[byte]].new()
-        parityData = seq[seq[byte]].new()
-        recovered = newSeqWith[seq[byte]](encoded.ecK, newSeq[byte](encoded.blockSize.int))
+        parity = seq[seq[byte]].new()

      data[].setLen(encoded.ecK) # set len to K
-      parityData[].setLen(encoded.ecM) # set len to M
+      parity[].setLen(encoded.ecM) # set len to M

      without (dataPieces, _) =?
-        (await self.prepareDecodingData(encoded, step, data, parityData, cids, emptyBlock)), err:
+        (await self.prepareDecodingData(encoded, step, data, parity, cids, emptyBlock)), err:
        trace "Unable to prepare data", error = err.msg
        return failure(err)

@@ -413,11 +403,10 @@
        continue

      trace "Erasure decoding data"
-      if (
-        let err = decoder.decode(data[], parityData[], recovered);
-        err.isErr):
-        trace "Unable to decode data!", err = $err.error
-        return failure($err.error)
+      without recovered =? await asyncDecode(self.taskpool, decoder, data, parity, encoded.blockSize.int), err:
+        trace "Error decoding data", err = err.msg
+        return failure(err)

      for i in 0..<encoded.ecK:
        let idx = i * encoded.steps + step
@@ -472,10 +461,13 @@ proc new*(
  T: type Erasure,
  store: BlockStore,
  encoderProvider: EncoderProvider,
-  decoderProvider: DecoderProvider): Erasure =
+  decoderProvider: DecoderProvider,
+  taskpool: Taskpool): Erasure =
  ## Create a new Erasure instance for encoding and decoding manifests
  ##

  Erasure(
    store: store,
    encoderProvider: encoderProvider,
-    decoderProvider: decoderProvider)
+    decoderProvider: decoderProvider,
+    taskpool: taskpool)
@@ -13,6 +13,7 @@ import std/options
 import std/sequtils
 import std/strformat
 import std/sugar
+import std/cpuinfo

 import pkg/questionable
 import pkg/questionable/results
@@ -25,6 +26,7 @@ import pkg/libp2p/stream/bufferstream
 # TODO: remove once exported by libp2p
 import pkg/libp2p/routing_record
 import pkg/libp2p/signed_envelope
+import pkg/taskpools

 import ./chunker
 import ./slots
@@ -69,6 +71,7 @@ type
    contracts*: Contracts
    clock*: Clock
    storage*: Contracts
+    taskpool*: Taskpool

  CodexNodeRef* = ref CodexNode

@@ -253,7 +256,8 @@ proc streamEntireDataset(
      erasure = Erasure.new(
        self.networkStore,
        leoEncoderProvider,
-        leoDecoderProvider)
+        leoDecoderProvider,
+        self.taskpool)
      without _ =? (await erasure.decode(manifest)), error:
        trace "Unable to erasure decode manifest", manifestCid, exc = error.msg
  except CatchableError as exc:
@@ -420,7 +424,8 @@ proc setupRequest(
      erasure = Erasure.new(
        self.networkStore.localStore,
        leoEncoderProvider,
-        leoDecoderProvider)
+        leoDecoderProvider,
+        self.taskpool)

  without encoded =? (await erasure.encode(manifest, ecK, ecM)), error:
    trace "Unable to erasure code dataset"
@@ -748,7 +753,8 @@ proc new*(
    engine: BlockExcEngine,
    discovery: Discovery,
    prover = Prover.none,
-    contracts = Contracts.default): CodexNodeRef =
+    contracts = Contracts.default,
+    taskpool = Taskpool.new(num_threads = countProcessors())): CodexNodeRef =
  ## Create new instance of a Codex self, call `start` to run it
  ##

@@ -758,4 +764,5 @@ proc new*(
    engine: engine,
    prover: prover,
    discovery: discovery,
-    contracts: contracts)
+    contracts: contracts,
+    taskpool: taskpool)
@@ -1,8 +1,10 @@
 import std/tables
 import std/times
+import std/cpuinfo

 import pkg/libp2p
 import pkg/chronos
+import pkg/taskpools

 import pkg/codex/codextypes
 import pkg/codex/chunker
@@ -81,6 +83,7 @@ template setupAndTearDown*() {.dirty.} =
    peerStore: PeerCtxStore
    pendingBlocks: PendingBlocksManager
    discovery: DiscoveryEngine
+    taskpool: Taskpool

  let
    path = currentSourcePath().parentDir
@@ -107,12 +110,14 @@ template setupAndTearDown*() {.dirty.} =
    discovery = DiscoveryEngine.new(localStore, peerStore, network, blockDiscovery, pendingBlocks)
    engine = BlockExcEngine.new(localStore, wallet, network, discovery, peerStore, pendingBlocks)
    store = NetworkStore.new(engine, localStore)
+    taskpool = Taskpool.new(num_threads = countProcessors())
    node = CodexNodeRef.new(
      switch = switch,
      networkStore = store,
      engine = engine,
      prover = Prover.none,
-      discovery = blockDiscovery)
+      discovery = blockDiscovery,
+      taskpool = taskpool)

  await node.start()
@@ -4,6 +4,7 @@ import std/math
 import std/times
 import std/sequtils
 import std/importutils
+import std/cpuinfo

 import pkg/chronos
 import pkg/stew/byteutils
@@ -13,6 +14,7 @@ import pkg/questionable/results
 import pkg/stint
 import pkg/poseidon2
 import pkg/poseidon2/io
+import pkg/taskpools

 import pkg/nitro
 import pkg/codexdht/discv5/protocol as discv5
@@ -78,7 +80,7 @@ asyncchecksuite "Test Node - Host contracts":
    manifestBlock = bt.Block.new(
      manifest.encode().tryGet(),
      codec = ManifestCodec).tryGet()
-    erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider)
+    erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider, taskpool)

    manifestCid = manifestBlock.cid
    manifestCidStr = $(manifestCid)
@@ -4,6 +4,7 @@ import std/math
 import std/times
 import std/sequtils
 import std/importutils
+import std/cpuinfo

 import pkg/chronos
 import pkg/stew/byteutils
@@ -13,6 +14,7 @@ import pkg/questionable/results
 import pkg/stint
 import pkg/poseidon2
 import pkg/poseidon2/io
+import pkg/taskpools

 import pkg/nitro
 import pkg/codexdht/discv5/protocol as discv5
@@ -135,7 +137,7 @@ asyncchecksuite "Test Node - Basic":

  test "Setup purchase request":
    let
-      erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider)
+      erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider, taskpool)
      manifest = await storeDataGetManifest(localStore, chunker)
      manifestBlock = bt.Block.new(
        manifest.encode().tryGet(),
@@ -1,5 +1,6 @@
 import std/sequtils
 import std/sugar
+import std/cpuinfo

 import pkg/chronos
 import pkg/datastore
@@ -12,6 +13,7 @@ import pkg/codex/blocktype as bt
 import pkg/codex/rng
 import pkg/codex/utils
 import pkg/codex/indexingstrategy
+import pkg/taskpools

 import ../asynctest
 import ./helpers
@@ -25,6 +27,7 @@ suite "Erasure encode/decode":
  var manifest: Manifest
  var store: BlockStore
  var erasure: Erasure
+  var taskpool: Taskpool

  setup:
    let
@@ -33,7 +36,8 @@ suite "Erasure encode/decode":
      rng = Rng.instance()
      chunker = RandomChunker.new(rng, size = dataSetSize, chunkSize = BlockSize)
      store = RepoStore.new(repoDs, metaDs)
-      erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider)
+      taskpool = Taskpool.new(num_threads = countProcessors())
+      erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider, taskpool)
      manifest = await storeDataGetManifest(store, chunker)

  proc encode(buffers, parity: int): Future[Manifest] {.async.} =