Improving proc/func formatting consistency (#454)

* Fixes/workarounds for nimsuggest failures in codex.nim.

* Remove rng prefix - it appears to work now

* Format `new` procs to be more consistent (see the sketch below)

* Make proc formatting a bit more consistent
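The convention applied throughout the diffs below is roughly the following; a minimal illustrative sketch using a hypothetical `Foo` type, not code from this repository:

type
  Foo = ref object
    bar: string
    baz: int

# Old style: the closing paren and return type ride on the last parameter
# line, and the body constructs the generic alias `T`.
proc newOldStyle(
  T: type Foo,
  bar: string,
  baz = 0): T =
  T(bar: bar, baz: baz)

# New style: the parameter list closes on its own line, the concrete return
# type is spelled out, a short doc comment is added, and the body names the
# concrete type.
proc newNewStyle(
  T: type Foo,
  bar: string,
  baz = 0
): Foo =
  ## Create a new Foo instance
  ##
  Foo(bar: bar, baz: baz)

echo Foo.newOldStyle("a").bar, " ", Foo.newNewStyle("b").baz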
Jaremy Creechley, 2023-06-22 08:11:18 -07:00, committed by GitHub (GPG Key ID: 4AEE18F83AFDEB23)
commit e47b38af11, parent 2a92dc9702
48 changed files with 594 additions and 391 deletions


@@ -242,19 +242,22 @@ proc stop*(b: DiscoveryEngine) {.async.} =
   trace "Discovery engine stopped"

 proc new*(
   T: type DiscoveryEngine,
   localStore: BlockStore,
   peers: PeerCtxStore,
   network: BlockExcNetwork,
   discovery: Discovery,
   pendingBlocks: PendingBlocksManager,
   concurrentAdvReqs = DefaultConcurrentAdvertRequests,
   concurrentDiscReqs = DefaultConcurrentDiscRequests,
   discoveryLoopSleep = DefaultDiscoveryLoopSleep,
   advertiseLoopSleep = DefaultAdvertiseLoopSleep,
   minPeersPerBlock = DefaultMinPeersPerBlock,
-  advertiseType = BlockType.Both): DiscoveryEngine =
-  T(
+  advertiseType = BlockType.Both
+): DiscoveryEngine =
+  ## Create a discovery engine instance for advertising services
+  ##
+  DiscoveryEngine(
     localStore: localStore,
     peers: peers,
     network: network,


@@ -496,15 +496,18 @@ proc blockexcTaskRunner(b: BlockExcEngine) {.async.} =
   trace "Exiting blockexc task runner"

 proc new*(
   T: type BlockExcEngine,
   localStore: BlockStore,
   wallet: WalletRef,
   network: BlockExcNetwork,
   discovery: DiscoveryEngine,
   peerStore: PeerCtxStore,
   pendingBlocks: PendingBlocksManager,
   concurrentTasks = DefaultConcurrentTasks,
-  peersPerRequest = DefaultMaxPeersPerRequest): T =
+  peersPerRequest = DefaultMaxPeersPerRequest
+): BlockExcEngine =
+  ## Create new block exchange engine instance
+  ##
   let
     engine = BlockExcEngine(


@@ -34,10 +34,11 @@ type
     blocks*: Table[Cid, BlockReq] # pending Block requests

 proc getWantHandle*(
   p: PendingBlocksManager,
   cid: Cid,
   timeout = DefaultBlockTimeout,
-  inFlight = false): Future[Block] {.async.} =
+  inFlight = false
+): Future[Block] {.async.} =
   ## Add an event for a block
   ##
@@ -60,9 +61,8 @@ proc getWantHandle*(
   finally:
     p.blocks.del(cid)

-proc resolve*(
-  p: PendingBlocksManager,
-  blocks: seq[Block]) =
+proc resolve*(p: PendingBlocksManager,
+              blocks: seq[Block]) =
   ## Resolve pending blocks
   ##
@@ -73,28 +73,25 @@ proc resolve*(
       trace "Resolving block", cid = blk.cid
       pending[].handle.complete(blk)

-proc setInFlight*(
-  p: PendingBlocksManager,
-  cid: Cid,
-  inFlight = true) =
+proc setInFlight*(p: PendingBlocksManager,
+                  cid: Cid,
+                  inFlight = true) =
   p.blocks.withValue(cid, pending):
     pending[].inFlight = inFlight
     trace "Setting inflight", cid, inFlight = pending[].inFlight

-proc isInFlight*(
-  p: PendingBlocksManager,
-  cid: Cid): bool =
+proc isInFlight*(p: PendingBlocksManager,
+                 cid: Cid
+                ): bool =
   p.blocks.withValue(cid, pending):
     result = pending[].inFlight
     trace "Getting inflight", cid, inFlight = result

-proc pending*(
-  p: PendingBlocksManager,
-  cid: Cid): bool = cid in p.blocks
+proc pending*(p: PendingBlocksManager, cid: Cid): bool =
+  cid in p.blocks

-proc contains*(
-  p: PendingBlocksManager,
-  cid: Cid): bool = p.pending(cid)
+proc contains*(p: PendingBlocksManager, cid: Cid): bool =
+  p.pending(cid)

 iterator wantList*(p: PendingBlocksManager): Cid =
   for k in p.blocks.keys:
@@ -107,5 +104,5 @@ iterator wantHandles*(p: PendingBlocksManager): Future[Block] =
 func len*(p: PendingBlocksManager): int =
   p.blocks.len

-func new*(T: type PendingBlocksManager): T =
-  T()
+func new*(T: type PendingBlocksManager): PendingBlocksManager =
+  PendingBlocksManager()


@@ -102,12 +102,16 @@ proc handleWantList(
 # TODO: make into a template
 proc makeWantList*(
   cids: seq[Cid],
   priority: int = 0,
   cancel: bool = false,
   wantType: WantType = WantType.WantHave,
   full: bool = false,
-  sendDontHave: bool = false): Wantlist =
+  sendDontHave: bool = false
+): Wantlist =
+  ## make list of wanted entries
+  ##
   Wantlist(
     entries: cids.mapIt(
       Entry(
@@ -119,14 +123,15 @@ proc makeWantList*(
     full: full)

 proc sendWantList*(
   b: BlockExcNetwork,
   id: PeerId,
   cids: seq[Cid],
   priority: int32 = 0,
   cancel: bool = false,
   wantType: WantType = WantType.WantHave,
   full: bool = false,
-  sendDontHave: bool = false): Future[void] =
+  sendDontHave: bool = false
+): Future[void] =
   ## Send a want message to peer
   ##
@@ -142,9 +147,10 @@ proc sendWantList*(
   b.send(id, Message(wantlist: msg))

 proc handleBlocks(
   b: BlockExcNetwork,
   peer: NetworkPeer,
-  blocks: seq[pb.Block]) {.async.} =
+  blocks: seq[pb.Block]
+) {.async.} =
   ## Handle incoming blocks
   ##
@@ -203,9 +209,10 @@ proc sendBlockPresence*(
   b.send(id, Message(blockPresences: @presence))

 proc handleAccount(
   network: BlockExcNetwork,
   peer: NetworkPeer,
-  account: Account) {.async.} =
+  account: Account
+) {.async.} =
   ## Handle account info
   ##
@@ -213,34 +220,43 @@ proc handleAccount(
     await network.handlers.onAccount(peer.id, account)

 proc sendAccount*(
   b: BlockExcNetwork,
   id: PeerId,
-  account: Account): Future[void] =
+  account: Account
+): Future[void] =
   ## Send account info to remote
   ##

   b.send(id, Message(account: AccountMessage.init(account)))

 proc sendPayment*(
   b: BlockExcNetwork,
   id: PeerId,
-  payment: SignedState): Future[void] =
+  payment: SignedState
+): Future[void] =
   ## Send payment to remote
   ##

   b.send(id, Message(payment: StateChannelUpdate.init(payment)))

 proc handlePayment(
   network: BlockExcNetwork,
   peer: NetworkPeer,
-  payment: SignedState) {.async.} =
+  payment: SignedState
+) {.async.} =
   ## Handle payment
   ##

   if not network.handlers.onPayment.isNil:
     await network.handlers.onPayment(peer.id, payment)

-proc rpcHandler(b: BlockExcNetwork, peer: NetworkPeer, msg: Message) {.async.} =
+proc rpcHandler(
+  b: BlockExcNetwork,
+  peer: NetworkPeer,
+  msg: Message
+) {.async.} =
+  ## handle rpc messages
+  ##
   try:
     if msg.wantlist.entries.len > 0:
       asyncSpawn b.handleWantList(peer, msg.wantlist)
@@ -325,10 +341,11 @@ method init*(b: BlockExcNetwork) =
   b.codec = Codec

 proc new*(
   T: type BlockExcNetwork,
   switch: Switch,
   connProvider: ConnProvider = nil,
-  maxInflight = MaxInflight): T =
+  maxInflight = MaxInflight
+): BlockExcNetwork =
   ## Create a new BlockExcNetwork instance
   ##


@@ -85,7 +85,8 @@ func new*(
   T: type NetworkPeer,
   peer: PeerId,
   connProvider: ConnProvider,
-  rpcHandler: RPCHandler): T =
+  rpcHandler: RPCHandler
+): NetworkPeer =

   doAssert(not isNil(connProvider),
     "should supply connection provider")


@@ -93,5 +93,5 @@ func selectCheapest*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
   return peers

 proc new*(T: type PeerCtxStore): PeerCtxStore =
-  T(
-    peers: initOrderedTable[PeerId, BlockExcPeerCtx]())
+  ## create new instance of a peer context store
+  PeerCtxStore(peers: initOrderedTable[PeerId, BlockExcPeerCtx]())


@@ -126,11 +126,14 @@ proc `$`*(b: Block): string =
   result &= "\ndata: " & string.fromBytes(b.data)

 func new*(
   T: type Block,
   data: openArray[byte] = [],
   version = CIDv1,
   mcodec = multiCodec("sha2-256"),
-  codec = multiCodec("raw")): ?!T =
+  codec = multiCodec("raw")
+): ?!Block =
+  ## creates a new block for both storage and network IO
+  ##

   let
     hash = ? MultiHash.digest($mcodec, data).mapFailure
@@ -143,10 +146,13 @@ func new*(
     data: @data).success

 func new*(
   T: type Block,
   cid: Cid,
   data: openArray[byte],
-  verify: bool = true): ?!T =
+  verify: bool = true
+): ?!Block =
+  ## creates a new block for both storage and network IO
+  ##

   let
     mhash = ? cid.mhash.mapFailure


@@ -60,21 +60,25 @@ proc getBytes*(c: Chunker): Future[seq[byte]] {.async.} =
   return move buff

 func new*(
   T: type Chunker,
   reader: Reader,
   chunkSize = DefaultChunkSize,
-  pad = true): T =
-  T(reader: reader,
+  pad = true
+): Chunker =
+  ## create a new Chunker instance
+  ##
+  Chunker(
+    reader: reader,
     offset: 0,
     chunkSize: chunkSize,
     pad: pad)

 proc new*(
   T: type LPStreamChunker,
   stream: LPStream,
   chunkSize = DefaultChunkSize,
-  pad = true): T =
+  pad = true
+): LPStreamChunker =
   ## create the default File chunker
   ##
@@ -92,16 +96,17 @@ proc new*(
       return res

-  T.new(
+  LPStreamChunker.new(
     reader = reader,
     chunkSize = chunkSize,
     pad = pad)

 proc new*(
   T: type FileChunker,
   file: File,
   chunkSize = DefaultChunkSize,
-  pad = true): T =
+  pad = true
+): FileChunker =
   ## create the default File chunker
   ##
@@ -123,7 +128,7 @@ proc new*(
       return total

-  T.new(
+  FileChunker.new(
     reader = reader,
     chunkSize = chunkSize,
     pad = pad)


@@ -51,7 +51,13 @@ type
   CodexPrivateKey* = libp2p.PrivateKey # alias

-proc bootstrapInteractions(config: CodexConf, repo: RepoStore): Future[Contracts] {.async.} =
+proc bootstrapInteractions(
+  config: CodexConf,
+  repo: RepoStore
+): Future[Contracts] {.async.} =
+  ## bootstrap interactions and return contracts
+  ## using clients, hosts, validators pairings
+  ##

   if not config.persistence and not config.validator:
     if config.ethAccount.isSome:
@@ -150,7 +156,8 @@ proc stop*(s: CodexServer) {.async.} =
 proc new*(
   T: type CodexServer,
   config: CodexConf,
-  privateKey: CodexPrivateKey): CodexServer =
+  privateKey: CodexPrivateKey
+): CodexServer =
   ## create CodexServer including setting up datastore, repostore, etc
   let
     switch = SwitchBuilder
@@ -231,9 +238,10 @@ proc new*(
   switch.mount(network)

-  T(
+  CodexServer(
     config: config,
     codexNode: codexNode,
     restServer: restServer,
     repoStore: repoStore,
-    maintenance: maintenance)
+    maintenance: maintenance
+  )


@@ -14,10 +14,14 @@ type
     sales*: Sales
     proving*: Proving

-proc new*(_: type HostInteractions,
-          clock: OnChainClock,
-          sales: Sales,
-          proving: Proving): HostInteractions =
+proc new*(
+  _: type HostInteractions,
+  clock: OnChainClock,
+  sales: Sales,
+  proving: Proving
+): HostInteractions =
+  ## Create a new HostInteractions instance
+  ##
   HostInteractions(clock: clock, sales: sales, proving: proving)

 method start*(self: HostInteractions) {.async.} =


@@ -74,7 +74,7 @@ method getRequest(market: OnChainMarket,
     raise e

 method requestState*(market: OnChainMarket,
                      requestId: RequestId): Future[?RequestState] {.async.} =
   try:
     return some await market.contract.requestState(requestId)
   except ProviderError as e:
@@ -100,9 +100,8 @@ method getHost(market: OnChainMarket,
   else:
     return none Address

-method getActiveSlot*(
-  market: OnChainMarket,
-  slotId: SlotId): Future[?Slot] {.async.} =
+method getActiveSlot*(market: OnChainMarket,
+                      slotId: SlotId): Future[?Slot] {.async.} =

   try:
     return some await market.contract.getActiveSlot(slotId)
@@ -154,9 +153,11 @@ method markProofAsMissing*(market: OnChainMarket,
                            period: Period) {.async.} =
   await market.contract.markProofAsMissing(id, period)

-method canProofBeMarkedAsMissing*(market: OnChainMarket,
-                                  id: SlotId,
-                                  period: Period): Future[bool] {.async.} =
+method canProofBeMarkedAsMissing*(
+  market: OnChainMarket,
+  id: SlotId,
+  period: Period
+): Future[bool] {.async.} =
   let provider = market.contract.provider
   let contractWithoutSigner = market.contract.connect(provider)
   let overrides = CallOverrides(blockTag: some BlockTag.pending)


@@ -56,25 +56,27 @@ proc toNodeId*(host: ca.Address): NodeId =
   readUintBE[256](keccak256.digest(host.toArray).data)

 proc findPeer*(
   d: Discovery,
-  peerId: PeerId): Future[?PeerRecord] {.async.} =
+  peerId: PeerId
+): Future[?PeerRecord] {.async.} =
   trace "protocol.resolve..."
+  ## Find peer using the given Discovery object
+  ##
   let
     node = await d.protocol.resolve(toNodeId(peerId))

   return
     if node.isSome():
-      trace "protocol.resolve some data"
       node.get().record.data.some
     else:
-      trace "protocol.resolve none"
       PeerRecord.none

 method find*(
   d: Discovery,
-  cid: Cid): Future[seq[SignedPeerRecord]] {.async, base.} =
+  cid: Cid
+): Future[seq[SignedPeerRecord]] {.async, base.} =
   ## Find block providers
   ##
   trace "Finding providers for block", cid
   without providers =?
@@ -98,8 +100,9 @@ method provide*(d: Discovery, cid: Cid) {.async, base.} =
     trace "Provided to nodes", nodes = nodes.len

 method find*(
   d: Discovery,
-  host: ca.Address): Future[seq[SignedPeerRecord]] {.async, base.} =
+  host: ca.Address
+): Future[seq[SignedPeerRecord]] {.async, base.} =
   ## Find host providers
   ##
@@ -129,11 +132,20 @@ method provide*(d: Discovery, host: ca.Address) {.async, base.} =
   if nodes.len > 0:
     trace "Provided to nodes", nodes = nodes.len

-method removeProvider*(d: Discovery, peerId: PeerId): Future[void] {.base.} =
+method removeProvider*(
+  d: Discovery,
+  peerId: PeerId
+): Future[void] {.base.} =
+  ## Remove provider from providers table
+  ##
   trace "Removing provider", peerId
   d.protocol.removeProvidersLocal(peerId)

 proc updateAnnounceRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
+  ## Update providers record
+  ##
   d.announceAddrs = @addrs

   trace "Updating announce record", addrs = d.announceAddrs
@@ -146,6 +158,9 @@ proc updateAnnounceRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
       .expect("Should update SPR")

 proc updateDhtRecord*(d: Discovery, ip: ValidIpAddress, port: Port) =
+  ## Update providers record
+  ##
   trace "Updating Dht record", ip, port = $port
   d.dhtRecord = SignedPeerRecord.init(
     d.key, PeerRecord.init(d.peerId, @[
@@ -154,10 +169,6 @@ proc updateDhtRecord*(d: Discovery, ip: ValidIpAddress, port: Port) =
         IpTransportProtocol.udpProtocol,
         port)])).expect("Should construct signed record").some

-  if not d.protocol.isNil:
-    d.protocol.updateRecord(d.dhtRecord)
-      .expect("Should update SPR")
-
 proc start*(d: Discovery) {.async.} =
   d.protocol.open()
   await d.protocol.start()
@@ -166,17 +177,19 @@ proc stop*(d: Discovery) {.async.} =
   await d.protocol.closeWait()

 proc new*(
   T: type Discovery,
   key: PrivateKey,
   bindIp = ValidIpAddress.init(IPv4_any()),
   bindPort = 0.Port,
   announceAddrs: openArray[MultiAddress],
   bootstrapNodes: openArray[SignedPeerRecord] = [],
-  store: Datastore = SQLiteDatastore.new(Memory)
-    .expect("Should not fail!")): T =
+  store: Datastore = SQLiteDatastore.new(Memory).expect("Should not fail!")
+): Discovery =
+  ## Create a new Discovery node instance for the given key and datastore
+  ##
   var
-    self = T(
+    self = Discovery(
       key: key,
       peerId: PeerId.init(key).expect("Should construct PeerId"))


@@ -12,8 +12,14 @@ import ./erasure/backends/leopard

 export erasure

-func leoEncoderProvider*(size, buffers, parity: int): EncoderBackend {.raises: [Defect].} =
+func leoEncoderProvider*(
+  size, buffers, parity: int
+): EncoderBackend {.raises: [Defect].} =
+  ## create new Leo Encoder
   LeoEncoderBackend.new(size, buffers, parity)

-func leoDecoderProvider*(size, buffers, parity: int): DecoderBackend {.raises: [Defect].} =
-  LeoDecoderBackend.new(size, buffers, parity)
+func leoDecoderProvider*(
+  size, buffers, parity: int
+): DecoderBackend {.raises: [Defect].} =
+  ## create new Leo Decoder
+  LeoDecoderBackend.new(size, buffers, parity)


@@ -23,17 +23,25 @@ type
   DecoderBackend* = ref object of Backend

 method release*(self: Backend) {.base.} =
+  ## release the backend
+  ##
   raiseAssert("not implemented!")

 method encode*(
   self: EncoderBackend,
   buffers,
-  parity: var openArray[seq[byte]]): Result[void, cstring] {.base.} =
+  parity: var openArray[seq[byte]]
+): Result[void, cstring] {.base.} =
+  ## encode buffers using a backend
+  ##
   raiseAssert("not implemented!")

 method decode*(
   self: DecoderBackend,
   buffers,
   parity,
-  recovered: var openArray[seq[byte]]): Result[void, cstring] {.base.} =
+  recovered: var openArray[seq[byte]]
+): Result[void, cstring] {.base.} =
+  ## decode buffers using a backend
+  ##
   raiseAssert("not implemented!")


@@ -22,9 +22,11 @@ type
     decoder*: Option[LeoDecoder]

 method encode*(
   self: LeoEncoderBackend,
   data,
-  parity: var openArray[seq[byte]]): Result[void, cstring] =
+  parity: var openArray[seq[byte]]
+): Result[void, cstring] =
+  ## Encode data using Leopard backend
   if parity.len == 0:
     return ok()
@@ -41,12 +43,15 @@ method encode*(
   encoder.encode(data, parity)

 method decode*(
   self: LeoDecoderBackend,
   data,
   parity,
-  recovered: var openArray[seq[byte]]): Result[void, cstring] =
+  recovered: var openArray[seq[byte]]
+): Result[void, cstring] =
+  ## Decode data using given Leopard backend

-  var decoder = if self.decoder.isNone:
+  var decoder =
+    if self.decoder.isNone:
       self.decoder = (? LeoDecoder.init(
         self.blockSize,
         self.buffers,
@@ -65,22 +70,28 @@ method release*(self: LeoDecoderBackend) =
   if self.decoder.isSome:
     self.decoder.get().free()

-func new*(
+proc new*(
   T: type LeoEncoderBackend,
   blockSize,
   buffers,
-  parity: int): T =
-  T(
+  parity: int
+): LeoEncoderBackend =
+  ## Create an instance of an Leopard Encoder backend
+  ##
+  LeoEncoderBackend(
     blockSize: blockSize,
     buffers: buffers,
     parity: parity)

-func new*(
+proc new*(
   T: type LeoDecoderBackend,
   blockSize,
   buffers,
-  parity: int): T =
-  T(
+  parity: int
+): LeoDecoderBackend =
+  ## Create an instance of an Leopard Decoder backend
+  ##
+  LeoDecoderBackend(
     blockSize: blockSize,
     buffers: buffers,
     parity: parity)


@@ -63,10 +63,11 @@ type
     store*: BlockStore

 proc encode*(
   self: Erasure,
   manifest: Manifest,
   blocks: int,
-  parity: int): Future[?!Manifest] {.async.} =
+  parity: int
+): Future[?!Manifest] {.async.} =
   ## Encode a manifest into one that is erasure protected.
   ##
   ## `manifest` - the original manifest to be encoded
@@ -153,8 +154,9 @@ proc encode*(
   return encoded.success

 proc decode*(
   self: Erasure,
-  encoded: Manifest): Future[?!Manifest] {.async.} =
+  encoded: Manifest
+): Future[?!Manifest] {.async.} =
   ## Decode a protected manifest into it's original
   ## manifest
   ##
@@ -265,10 +267,12 @@ proc stop*(self: Erasure) {.async.} =
   return

 proc new*(
   T: type Erasure,
   store: BlockStore,
   encoderProvider: EncoderProvider,
-  decoderProvider: DecoderProvider): Erasure =
+  decoderProvider: DecoderProvider
+): Erasure =
+  ## Create a new Erasure instance for encoding and decoding manifests
   Erasure(
     store: store,


@@ -14,8 +14,9 @@ type
   CodexResult*[T] = Result[T, ref CodexError]

 template mapFailure*(
   exp: untyped,
-  exc: typed = type CodexError): untyped =
+  exc: typed = type CodexError
+): untyped =
   ## Convert `Result[T, E]` to `Result[E, ref CatchableError]`
   ##


@@ -166,8 +166,9 @@ func decode*(_: DagPBCoder, data: openArray[byte]): ?!Manifest =
   self.success

 proc encode*(
   self: Manifest,
-  encoder = ManifestContainers[$DagPBCodec]): ?!seq[byte] =
+  encoder = ManifestContainers[$DagPBCodec]
+): ?!seq[byte] =
   ## Encode a manifest using `encoder`
   ##
@@ -177,9 +178,10 @@ proc encode*(
   encoder.encode(self)

 func decode*(
   _: type Manifest,
   data: openArray[byte],
-  decoder = ManifestContainers[$DagPBCodec]): ?!Manifest =
+  decoder = ManifestContainers[$DagPBCodec]
+): ?!Manifest =
   ## Decode a manifest using `decoder`
   ##


@@ -159,14 +159,15 @@ proc cid*(self: Manifest): ?!Cid =
 ############################################################

 proc new*(
   T: type Manifest,
   blocks: openArray[Cid] = [],
   protected = false,
   version = CIDv1,
   hcodec = multiCodec("sha2-256"),
   codec = multiCodec("raw"),
-  blockSize = BlockSize): ?!T =
-  ## Create a manifest using array of `Cid`s
+  blockSize = BlockSize
+): ?!Manifest =
+  ## Create a manifest using an array of `Cid`s
   ##

   if hcodec notin EmptyDigests[version]:
@@ -182,9 +183,10 @@ proc new*(
     protected: protected).success

 proc new*(
   T: type Manifest,
   manifest: Manifest,
-  ecK, ecM: int): ?!Manifest =
+  ecK, ecM: int
+): ?!Manifest =
   ## Create an erasure protected dataset from an
   ## un-protected one
   ##
@@ -223,5 +225,8 @@ proc new*(
 proc new*(
   T: type Manifest,
   data: openArray[byte],
-  decoder = ManifestContainers[$DagPBCodec]): ?!T =
+  decoder = ManifestContainers[$DagPBCodec]
+): ?!Manifest =
+  ## Create a manifest instance from given data
+  ##
   Manifest.decode(data, decoder)


@@ -60,18 +60,23 @@ type

 proc findPeer*(
   node: CodexNodeRef,
-  peerId: PeerId): Future[?PeerRecord] {.async.} =
+  peerId: PeerId
+): Future[?PeerRecord] {.async.} =
+  ## Find peer using the discovery service from the given CodexNode
+  ##
   return await node.discovery.findPeer(peerId)

 proc connect*(
   node: CodexNodeRef,
   peerId: PeerId,
-  addrs: seq[MultiAddress]): Future[void] =
+  addrs: seq[MultiAddress]
+): Future[void] =
   node.switch.connect(peerId, addrs)

 proc fetchManifest*(
   node: CodexNodeRef,
-  cid: Cid): Future[?!Manifest] {.async.} =
+  cid: Cid
+): Future[?!Manifest] {.async.} =
   ## Fetch and decode a manifest block
   ##
@@ -93,10 +98,11 @@ proc fetchManifest*(
   return manifest.success

 proc fetchBatched*(
   node: CodexNodeRef,
   manifest: Manifest,
   batchSize = FetchBatch,
-  onBatch: BatchProc = nil): Future[?!void] {.async, gcsafe.} =
+  onBatch: BatchProc = nil
+): Future[?!void] {.async, gcsafe.} =
   ## Fetch manifest in batches of `batchSize`
   ##
@@ -122,8 +128,9 @@ proc fetchBatched*(
   return success()

 proc retrieve*(
   node: CodexNodeRef,
-  cid: Cid): Future[?!LPStream] {.async.} =
+  cid: Cid
+): Future[?!LPStream] {.async.} =
   ## Retrieve by Cid a single block or an entire dataset described by manifest
   ##
@@ -175,9 +182,10 @@ proc retrieve*(
   return failure("Unable to retrieve Cid!")

 proc store*(
   self: CodexNodeRef,
   stream: LPStream,
-  blockSize = BlockSize): Future[?!Cid] {.async.} =
+  blockSize = BlockSize
+): Future[?!Cid] {.async.} =
   ## Save stream contents as dataset with given blockSize
   ## to nodes's BlockStore, and return Cid of its manifest
   ##
@@ -238,15 +246,17 @@ proc store*(
   return manifest.cid.success

-proc requestStorage*(self: CodexNodeRef,
-                     cid: Cid,
-                     duration: UInt256,
-                     proofProbability: UInt256,
-                     nodes: uint,
-                     tolerance: uint,
-                     reward: UInt256,
-                     collateral: UInt256,
-                     expiry = UInt256.none): Future[?!PurchaseId] {.async.} =
+proc requestStorage*(
+  self: CodexNodeRef,
+  cid: Cid,
+  duration: UInt256,
+  proofProbability: UInt256,
+  nodes: uint,
+  tolerance: uint,
+  reward: UInt256,
+  collateral: UInt256,
+  expiry = UInt256.none
+): Future[?!PurchaseId] {.async.} =
   ## Initiate a request for storage sequence, this might
   ## be a multistep procedure.
   ##
@@ -311,14 +321,17 @@ proc requestStorage*(self: CodexNodeRef,
   return success purchase.id

 proc new*(
   T: type CodexNodeRef,
   switch: Switch,
   store: BlockStore,
   engine: BlockExcEngine,
   erasure: Erasure,
   discovery: Discovery,
-  contracts = Contracts.default): T =
-  T(
+  contracts = Contracts.default
+): CodexNodeRef =
+  ## Create new instance of a Codex node, call `start` to run it
+  ##
+  CodexNodeRef(
     switch: switch,
     blockStore: store,
     engine: engine,


@@ -47,7 +47,8 @@ proc stop*(purchasing: Purchasing) {.async.} =
   discard

 proc populate*(purchasing: Purchasing,
-               request: StorageRequest): Future[StorageRequest] {.async.} =
+               request: StorageRequest
+              ): Future[StorageRequest] {.async.} =
   result = request
   if result.ask.proofProbability == 0.u256:
     result.ask.proofProbability = purchasing.proofProbability
@@ -60,7 +61,8 @@ proc populate*(purchasing: Purchasing,
   result.client = await purchasing.market.getSigner()

 proc purchase*(purchasing: Purchasing,
-               request: StorageRequest): Future[Purchase] {.async.} =
+               request: StorageRequest
+              ): Future[Purchase] {.async.} =
   let request = await purchasing.populate(request)
   let purchase = Purchase.new(request, purchasing.market, purchasing.clock)
   purchase.start()


@@ -24,10 +24,14 @@ export Purchase
 export purchaseid
 export statemachine

-func new*(_: type Purchase,
-          requestId: RequestId,
-          market: Market,
-          clock: Clock): Purchase =
+func new*(
+  _: type Purchase,
+  requestId: RequestId,
+  market: Market,
+  clock: Clock
+): Purchase =
+  ## create a new instance of a Purchase
+  ##
   Purchase(
     future: Future[void].new(),
     requestId: requestId,
@@ -35,10 +39,13 @@ func new*(_: type Purchase,
     clock: clock
   )

-func new*(_: type Purchase,
-          request: StorageRequest,
-          market: Market,
-          clock: Clock): Purchase =
+func new*(
+  _: type Purchase,
+  request: StorageRequest,
+  market: Market,
+  clock: Clock
+): Purchase =
+  ## Create a new purchase using the given market and clock
   let purchase = Purchase.new(request.id, market, clock)
   purchase.request = some request
   return purchase


@@ -18,7 +18,10 @@ type
     nodes*: ?uint
     tolerance*: ?uint

-proc fromJson*(_: type Availability, bytes: seq[byte]): ?!Availability =
+proc fromJson*(
+  _: type Availability,
+  bytes: seq[byte]
+): ?!Availability =
   let json = ?catch parseJson(string.fromBytes(bytes))
   let size = ?catch UInt256.fromDecimal(json["size"].getStr)
   let duration = ?catch UInt256.fromDecimal(json["duration"].getStr)
@@ -26,8 +29,10 @@ proc fromJson*(_: type Availability, bytes: seq[byte]): ?!Availability =
   let maxCollateral = ?catch UInt256.fromDecimal(json["maxCollateral"].getStr)
   success Availability.init(size, duration, minPrice, maxCollateral)

-proc fromJson*(_: type StorageRequestParams,
-               bytes: seq[byte]): ?! StorageRequestParams =
+proc fromJson*(
+  _: type StorageRequestParams,
+  bytes: seq[byte]
+): ?! StorageRequestParams =
   let json = ?catch parseJson(string.fromBytes(bytes))
   let duration = ?catch UInt256.fromDecimal(json["duration"].getStr)
   let proofProbability = ?catch UInt256.fromDecimal(json["proofProbability"].getStr)


@@ -58,9 +58,8 @@ const
   SalesKey = (CodexMetaKey / "sales").tryGet # TODO: move to sales module
   ReservationsKey = (SalesKey / "reservations").tryGet

-proc new*(
-  T: type Reservations,
-  repo: RepoStore): Reservations =
+proc new*(T: type Reservations,
+          repo: RepoStore): Reservations =

   T(repo: repo)


@@ -321,10 +321,11 @@ proc generateQuery*(tau: Tau, l: int): seq[QElement] =
     result.add(q)

 proc generateProof*(
   stream: SeekableStream,
   q: seq[QElement],
   authenticators: seq[blst_p1],
-  s: int64): Future[Proof] {.async.} =
+  s: int64
+): Future[Proof] {.async.} =
   ## Generata BLS proofs for a given query
   ##
@@ -432,11 +433,12 @@ proc verifyProof*(
   return verifyPairings(sum, self.spk.key, sigma, g)

 proc init*(
   T: type PoR,
   stream: SeekableStream,
   ssk: SecretKey,
   spk: PublicKey,
-  blockSize: int64): Future[PoR] {.async.} =
+  blockSize: int64
+): Future[PoR] {.async.} =
   ## Set up the POR scheme by generating tags and metadata
   ##


@@ -32,10 +32,11 @@ type
     stpStore*: StpStore

 proc upload*(
   self: StorageProofs,
   cid: Cid,
   indexes: seq[int],
-  host: ca.Address): Future[?!void] {.async.} =
+  host: ca.Address
+): Future[?!void] {.async.} =
   ## Upload authenticators
   ##
@@ -56,8 +57,9 @@ proc upload*(
 #   discard

 proc setupProofs*(
   self: StorageProofs,
-  manifest: Manifest): Future[?!void] {.async.} =
+  manifest: Manifest
+): Future[?!void] {.async.} =
   ## Setup storage authentication
   ##
@@ -75,10 +77,11 @@ proc setupProofs*(
   return await self.stpStore.store(por.toMessage(), cid)

 proc init*(
   T: type StorageProofs,
   network: StpNetwork,
   store: BlockStore,
-  stpStore: StpStore): StorageProofs =
+  stpStore: StpStore
+): StorageProofs =
   var
     self = T(


@@ -36,11 +36,12 @@ type
     tagsHandle*: TagsHandler

 proc uploadTags*(
   self: StpNetwork,
   cid: Cid,
   indexes: seq[int],
   tags: seq[seq[byte]],
-  host: ca.Address): Future[?!void] {.async.} =
+  host: ca.Address
+): Future[?!void] {.async.} =
   # Upload tags to `host`
   #
@@ -93,7 +94,9 @@ method init*(self: StpNetwork) =
 proc new*(
   T: type StpNetwork,
   switch: Switch,
-  discovery: Discovery): StpNetwork =
+  discovery: Discovery
+): StpNetwork =
+  ## create a new StpNetwork instance
   let
     self = StpNetwork(
       switch: switch,


@@ -33,7 +33,8 @@ template stpPath*(self: StpStore, cid: Cid): string =

 proc retrieve*(
   self: StpStore,
-  cid: Cid): Future[?!PorMessage] {.async.} =
+  cid: Cid
+): Future[?!PorMessage] {.async.} =
   ## Retrieve authenticators from data store
   ##
@@ -51,7 +52,8 @@ proc retrieve*(

 proc store*(
   self: StpStore,
   por: PorMessage,
-  cid: Cid): Future[?!void] {.async.} =
+  cid: Cid
+): Future[?!void] {.async.} =
   ## Persist storage proofs
   ##
@@ -74,9 +76,10 @@ proc store*(
   return success()

 proc retrieve*(
   self: StpStore,
   cid: Cid,
-  blocks: seq[int]): Future[?!seq[Tag]] {.async.} =
+  blocks: seq[int]
+): Future[?!seq[Tag]] {.async.} =
   var tags: seq[Tag]
   for b in blocks:
     var tag = Tag(idx: b)
@@ -92,9 +95,10 @@ proc retrieve*(
   return tags.success

 proc store*(
   self: StpStore,
   tags: seq[Tag],
-  cid: Cid): Future[?!void] {.async.} =
+  cid: Cid
+): Future[?!void] {.async.} =
   let
     dir = self.stpPath(cid)
@@ -117,7 +121,10 @@ proc store*(
 proc init*(
   T: type StpStore,
   authDir: string,
-  postfixLen: int = 2): StpStore =
-  T(
+  postfixLen: int = 2
+): StpStore =
+  ## Init StpStore
+  ##
+  StpStore(
     authDir: authDir,
     postfixLen: postfixLen)


@@ -45,9 +45,10 @@ method getBlock*(self: BlockStore, cid: Cid): Future[?!Block] {.base.} =
   raiseAssert("Not implemented!")

 method putBlock*(
   self: BlockStore,
   blk: Block,
-  ttl = Duration.none): Future[?!void] {.base.} =
+  ttl = Duration.none
+): Future[?!void] {.base.} =
   ## Put a block to the blockstore
   ##


@@ -79,8 +79,9 @@ func cids(self: CacheStore): (iterator: Cid {.gcsafe.}) =
       yield cid

 method listBlocks*(
   self: CacheStore,
-  blockType = BlockType.Manifest): Future[?!BlocksIter] {.async.} =
+  blockType = BlockType.Manifest
+): Future[?!BlocksIter] {.async.} =
   ## Get the list of blocks in the BlockStore. This is an intensive operation
   ##
@@ -193,7 +194,11 @@ func new*(
     blocks: openArray[Block] = [],
     cacheSize: Positive = DefaultCacheSize, # in bytes
     chunkSize: Positive = DefaultChunkSize # in bytes
   ): CacheStore {.raises: [Defect, ValueError].} =
+  ## Create a new CacheStore instance
+  ##
+  ## `cacheSize` and `chunkSize` are both in bytes
+  ##

   if cacheSize < chunkSize:
     raise newException(ValueError, "cacheSize cannot be less than chunkSize")


@@ -33,14 +33,19 @@ type
     numberOfBlocksPerInterval: int
     offset: int

-proc new*(T: type BlockMaintainer,
+proc new*(
+  T: type BlockMaintainer,
   repoStore: RepoStore,
   interval: Duration,
   numberOfBlocksPerInterval = 100,
   timer = Timer.new(),
   clock: Clock = SystemClock.new()
-  ): T =
-  T(
+): BlockMaintainer =
+  ## Create new BlockMaintainer instance
+  ##
+  ## Call `start` to begin looking for for expired blocks
+  ##
+  BlockMaintainer(
     repoStore: repoStore,
     interval: interval,
     numberOfBlocksPerInterval: numberOfBlocksPerInterval,


@@ -47,9 +47,10 @@ method getBlock*(self: NetworkStore, cid: Cid): Future[?!bt.Block] {.async.} =
   return success blk

 method putBlock*(
   self: NetworkStore,
   blk: bt.Block,
-  ttl = Duration.none): Future[?!void] {.async.} =
+  ttl = Duration.none
+): Future[?!void] {.async.} =
   ## Store block locally and notify the network
   ##
@@ -88,11 +89,10 @@ method close*(self: NetworkStore): Future[void] {.async.} =

 proc new*(
   T: type NetworkStore,
   engine: BlockExcEngine,
-  localStore: BlockStore): T =
-
-  let
-    self = NetworkStore(
+  localStore: BlockStore
+): NetworkStore =
+  ## Create new instance of a NetworkStore
+  ##
+  NetworkStore(
     localStore: localStore,
     engine: engine)
-  return self


@@ -92,7 +92,13 @@ proc getBlockExpirationTimestamp(self: RepoStore, ttl: ?Duration): SecondsSince1970 =
   let duration = ttl |? self.blockTtl
   self.clock.now() + duration.seconds

-proc getBlockExpirationEntry(self: RepoStore, batch: var seq[BatchEntry], cid: Cid, ttl: ?Duration): ?!BatchEntry =
+proc getBlockExpirationEntry(
+  self: RepoStore,
+  batch: var seq[BatchEntry],
+  cid: Cid,
+  ttl: ?Duration
+): ?!BatchEntry =
+  ## Get an expiration entry for a batch
   without key =? createBlockExpirationMetadataKey(cid), err:
     return failure(err)
@@ -100,9 +106,10 @@ proc getBlockExpirationEntry(self: RepoStore, batch: var seq[BatchEntry], cid: Cid, ttl: ?Duration): ?!BatchEntry =
   return success((key, value))

 method putBlock*(
   self: RepoStore,
   blk: Block,
-  ttl = Duration.none): Future[?!void] {.async.} =
+  ttl = Duration.none
+): Future[?!void] {.async.} =
   ## Put a block to the blockstore
   ##
@@ -201,8 +208,9 @@ method hasBlock*(self: RepoStore, cid: Cid): Future[?!bool] {.async.} =
   return await self.repoDs.has(key)

 method listBlocks*(
   self: RepoStore,
-  blockType = BlockType.Manifest): Future[?!BlocksIter] {.async.} =
+  blockType = BlockType.Manifest
+): Future[?!BlocksIter] {.async.} =
   ## Get the list of blocks in the RepoStore.
   ## This is an intensive operation
   ##
@@ -237,7 +245,13 @@ proc createBlockExpirationQuery(maxNumber: int, offset: int): ?!Query =
   let queryKey = ? createBlockExpirationMetadataQueryKey()
   success Query.init(queryKey, offset = offset, limit = maxNumber)

-method getBlockExpirations*(self: RepoStore, maxNumber: int, offset: int): Future[?!BlockExpirationIter] {.async, base.} =
+method getBlockExpirations*(
+  self: RepoStore,
+  maxNumber: int,
+  offset: int
+): Future[?!BlockExpirationIter] {.async, base.} =
+  ## Get block experiartions from the given RepoStore
+  ##
   without query =? createBlockExpirationQuery(maxNumber, offset), err:
     trace "Unable to format block expirations query"
     return failure(err)
@@ -388,14 +402,17 @@ proc stop*(self: RepoStore): Future[void] {.async.} =
   self.started = false

 func new*(
   T: type RepoStore,
   repoDs: Datastore,
   metaDs: Datastore,
   clock: Clock = SystemClock.new(),
   postFixLen = 2,
   quotaMaxBytes = DefaultQuotaBytes,
-  blockTtl = DefaultBlockTtl): T =
-  T(
+  blockTtl = DefaultBlockTtl
+): RepoStore =
+  ## Create new instance of a RepoStore
+  ##
+  RepoStore(
     repoDs: repoDs,
     metaDs: metaDs,
     clock: clock,


@@ -32,9 +32,12 @@ method initStream*(self: AsyncStreamWrapper) =
   procCall LPStream(self).initStream()

 proc new*(
   C: type AsyncStreamWrapper,
   reader: AsyncStreamReader = nil,
-  writer: AsyncStreamWriter = nil): AsyncStreamWrapper =
+  writer: AsyncStreamWriter = nil
+): AsyncStreamWrapper =
+  ## Create new instance of an asynchronous stream wrapper
+  ##
   let
     stream = C(reader: reader, writer: writer)
@@ -57,9 +60,10 @@ template withExceptions(body: untyped) =
     raise newException(LPStreamError, exc.msg)

 method readOnce*(
   self: AsyncStreamWrapper,
   pbytes: pointer,
-  nbytes: int): Future[int] {.async.} =
+  nbytes: int
+): Future[int] {.async.} =
   trace "Reading bytes from reader", bytes = nbytes

   if isNil(self.reader):
@@ -75,7 +79,8 @@ method readOnce*(

 proc completeWrite(
   self: AsyncStreamWrapper,
   fut: Future[void],
-  msgLen: int): Future[void] {.async.} =
+  msgLen: int
+): Future[void] {.async.} =
   withExceptions:
     await fut


@@ -38,12 +38,14 @@ type
     pad*: bool # Pad last block to manifest.blockSize?

 proc new*(
   T: type StoreStream,
   store: BlockStore,
   manifest: Manifest,
-  pad = true): T =
-
-  result = T(
+  pad = true
+): StoreStream =
+  ## Create a new StoreStream instance for a given store and manifest
+  ##
+  result = StoreStream(
     store: store,
     manifest: manifest,
     pad: pad,
@@ -62,12 +64,14 @@ method atEof*(self: StoreStream): bool =
   self.offset >= self.size

 method readOnce*(
   self: StoreStream,
   pbytes: pointer,
-  nbytes: int): Future[int] {.async.} =
+  nbytes: int
+): Future[int] {.async.} =
   ## Read `nbytes` from current position in the StoreStream into output buffer pointed by `pbytes`.
   ## Return how many bytes were actually read before EOF was encountered.
   ## Raise exception if we are already at EOF.
+  ##

   trace "Reading from manifest", cid = self.manifest.cid.get(), blocks = self.manifest.len
   if self.atEof:


@@ -17,9 +17,10 @@ import pkg/libp2p
 import pkg/stew/shims/net

 func remapAddr*(
   address: MultiAddress,
   ip: Option[ValidIpAddress] = ValidIpAddress.none,
-  port: Option[Port] = Port.none): MultiAddress =
+  port: Option[Port] = Port.none
+): MultiAddress =
   ## Remap addresses to new IP and/or Port
   ##


@@ -34,8 +34,9 @@ type
     Empty, Full

 proc newAsyncHeapQueue*[T](
   maxsize: int = 0,
-  queueType: QueueType = QueueType.Min): AsyncHeapQueue[T] =
+  queueType: QueueType = QueueType.Min
+): AsyncHeapQueue[T] =
   ## Creates a new asynchronous queue ``AsyncHeapQueue``.
   ##


@@ -22,10 +22,9 @@ type
     name: string
     loopFuture: Future[void]

-proc new*(T: type Timer, timerName = "Unnamed Timer"): T =
-  T(
-    name: timerName
-  )
+proc new*(T: type Timer, timerName = "Unnamed Timer"): Timer =
+  ## Create a new Timer intance with the given name
+  Timer(name: timerName)

 proc timerLoop(timer: Timer) {.async.} =
   try:


@@ -22,10 +22,13 @@ type
 logScope:
   topics = "codex validator"

-proc new*(_: type Validation,
-          clock: Clock,
-          market: Market,
-          maxSlots: int): Validation =
+proc new*(
+  _: type Validation,
+  clock: Clock,
+  market: Market,
+  maxSlots: int
+): Validation =
+  ## Create a new Validation instance
   Validation(clock: clock, market: market, maxSlots: maxSlots)

 proc slots*(validation: Validation): seq[SlotId] =


@@ -31,9 +31,10 @@ proc lenPrefix*(msg: openArray[byte]): seq[byte] =
   return buf

 proc corruptBlocks*(
   store: BlockStore,
   manifest: Manifest,
-  blks, bytes: int): Future[seq[int]] {.async.} =
+  blks, bytes: int
+): Future[seq[int]] {.async.} =
   var pos: seq[int]

   doAssert blks < manifest.len


@@ -12,8 +12,11 @@ type
     until: SecondsSince1970
     future: Future[void]

-func new*(_: type MockClock,
-          time: SecondsSince1970 = getTime().toUnix): MockClock =
+func new*(
+  _: type MockClock,
+  time: SecondsSince1970 = getTime().toUnix
+): MockClock =
+  ## Create a mock clock instance
   MockClock(time: time)

 proc set*(clock: MockClock, time: SecondsSince1970) =


@@ -24,17 +24,21 @@ type
     publishHostProvideHandler*: proc(d: MockDiscovery, host: ca.Address):
       Future[void] {.gcsafe.}

-proc new*(T: type MockDiscovery): T =
-  T()
+proc new*(T: type MockDiscovery): MockDiscovery =
+  MockDiscovery()

 proc findPeer*(
   d: Discovery,
-  peerId: PeerId): Future[?PeerRecord] {.async.} =
+  peerId: PeerId
+): Future[?PeerRecord] {.async.} =
+  ## mock find a peer - always return none
+  ##
   return none(PeerRecord)

 method find*(
   d: MockDiscovery,
-  cid: Cid): Future[seq[SignedPeerRecord]] {.async.} =
+  cid: Cid
+): Future[seq[SignedPeerRecord]] {.async.} =
   if isNil(d.findBlockProvidersHandler):
     return
@@ -47,8 +51,9 @@ method provide*(d: MockDiscovery, cid: Cid): Future[void] {.async.} =
   await d.publishBlockProvideHandler(d, cid)

 method find*(
   d: MockDiscovery,
-  host: ca.Address): Future[seq[SignedPeerRecord]] {.async.} =
+  host: ca.Address
+): Future[seq[SignedPeerRecord]] {.async.} =
   if isNil(d.findHostProvidersHandler):
     return


@@ -82,6 +82,8 @@ proc hash*(requestId: RequestId): Hash =
   hash(requestId.toArray)

 proc new*(_: type MockMarket): MockMarket =
+  ## Create a new mocked Market instance
+  ##
   let config = MarketplaceConfig(
     collateral: CollateralConfig(
       repairRewardPercentage: 10,
@@ -136,7 +138,7 @@ method getActiveSlot*(
   return none Slot

 method requestState*(market: MockMarket,
                      requestId: RequestId): Future[?RequestState] {.async.} =
   return market.requestState.?[requestId]

 method slotState*(market: MockMarket,
@@ -320,9 +322,9 @@ method subscribeSlotFreed*(market: MockMarket,
   return subscription

 method subscribeRequestCancelled*(market: MockMarket,
                                   requestId: RequestId,
                                   callback: OnRequestCancelled):
                                   Future[Subscription] {.async.} =
   let subscription = RequestCancelledSubscription(
     market: market,
     requestId: requestId,


@@ -18,13 +18,18 @@ type
     mockInterval*: Duration
     mockCallback: timer.TimerCallback

-proc new*(T: type MockTimer): T =
-  T(
+proc new*(T: type MockTimer): MockTimer =
+  ## Create a mocked Timer instance
+  MockTimer(
     startCalled: 0,
     stopCalled: 0
   )

-method start*(mockTimer: MockTimer, callback: timer.TimerCallback, interval: Duration) =
+method start*(
+  mockTimer: MockTimer,
+  callback: timer.TimerCallback,
+  interval: Duration
+) =
   mockTimer.mockCallback = callback
   mockTimer.mockInterval = interval
   inc mockTimer.startCalled


@@ -24,8 +24,9 @@ type
     networkStore: NetworkStore]

 proc generateNodes*(
   num: Natural,
-  blocks: openArray[bt.Block] = []): seq[NodesComponents] =
+  blocks: openArray[bt.Block] = []
+): seq[NodesComponents] =
   for i in 0..<num:
     let
       switch = newStandardSwitch(transportFlags = {ServerFlags.ReuseAddr})


@@ -11,13 +11,13 @@ type
   RandomChunker* = Chunker

 proc new*(
   T: type RandomChunker,
   rng: Rng,
   chunkSize = DefaultChunkSize,
   size: int,
-  pad = false): T =
-  ## create a chunker that produces
-  ## random data
+  pad = false
+): RandomChunker =
+  ## Create a chunker that produces random data
   ##

   var consumed = 0


@@ -18,11 +18,10 @@ type
   Before* = proc(): Future[void] {.gcsafe.}
   After* = proc(): Future[void] {.gcsafe.}

-proc commonBlockStoreTests*(
-  name: string,
-  provider: StoreProvider,
-  before: Before = nil,
-  after: After = nil) =
+proc commonBlockStoreTests*(name: string,
+                            provider: StoreProvider,
+                            before: Before = nil,
+                            after: After = nil) =

   suite name & " Store Common":
     var


@@ -26,13 +26,17 @@ proc upload*(client: CodexClient, contents: string): string =
   assert response.status == "200 OK"
   response.body

-proc requestStorage*(client: CodexClient,
-                     cid: string,
-                     duration: uint64,
-                     reward: uint64,
-                     proofProbability: uint64,
-                     expiry: UInt256,
-                     collateral: uint64): string =
+proc requestStorage*(
+  client: CodexClient,
+  cid: string,
+  duration: uint64,
+  reward: uint64,
+  proofProbability: uint64,
+  expiry: UInt256,
+  collateral: uint64
+): string =
+  ## Call request storage REST endpoint
+  ##
   let url = client.baseurl & "/storage/request/" & cid
   let json = %*{
     "duration": $duration,
@@ -55,8 +59,13 @@ proc getSlots*(client: CodexClient): JsonNode =
   let body = client.http.getContent(url)
   parseJson(body).catch |? nil

-proc postAvailability*(client: CodexClient,
-                       size, duration, minPrice: uint64, maxCollateral: uint64): JsonNode =
+proc postAvailability*(
+  client: CodexClient,
+  size, duration, minPrice: uint64,
+  maxCollateral: uint64
+): JsonNode =
+  ## Post sales availability endpoint
+  ##
   let url = client.baseurl & "/sales/availability"
   let json = %*{
     "size": $size,
@@ -69,6 +78,7 @@ proc postAvailability*(client: CodexClient,
   parseJson(response.body)

 proc getAvailabilities*(client: CodexClient): JsonNode =
+  ## Call sales availability REST endpoint
   let url = client.baseurl & "/sales/availability"
   let body = client.http.getContent(url)
   parseJson(body)