Enable stylecheck (#353)

* Applying styleCheck

* Stuck on vendor folder

* Applies style check

* Turns styleCheck back off

* Switches to styleCheck:usages

* Fixes empty template casing

* Rolls up nim-blscurve, nim-datastore, nim-ethers, nim-leopard, and nim-taskpools

* Bumps nim-confutils and removes unused import from fileutils.nim

* Unused `using` in fileutils.nim is required by CI

* Reverts bump of nim-confutils module
Ben Bierens 2023-03-10 08:02:54 +01:00 committed by GitHub
parent 7a0a48e4a5
commit da79660f8e
39 changed files with 176 additions and 174 deletions
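For reference, the switches that actually turn the check on are the two lines added to config.nims further down in this diff. A minimal sketch of what they mean (the comments here are editorial, not part of the file):

  --styleCheck:usages   # every use of an identifier must match the casing of its definition (PeerId, Wantlist, ecK, ...)
  --styleCheck:error    # report style violations as compile errors instead of warnings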

View File

@@ -30,7 +30,7 @@ import ./pendingblocks
 logScope:
   topics = "codex discoveryengine"
-declareGauge(codex_inflight_discovery, "inflight discovery requests")
+declareGauge(codexInflightDiscovery, "inflight discovery requests")
 const
   DefaultConcurrentDiscRequests = 10
@@ -109,13 +109,13 @@ proc advertiseTaskLoop(b: DiscoveryEngine) {.async.} =
           request = b.discovery.provide(cid)
         b.inFlightAdvReqs[cid] = request
-        codex_inflight_discovery.set(b.inFlightAdvReqs.len.int64)
+        codexInflightDiscovery.set(b.inFlightAdvReqs.len.int64)
         trace "Advertising block", cid, inflight = b.inFlightAdvReqs.len
         await request
       finally:
         b.inFlightAdvReqs.del(cid)
-        codex_inflight_discovery.set(b.inFlightAdvReqs.len.int64)
+        codexInflightDiscovery.set(b.inFlightAdvReqs.len.int64)
         trace "Advertised block", cid, inflight = b.inFlightAdvReqs.len
     except CatchableError as exc:
       trace "Exception in advertise task runner", exc = exc.msg
@@ -148,7 +148,7 @@ proc discoveryTaskLoop(b: DiscoveryEngine) {.async.} =
             .wait(DefaultDiscoveryTimeout)
         b.inFlightDiscReqs[cid] = request
-        codex_inflight_discovery.set(b.inFlightAdvReqs.len.int64)
+        codexInflightDiscovery.set(b.inFlightAdvReqs.len.int64)
         let
           peers = await request
@@ -163,7 +163,7 @@ proc discoveryTaskLoop(b: DiscoveryEngine) {.async.} =
       finally:
         b.inFlightDiscReqs.del(cid)
-        codex_inflight_discovery.set(b.inFlightAdvReqs.len.int64)
+        codexInflightDiscovery.set(b.inFlightAdvReqs.len.int64)
     except CatchableError as exc:
       trace "Exception in discovery task runner", exc = exc.msg
@@ -217,16 +217,16 @@ proc stop*(b: DiscoveryEngine) {.async.} =
     return
   b.discEngineRunning = false
-  for t in b.advertiseTasks:
-    if not t.finished:
+  for task in b.advertiseTasks:
+    if not task.finished:
       trace "Awaiting advertise task to stop"
-      await t.cancelAndWait()
+      await task.cancelAndWait()
       trace "Advertise task stopped"
-  for t in b.discoveryTasks:
-    if not t.finished:
+  for task in b.discoveryTasks:
+    if not task.finished:
       trace "Awaiting discovery task to stop"
-      await t.cancelAndWait()
+      await task.cancelAndWait()
       trace "Discovery task stopped"
   if not b.advertiseLoop.isNil and not b.advertiseLoop.finished:

View File

@@ -108,10 +108,10 @@ proc stop*(b: BlockExcEngine) {.async.} =
     return
   b.blockexcRunning = false
-  for t in b.blockexcTasks:
-    if not t.finished:
+  for task in b.blockexcTasks:
+    if not task.finished:
       trace "Awaiting task to stop"
-      await t.cancelAndWait()
+      await task.cancelAndWait()
       trace "Task stopped"
   trace "NetworkStore stopped"
@@ -316,7 +316,7 @@ proc blocksHandler*(
 proc wantListHandler*(
   b: BlockExcEngine,
   peer: PeerId,
-  wantList: WantList) {.async.} =
+  wantList: Wantlist) {.async.} =
   ## Handle incoming want lists
   ##
@@ -530,7 +530,7 @@ proc new*(
   proc blockWantListHandler(
     peer: PeerId,
-    wantList: WantList): Future[void] {.gcsafe.} =
+    wantList: Wantlist): Future[void] {.gcsafe.} =
     engine.wantListHandler(peer, wantList)
   proc blockPresenceHandler(

View File

@@ -34,13 +34,13 @@ const
   MaxInflight* = 100
 type
-  WantListHandler* = proc(peer: PeerID, wantList: WantList): Future[void] {.gcsafe.}
-  BlocksHandler* = proc(peer: PeerID, blocks: seq[bt.Block]): Future[void] {.gcsafe.}
-  BlockPresenceHandler* = proc(peer: PeerID, precense: seq[BlockPresence]): Future[void] {.gcsafe.}
-  AccountHandler* = proc(peer: PeerID, account: Account): Future[void] {.gcsafe.}
-  PaymentHandler* = proc(peer: PeerID, payment: SignedState): Future[void] {.gcsafe.}
+  WantListHandler* = proc(peer: PeerId, wantList: Wantlist): Future[void] {.gcsafe.}
+  BlocksHandler* = proc(peer: PeerId, blocks: seq[bt.Block]): Future[void] {.gcsafe.}
+  BlockPresenceHandler* = proc(peer: PeerId, precense: seq[BlockPresence]): Future[void] {.gcsafe.}
+  AccountHandler* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
+  PaymentHandler* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}
   WantListSender* = proc(
-    id: PeerID,
+    id: PeerId,
     cids: seq[Cid],
     priority: int32 = 0,
     cancel: bool = false,
@@ -55,10 +55,10 @@ type
     onAccount*: AccountHandler
     onPayment*: PaymentHandler
-  BlocksSender* = proc(peer: PeerID, presence: seq[bt.Block]): Future[void] {.gcsafe.}
-  PresenceSender* = proc(peer: PeerID, presence: seq[BlockPresence]): Future[void] {.gcsafe.}
-  AccountSender* = proc(peer: PeerID, account: Account): Future[void] {.gcsafe.}
-  PaymentSender* = proc(peer: PeerID, payment: SignedState): Future[void] {.gcsafe.}
+  BlocksSender* = proc(peer: PeerId, presence: seq[bt.Block]): Future[void] {.gcsafe.}
+  PresenceSender* = proc(peer: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.}
+  AccountSender* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
+  PaymentSender* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}
   BlockExcRequest* = object
     sendWantList*: WantListSender
@@ -68,7 +68,7 @@ type
     sendPayment*: PaymentSender
   BlockExcNetwork* = ref object of LPProtocol
-    peers*: Table[PeerID, NetworkPeer]
+    peers*: Table[PeerId, NetworkPeer]
     switch*: Switch
     handlers*: BlockExcHandlers
     request*: BlockExcRequest
@@ -92,7 +92,7 @@ proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} =
 proc handleWantList(
   b: BlockExcNetwork,
   peer: NetworkPeer,
-  list: WantList) {.async.} =
+  list: Wantlist) {.async.} =
   ## Handle incoming want list
   ##
@@ -107,8 +107,8 @@ proc makeWantList*(
   cancel: bool = false,
   wantType: WantType = WantType.WantHave,
   full: bool = false,
-  sendDontHave: bool = false): WantList =
-  WantList(
+  sendDontHave: bool = false): Wantlist =
+  Wantlist(
     entries: cids.mapIt(
       Entry(
         `block`: it.data.buffer,
@@ -120,7 +120,7 @@ proc makeWantList*(
 proc sendWantList*(
   b: BlockExcNetwork,
-  id: PeerID,
+  id: PeerId,
   cids: seq[Cid],
   priority: int32 = 0,
   cancel: bool = false,
@@ -175,7 +175,7 @@ template makeBlocks*(blocks: seq[bt.Block]): seq[pb.Block] =
 proc sendBlocks*(
   b: BlockExcNetwork,
-  id: PeerID,
+  id: PeerId,
   blocks: seq[bt.Block]): Future[void] =
   ## Send blocks to remote
   ##
@@ -195,7 +195,7 @@ proc handleBlockPresence(
 proc sendBlockPresence*(
   b: BlockExcNetwork,
-  id: PeerID,
+  id: PeerId,
   presence: seq[BlockPresence]): Future[void] =
   ## Send presence to remote
   ##
@@ -260,7 +260,7 @@ proc rpcHandler(b: BlockExcNetwork, peer: NetworkPeer, msg: Message) {.async.} =
   except CatchableError as exc:
     trace "Exception in blockexc rpc handler", exc = exc.msg
-proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerID): NetworkPeer =
+proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer =
   ## Creates or retrieves a BlockExcNetwork Peer
   ##
@@ -287,7 +287,7 @@ proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerID): NetworkPeer =
   return blockExcPeer
-proc setupPeer*(b: BlockExcNetwork, peer: PeerID) =
+proc setupPeer*(b: BlockExcNetwork, peer: PeerId) =
   ## Perform initial setup, such as want
   ## list exchange
   ##
@@ -297,7 +297,7 @@ proc setupPeer*(b: BlockExcNetwork, peer: PeerID) =
 proc dialPeer*(b: BlockExcNetwork, peer: PeerRecord) {.async.} =
   await b.switch.connect(peer.peerId, peer.addresses.mapIt(it.address))
-proc dropPeer*(b: BlockExcNetwork, peer: PeerID) =
+proc dropPeer*(b: BlockExcNetwork, peer: PeerId) =
   ## Cleanup disconnected peer
   ##
@@ -307,7 +307,7 @@ method init*(b: BlockExcNetwork) =
   ## Perform protocol initialization
   ##
-  proc peerEventHandler(peerId: PeerID, event: PeerEvent) {.async.} =
+  proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} =
     if event.kind == PeerEventKind.Joined:
       b.setupPeer(peerId)
     else:
@@ -339,7 +339,7 @@ proc new*(
     inflightSema: newAsyncSemaphore(maxInflight))
   proc sendWantList(
-    id: PeerID,
+    id: PeerId,
     cids: seq[Cid],
     priority: int32 = 0,
     cancel: bool = false,
@@ -350,16 +350,16 @@ proc new*(
       id, cids, priority, cancel,
       wantType, full, sendDontHave)
-  proc sendBlocks(id: PeerID, blocks: seq[bt.Block]): Future[void] {.gcsafe.} =
+  proc sendBlocks(id: PeerId, blocks: seq[bt.Block]): Future[void] {.gcsafe.} =
     self.sendBlocks(id, blocks)
-  proc sendPresence(id: PeerID, presence: seq[BlockPresence]): Future[void] {.gcsafe.} =
+  proc sendPresence(id: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.} =
     self.sendBlockPresence(id, presence)
-  proc sendAccount(id: PeerID, account: Account): Future[void] {.gcsafe.} =
+  proc sendAccount(id: PeerId, account: Account): Future[void] {.gcsafe.} =
     self.sendAccount(id, account)
-  proc sendPayment(id: PeerID, payment: SignedState): Future[void] {.gcsafe.} =
+  proc sendPayment(id: PeerId, payment: SignedState): Future[void] {.gcsafe.} =
     self.sendPayment(id, payment)
   self.request = BlockExcRequest(

View File

@@ -46,7 +46,7 @@ proc readLoop*(b: NetworkPeer, conn: Connection) {.async.} =
     while not conn.atEof or not conn.closed:
       let
         data = await conn.readLp(MaxMessageSize)
-        msg = Message.ProtobufDecode(data).mapFailure().tryGet()
+        msg = Message.protobufDecode(data).mapFailure().tryGet()
       trace "Got message for peer", peer = b.id
       await b.handler(b, msg)
   except CatchableError as exc:
@@ -70,7 +70,7 @@ proc send*(b: NetworkPeer, msg: Message) {.async.} =
     return
   trace "Sending message to remote", peer = b.id
-  await conn.writeLp(ProtobufEncode(msg))
+  await conn.writeLp(protobufEncode(msg))
 proc broadcast*(b: NetworkPeer, msg: Message) =
   proc sendAwaiter() {.async.} =

View File

@@ -27,7 +27,7 @@ logScope:
 type
   BlockExcPeerCtx* = ref object of RootObj
-    id*: PeerID
+    id*: PeerId
    blocks*: Table[Cid, Presence] # remote peer have list including price
    peerWants*: seq[Entry]        # remote peers want lists
    exchanged*: int               # times peer has exchanged with us

View File

@@ -29,30 +29,30 @@ logScope:
 type
   PeerCtxStore* = ref object of RootObj
-    peers*: OrderedTable[PeerID, BlockExcPeerCtx]
+    peers*: OrderedTable[PeerId, BlockExcPeerCtx]
 iterator items*(self: PeerCtxStore): BlockExcPeerCtx =
   for p in self.peers.values:
     yield p
-proc contains*(a: openArray[BlockExcPeerCtx], b: PeerID): bool =
+proc contains*(a: openArray[BlockExcPeerCtx], b: PeerId): bool =
   ## Convenience method to check for peer precense
   ##
   a.anyIt( it.id == b )
-func contains*(self: PeerCtxStore, peerId: PeerID): bool =
+func contains*(self: PeerCtxStore, peerId: PeerId): bool =
   peerId in self.peers
 func add*(self: PeerCtxStore, peer: BlockExcPeerCtx) =
   trace "Adding peer to peer context store", peer = peer.id
   self.peers[peer.id] = peer
-func remove*(self: PeerCtxStore, peerId: PeerID) =
+func remove*(self: PeerCtxStore, peerId: PeerId) =
   trace "Removing peer from peer context store", peer = peerId
   self.peers.del(peerId)
-func get*(self: PeerCtxStore, peerId: PeerID): BlockExcPeerCtx =
+func get*(self: PeerCtxStore, peerId: PeerId): BlockExcPeerCtx =
   trace "Retrieving peer from peer context store", peer = peerId
   self.peers.getOrDefault(peerId, nil)
@@ -94,4 +94,4 @@ func selectCheapest*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
 proc new*(T: type PeerCtxStore): PeerCtxStore =
   T(
-    peers: initOrderedTable[PeerID, BlockExcPeerCtx]())
+    peers: initOrderedTable[PeerId, BlockExcPeerCtx]())

View File

@@ -13,7 +13,7 @@ import pkg/libp2p
 import message
-export Message, ProtobufEncode, ProtobufDecode
+export Message, protobufEncode, protobufDecode
 export Wantlist, WantType, Entry
 export Block, BlockPresenceType, BlockPresence
 export AccountMessage, StateChannelUpdate

View File

@@ -98,7 +98,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: StateChannelUpdate) =
   ipb.finish()
   pb.write(field, ipb)
-proc ProtobufEncode*(value: Message): seq[byte] =
+proc protobufEncode*(value: Message): seq[byte] =
   var ipb = initProtoBuffer()
   ipb.write(1, value.wantlist)
   for v in value.payload:
@@ -172,7 +172,7 @@ proc decode*(_: type StateChannelUpdate, pb: ProtoBuffer): ProtoResult[StateChan
   discard ? pb.getField(1, value.update)
   ok(value)
-proc ProtobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
+proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
   var
     value = Message()
     pb = initProtoBuffer(msg)

View File

@@ -37,11 +37,11 @@ type
 template EmptyCid*: untyped =
   var
-    emptyCid {.global, threadvar.}:
+    EmptyCid {.global, threadvar.}:
       array[CIDv0..CIDv1, Table[MultiCodec, Cid]]
   once:
-    emptyCid = [
+    EmptyCid = [
       CIDv0: {
         multiCodec("sha2-256"): Cid
           .init("QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n")
@@ -54,15 +54,15 @@ template EmptyCid*: untyped =
       }.toTable,
     ]
-  emptyCid
+  EmptyCid
 template EmptyDigests*: untyped =
   var
-    emptyDigests {.global, threadvar.}:
+    EmptyDigests {.global, threadvar.}:
       array[CIDv0..CIDv1, Table[MultiCodec, MultiHash]]
   once:
-    emptyDigests = [
+    EmptyDigests = [
      CIDv0: {
        multiCodec("sha2-256"): EmptyCid[CIDv0]
          .catch
@@ -83,15 +83,15 @@ template EmptyDigests*: untyped =
      }.toTable,
     ]
-  emptyDigests
+  EmptyDigests
 template EmptyBlock*: untyped =
   var
-    emptyBlock {.global, threadvar.}:
+    EmptyBlock {.global, threadvar.}:
      array[CIDv0..CIDv1, Table[MultiCodec, Block]]
   once:
-    emptyBlock = [
+    EmptyBlock = [
      CIDv0: {
        multiCodec("sha2-256"): Block(
          cid: EmptyCid[CIDv0][multiCodec("sha2-256")])
@@ -102,7 +102,7 @@ template EmptyBlock*: untyped =
      }.toTable,
     ]
-  emptyBlock
+  EmptyBlock
 proc isEmpty*(cid: Cid): bool =
   cid == EmptyCid[cid.cidver]

View File

@@ -57,7 +57,7 @@ proc toNodeId*(host: ca.Address): NodeId =
 proc findPeer*(
   d: Discovery,
-  peerId: PeerID): Future[?PeerRecord] {.async.} =
+  peerId: PeerId): Future[?PeerRecord] {.async.} =
   let
     node = await d.protocol.resolve(toNodeId(peerId))

View File

@@ -168,7 +168,7 @@ proc decode*(
     new_manifest = encoded.len
   var
-    decoder = self.decoderProvider(encoded.blockSize, encoded.K, encoded.M)
+    decoder = self.decoderProvider(encoded.blockSize, encoded.ecK, encoded.ecM)
   try:
     for i in 0..<encoded.steps:
@@ -187,9 +187,9 @@ proc decode*(
         await sleepAsync(10.millis)
       var
-        data = newSeq[seq[byte]](encoded.K) # number of blocks to encode
-        parityData = newSeq[seq[byte]](encoded.M)
-        recovered = newSeqWith[seq[byte]](encoded.K, newSeq[byte](encoded.blockSize))
+        data = newSeq[seq[byte]](encoded.ecK) # number of blocks to encode
+        parityData = newSeq[seq[byte]](encoded.ecM)
+        recovered = newSeqWith[seq[byte]](encoded.ecK, newSeq[byte](encoded.blockSize))
        idxPendingBlocks = pendingBlocks # copy futures to make using with `one` easier
        emptyBlock = newSeq[byte](encoded.blockSize)
        resolved = 0
@@ -197,7 +197,7 @@ proc decode*(
       while true:
         # Continue to receive blocks until we have just enough for decoding
         # or no more blocks can arrive
-        if (resolved >= encoded.K) or (idxPendingBlocks.len == 0):
+        if (resolved >= encoded.ecK) or (idxPendingBlocks.len == 0):
           break
         let
@@ -210,9 +210,9 @@ proc decode*(
           trace "Failed retrieving block", error = error.msg
           continue
-        if idx >= encoded.K:
+        if idx >= encoded.ecK:
           trace "Retrieved parity block", cid = blk.cid, idx
-          shallowCopy(parityData[idx - encoded.K], if blk.isEmpty: emptyBlock else: blk.data)
+          shallowCopy(parityData[idx - encoded.ecK], if blk.isEmpty: emptyBlock else: blk.data)
         else:
           trace "Retrieved data block", cid = blk.cid, idx
           shallowCopy(data[idx], if blk.isEmpty: emptyBlock else: blk.data)
@@ -223,7 +223,7 @@ proc decode*(
         dataPieces = data.filterIt( it.len > 0 ).len
         parityPieces = parityData.filterIt( it.len > 0 ).len
-      if dataPieces >= encoded.K:
+      if dataPieces >= encoded.ecK:
         trace "Retrieved all the required data blocks", data = dataPieces, parity = parityPieces
         continue
@@ -234,7 +234,7 @@ proc decode*(
         trace "Unable to decode manifest!", err = $err.error
         return failure($err.error)
-      for i in 0..<encoded.K:
+      for i in 0..<encoded.ecK:
         if data[i].len <= 0:
           without blk =? bt.Block.new(recovered[i]), error:
             trace "Unable to create block!", exc = error.msg

View File

@@ -68,8 +68,8 @@ func encode*(_: DagPBCoder, manifest: Manifest): ?!seq[byte] =
   header.write(5, manifest.originalBytes.uint64)
   if manifest.protected:
     var erasureInfo = initProtoBuffer()
-    erasureInfo.write(1, manifest.K.uint32)
-    erasureInfo.write(2, manifest.M.uint32)
+    erasureInfo.write(1, manifest.ecK.uint32)
+    erasureInfo.write(2, manifest.ecM.uint32)
     erasureInfo.write(3, manifest.originalCid.data.buffer)
     erasureInfo.write(4, manifest.originalLen.uint32)
     erasureInfo.finish()
@@ -95,7 +95,7 @@ func decode*(_: DagPBCoder, data: openArray[byte]): ?!Manifest =
     blockSize: uint32
     blocksLen: uint32
     originalLen: uint32
-    K, M: uint32
+    ecK, ecM: uint32
     blocks: seq[Cid]
   # Decode `Header` message
@@ -119,10 +119,10 @@ func decode*(_: DagPBCoder, data: openArray[byte]): ?!Manifest =
     return failure("Unable to decode `erasureInfo` from manifest!")
   if pbErasureInfo.buffer.len > 0:
-    if pbErasureInfo.getField(1, K).isErr:
+    if pbErasureInfo.getField(1, ecK).isErr:
       return failure("Unable to decode `K` from manifest!")
-    if pbErasureInfo.getField(2, M).isErr:
+    if pbErasureInfo.getField(2, ecM).isErr:
       return failure("Unable to decode `M` from manifest!")
     if pbErasureInfo.getField(3, originalCid).isErr:
@@ -157,8 +157,8 @@ func decode*(_: DagPBCoder, data: openArray[byte]): ?!Manifest =
       protected: pbErasureInfo.buffer.len > 0)
   if self.protected:
-    self.K = K.int
-    self.M = M.int
+    self.ecK = ecK.int
+    self.ecM = ecM.int
     self.originalCid = ? Cid.init(originalCid).mapFailure
     self.originalLen = originalLen.int

View File

@@ -83,11 +83,11 @@ func bytes*(self: Manifest, pad = true): int =
 func rounded*(self: Manifest): int =
   ## Number of data blocks in *protected* manifest including padding at the end
-  roundUp(self.originalLen, self.K)
+  roundUp(self.originalLen, self.ecK)
 func steps*(self: Manifest): int =
   ## Number of EC groups in *protected* manifest
-  divUp(self.originalLen, self.K)
+  divUp(self.originalLen, self.ecK)
 func verify*(self: Manifest): ?!void =
   ## Check manifest correctness
@@ -97,7 +97,7 @@ func verify*(self: Manifest): ?!void =
   if divUp(self.originalBytes, self.blockSize) != originalLen:
     return failure newException(CodexError, "Broken manifest: wrong originalBytes")
-  if self.protected and (self.len != self.steps * (self.K + self.M)):
+  if self.protected and (self.len != self.steps * (self.ecK + self.ecM)):
     return failure newException(CodexError, "Broken manifest: wrong originalLen")
   return success()
@@ -184,7 +184,7 @@ proc new*(
 proc new*(
   T: type Manifest,
   manifest: Manifest,
-  K, M: int): ?!Manifest =
+  ecK, ecM: int): ?!Manifest =
   ## Create an erasure protected dataset from an
   ## un-protected one
   ##
@@ -197,12 +197,12 @@ proc new*(
       originalBytes: manifest.originalBytes,
       blockSize: manifest.blockSize,
       protected: true,
-      K: K, M: M,
+      ecK: ecK, ecM: ecM,
       originalCid: ? manifest.cid,
       originalLen: manifest.len)
   let
-    encodedLen = self.rounded + (self.steps * M)
+    encodedLen = self.rounded + (self.steps * ecM)
   self.blocks = newSeq[Cid](encodedLen)

View File

@@ -37,8 +37,8 @@ type
     codec*: MultiCodec          # Data set codec
     case protected*: bool       # Protected datasets have erasure coded info
     of true:
-      K*: int                   # Number of blocks to encode
-      M*: int                   # Number of resulting parity blocks
+      ecK*: int                 # Number of blocks to encode
+      ecM*: int                 # Number of resulting parity blocks
       originalCid*: Cid         # The original Cid of the dataset being erasure coded
       originalLen*: int         # The length of the original manifest
     else:

View File

@@ -45,7 +45,7 @@ type
   CodexNodeRef* = ref object
     switch*: Switch
-    networkId*: PeerID
+    networkId*: PeerId
     blockStore*: BlockStore
     engine*: BlockExcEngine
     erasure*: Erasure
@@ -54,12 +54,12 @@ type
 proc findPeer*(
   node: CodexNodeRef,
-  peerId: PeerID): Future[?PeerRecord] {.async.} =
+  peerId: PeerId): Future[?PeerRecord] {.async.} =
   return await node.discovery.findPeer(peerId)
 proc connect*(
   node: CodexNodeRef,
-  peerId: PeerID,
+  peerId: PeerId,
   addrs: seq[MultiAddress]): Future[void] =
   node.switch.connect(peerId, addrs)
@@ -288,7 +288,7 @@ proc requestStorage*(self: CodexNodeRef,
       erasure: StorageErasure(
         totalChunks: encoded.len.uint64,
       ),
-      por: StoragePor(
+      por: StoragePoR(
        u: @[],         # TODO: PoR setup
        publicKey: @[], # TODO: PoR setup
        name: @[]       # TODO: PoR setup

View File

@@ -50,7 +50,7 @@ proc initRestApi*(node: CodexNodeRef, conf: CodexConf): RestRouter =
   router.api(
     MethodGet,
     "/api/codex/v1/connect/{peerId}") do (
-      peerId: PeerID,
+      peerId: PeerId,
       addrs: seq[MultiAddress]) -> RestApiResponse:
       ## Connect to a peer
       ##

View File

@@ -37,7 +37,7 @@ proc encodeString*(peerId: PeerId): Result[string, cstring] =
   ok($peerId)
 proc decodeString*(T: type PeerId, value: string): Result[PeerId, cstring] =
-  PeerID.init(value)
+  PeerId.init(value)
 proc encodeString*(address: MultiAddress): Result[string, cstring] =
   ok($address)

View File

@@ -39,7 +39,7 @@ proc fromJson*(_: type StorageRequestParams,
 func `%`*(address: Address): JsonNode =
   % $address
-func `%`*(stint: StInt|StUInt): JsonNode =
+func `%`*(stint: StInt|StUint): JsonNode =
   %("0x" & stint.toHex)
 func `%`*(arr: openArray[byte]): JsonNode =

View File

@@ -125,8 +125,8 @@ type
   # PoR query element
   QElement* = object
-    I*: int64
-    V*: blst_scalar
+    i*: int64
+    v*: blst_scalar
   PoR* = object
     ssk*: SecretKey
@@ -157,7 +157,7 @@ proc getSector(
   ##
   var res: ZChar
-  stream.setPos(((blockid * spb + sectorid) * ZChar.len).int)
+  stream.setPos(((blockId * spb + sectorId) * ZChar.len).int)
   discard await stream.readOnce(addr res[0], ZChar.len)
   return res
@@ -165,8 +165,8 @@ proc rndScalar(): blst_scalar =
   ## Generate random scalar within the subroup order r
   ##
-  var scal {.noInit.}: array[32, byte]
-  var scalar {.noInit.}: blst_scalar
+  var scal : array[32, byte]
+  var scalar : blst_scalar
   while true:
     for val in scal.mitems:
@@ -183,7 +183,7 @@ proc rndP2(): (blst_p2, blst_scalar) =
   ##
   var
-    x {.noInit.}: blst_p2
+    x : blst_p2
   x.blst_p2_from_affine(BLS12_381_G2) # init from generator
   let
@@ -195,7 +195,7 @@ proc rndP2(): (blst_p2, blst_scalar) =
 proc rndP1(): (blst_p1, blst_scalar) =
   ## Generate random point on G1
   var
-    x {.noInit.}: blst_p1
+    x : blst_p1
   x.blst_p1_from_affine(BLS12_381_G1) # init from generator
   let
@@ -316,8 +316,8 @@ proc generateQuery*(tau: Tau, l: int): seq[QElement] =
   for i in 0..<l:
     var q: QElement
-    q.I = Rng.instance.rand(n-1) #TODO: dedup
-    q.V = rndScalar() #TODO: fix range
+    q.i = Rng.instance.rand(n-1) #TODO: dedup
+    q.v = rndScalar() #TODO: fix range
     result.add(q)
 proc generateProof*(
@@ -337,13 +337,13 @@ proc generateProof*(
     for qelem in q:
       let
-        sect = fromBytesBE((await stream.getSector(qelem.I, j, s)))
+        sect = fromBytesBE((await stream.getSector(qelem.i, j, s)))
       var
         x, v, sector: blst_fr
       sector.blst_fr_from_scalar(sect)
-      v.blst_fr_from_scalar(qelem.V)
+      v.blst_fr_from_scalar(qelem.v)
       x.blst_fr_mul(v, sector)
       muj.blst_fr_add(muj, x)
@@ -360,7 +360,7 @@ proc generateProof*(
     var
       prod: blst_p1
-    prod.blst_p1_mult(authenticators[qelem.I], qelem.V, 255)
+    prod.blst_p1_mult(authenticators[qelem.i], qelem.v, 255)
     sigma.blst_p1_add_or_double(sigma, prod)
   return Proof(mu: mu, sigma: sigma)
@@ -411,7 +411,7 @@ proc verifyProof*(
   var first: blst_p1
   for qelem in q:
     var prod: blst_p1
-    prod.blst_p1_mult(hashNameI(self.tau.t.name, qelem.I), qelem.V, 255)
+    prod.blst_p1_mult(hashNameI(self.tau.t.name, qelem.i), qelem.v, 255)
     first.blst_p1_add_or_double(first, prod)
     doAssert(blst_p1_on_curve(first).bool)
@@ -426,7 +426,7 @@ proc verifyProof*(
   var sum: blst_p1
   sum.blst_p1_add_or_double(first, second)
-  var g {.noInit.}: blst_p2
+  var g : blst_p2
   g.blst_p2_from_affine(BLS12_381_G2)
   return verifyPairings(sum, self.spk.key, sigma, g)

View File

@@ -73,7 +73,7 @@ method listBlocks*(
   raiseAssert("Not implemented!")
-method close*(self: Blockstore): Future[void] {.base.} =
+method close*(self: BlockStore): Future[void] {.base.} =
   ## Close the blockstore, cleaning up resources managed by it.
   ## For some implementations this may be a no-op
   ##

View File

@@ -49,7 +49,7 @@ proc secureWriteFile*[T: byte|char](path: string,
   else:
     writeFile(path, data, 0o600)
-proc checkSecureFile*(path: string): IOResult[bool] =
+proc checkSecureFile*(path: string): IoResult[bool] =
   when defined(windows):
     checkCurrentUserOnlyACL(path)
   else:

View File

@@ -69,6 +69,8 @@ else:
 --define:metrics
 # for heap-usage-by-instance-type metrics and object base-type strings
 --define:nimTypeNames
+--styleCheck:usages
+--styleCheck:error
 when (NimMajor, NimMinor) >= (1, 4):
   --warning:"ObservableStores:off"

View File

@@ -149,7 +149,7 @@ suite "Block Advertising and Discovery":
       pendingBlocks = blocks.mapIt(
         engine.pendingBlocks.getWantHandle(it.cid)
       )
-      peerId = PeerID.example
+      peerId = PeerId.example
       haves = collect(initTable()):
         for blk in blocks:
           { blk.cid: Presence(cid: blk.cid, price: 0.u256) }

View File

@@ -153,7 +153,7 @@ suite "Test Discovery Engine":
       check cid == blocks[0].cid
       check peerStore.len < minPeers
       var
-        peerCtx = BlockExcPeerCtx(id: PeerID.example)
+        peerCtx = BlockExcPeerCtx(id: PeerId.example)
       peerCtx.blocks[cid] = Presence(cid: cid, price: 0.u256)
       peerStore.add(peerCtx)

View File

@@ -24,7 +24,7 @@ suite "NetworkStore engine basic":
   var
     rng: Rng
     seckey: PrivateKey
-    peerId: PeerID
+    peerId: PeerId
     chunker: Chunker
     wallet: WalletRef
     blockDiscovery: Discovery
@@ -36,7 +36,7 @@ suite "NetworkStore engine basic":
   setup:
     rng = Rng.instance()
     seckey = PrivateKey.random(rng[]).tryGet()
-    peerId = PeerID.init(seckey.getPublicKey().tryGet()).tryGet()
+    peerId = PeerId.init(seckey.getPublicKey().tryGet()).tryGet()
     chunker = RandomChunker.new(Rng.instance(), size = 1024, chunkSize = 256)
     wallet = WalletRef.example
     blockDiscovery = Discovery.new()
@@ -54,7 +54,7 @@ suite "NetworkStore engine basic":
   test "Should send want list to new peers":
     proc sendWantList(
-      id: PeerID,
+      id: PeerId,
       cids: seq[Cid],
       priority: int32 = 0,
       cancel: bool = false,
@@ -94,7 +94,7 @@ suite "NetworkStore engine basic":
   test "Should send account to new peers":
     let pricing = Pricing.example
-    proc sendAccount(peer: PeerID, account: Account) {.gcsafe, async.} =
+    proc sendAccount(peer: PeerId, account: Account) {.gcsafe, async.} =
       check account.address == pricing.address
      done.complete()
@@ -129,7 +129,7 @@ suite "NetworkStore engine handlers":
   var
     rng: Rng
     seckey: PrivateKey
-    peerId: PeerID
+    peerId: PeerId
     chunker: Chunker
     wallet: WalletRef
     blockDiscovery: Discovery
@@ -154,7 +154,7 @@ suite "NetworkStore engine handlers":
       blocks.add(bt.Block.new(chunk).tryGet())
     seckey = PrivateKey.random(rng[]).tryGet()
-    peerId = PeerID.init(seckey.getPublicKey().tryGet()).tryGet()
+    peerId = PeerId.init(seckey.getPublicKey().tryGet()).tryGet()
     wallet = WalletRef.example
     blockDiscovery = Discovery.new()
     peerStore = PeerCtxStore.new()
@@ -204,7 +204,7 @@ suite "NetworkStore engine handlers":
      done = newFuture[void]()
      wantList = makeWantList(blocks.mapIt( it.cid ))
-    proc sendPresence(peerId: PeerID, presence: seq[BlockPresence]) {.gcsafe, async.} =
+    proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} =
      check presence.mapIt( it.cid ) == wantList.entries.mapIt( it.`block` )
      done.complete()
@@ -226,7 +226,7 @@ suite "NetworkStore engine handlers":
        blocks.mapIt( it.cid ),
        sendDontHave = true)
-    proc sendPresence(peerId: PeerID, presence: seq[BlockPresence]) {.gcsafe, async.} =
+    proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} =
      check presence.mapIt( it.cid ) == wantList.entries.mapIt( it.`block` )
      for p in presence:
        check:
@@ -248,7 +248,7 @@ suite "NetworkStore engine handlers":
        blocks.mapIt( it.cid ),
        sendDontHave = true)
-    proc sendPresence(peerId: PeerID, presence: seq[BlockPresence]) {.gcsafe, async.} =
+    proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} =
      let
        cid1Buf = blocks[0].cid.data.buffer
        cid2Buf = blocks[1].cid.data.buffer
@@ -297,7 +297,7 @@ suite "NetworkStore engine handlers":
    engine.network = BlockExcNetwork(
      request: BlockExcRequest(
-        sendPayment: proc(receiver: PeerID, payment: SignedState) {.gcsafe, async.} =
+        sendPayment: proc(receiver: PeerId, payment: SignedState) {.gcsafe, async.} =
          let
            amount =
              blocks.mapIt(
@@ -319,7 +319,7 @@ suite "NetworkStore engine handlers":
      handles: Table[Cid, Future[bt.Block]]
    proc sendWantList(
-      id: PeerID,
+      id: PeerId,
      cids: seq[Cid],
      priority: int32 = 0,
      cancel: bool = false,
@@ -356,7 +356,7 @@ suite "Task Handler":
   var
     rng: Rng
     seckey: PrivateKey
-    peerId: PeerID
+    peerId: PeerId
     chunker: Chunker
     wallet: WalletRef
     blockDiscovery: Discovery
@@ -368,7 +368,7 @@ suite "Task Handler":
     localStore: BlockStore
     peersCtx: seq[BlockExcPeerCtx]
-    peers: seq[PeerID]
+    peers: seq[PeerId]
     blocks: seq[bt.Block]
   setup:
@@ -382,7 +382,7 @@ suite "Task Handler":
       blocks.add(bt.Block.new(chunk).tryGet())
     seckey = PrivateKey.random(rng[]).tryGet()
-    peerId = PeerID.init(seckey.getPublicKey().tryGet()).tryGet()
+    peerId = PeerId.init(seckey.getPublicKey().tryGet()).tryGet()
     wallet = WalletRef.example
     blockDiscovery = Discovery.new()
     peerStore = PeerCtxStore.new()
@@ -409,7 +409,7 @@ suite "Task Handler":
     for i in 0..3:
       let seckey = PrivateKey.random(rng[]).tryGet()
-      peers.add(PeerID.init(seckey.getPublicKey().tryGet()).tryGet())
+      peers.add(PeerId.init(seckey.getPublicKey().tryGet()).tryGet())
       peersCtx.add(BlockExcPeerCtx(
         id: peers[i]
@@ -420,7 +420,7 @@ suite "Task Handler":
   test "Should send want-blocks in priority order":
     proc sendBlocks(
-      id: PeerID,
+      id: PeerId,
       blks: seq[bt.Block]) {.gcsafe, async.} =
       check blks.len == 2
       check:
@@ -458,7 +458,7 @@ suite "Task Handler":
     let missing = @[bt.Block.new("missing".toBytes).tryGet()]
     let price = (!engine.pricing).price
-    proc sendPresence(id: PeerID, presence: seq[BlockPresence]) {.gcsafe, async.} =
+    proc sendPresence(id: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} =
       check presence.mapIt(!Presence.init(it)) == @[
         Presence(cid: present[0].cid, have: true, price: price),
         Presence(cid: present[1].cid, have: true, price: price),

View File

@@ -18,7 +18,7 @@ suite "Network - Handlers":
   let
     rng = Rng.instance()
     seckey = PrivateKey.random(rng[]).tryGet()
-    peerId = PeerID.init(seckey.getPublicKey().tryGet()).tryGet()
+    peerId = PeerId.init(seckey.getPublicKey().tryGet()).tryGet()
     chunker = RandomChunker.new(Rng.instance(), size = 1024, chunkSize = 256)
   var
@@ -49,7 +49,7 @@ suite "Network - Handlers":
     discard await networkPeer.connect()
   test "Want List handler":
-    proc wantListHandler(peer: PeerID, wantList: WantList) {.gcsafe, async.} =
+    proc wantListHandler(peer: PeerId, wantList: Wantlist) {.gcsafe, async.} =
       # check that we got the correct amount of entries
       check wantList.entries.len == 4
@@ -71,25 +71,25 @@ suite "Network - Handlers":
        true, true)
     let msg = Message(wantlist: wantList)
-    await buffer.pushData(lenPrefix(ProtobufEncode(msg)))
+    await buffer.pushData(lenPrefix(protobufEncode(msg)))
     await done.wait(500.millis)
   test "Blocks Handler":
-    proc blocksHandler(peer: PeerID, blks: seq[bt.Block]) {.gcsafe, async.} =
+    proc blocksHandler(peer: PeerId, blks: seq[bt.Block]) {.gcsafe, async.} =
       check blks == blocks
       done.complete()
     network.handlers.onBlocks = blocksHandler
     let msg = Message(payload: makeBlocks(blocks))
-    await buffer.pushData(lenPrefix(ProtobufEncode(msg)))
+    await buffer.pushData(lenPrefix(protobufEncode(msg)))
     await done.wait(500.millis)
   test "Presence Handler":
     proc presenceHandler(
-      peer: PeerID,
+      peer: PeerId,
       precense: seq[BlockPresence]) {.gcsafe, async.} =
       for b in blocks:
         check:
@@ -105,35 +105,35 @@ suite "Network - Handlers":
        cid: it.cid.data.buffer,
        type: BlockPresenceType.Have
      )))
-    await buffer.pushData(lenPrefix(ProtobufEncode(msg)))
+    await buffer.pushData(lenPrefix(protobufEncode(msg)))
     await done.wait(500.millis)
   test "Handles account messages":
     let account = Account(address: EthAddress.example)
-    proc handleAccount(peer: PeerID, received: Account) {.gcsafe, async.} =
+    proc handleAccount(peer: PeerId, received: Account) {.gcsafe, async.} =
       check received == account
       done.complete()
     network.handlers.onAccount = handleAccount
     let message = Message(account: AccountMessage.init(account))
-    await buffer.pushData(lenPrefix(ProtobufEncode(message)))
+    await buffer.pushData(lenPrefix(protobufEncode(message)))
     await done.wait(100.millis)
   test "Handles payment messages":
     let payment = SignedState.example
-    proc handlePayment(peer: PeerID, received: SignedState) {.gcsafe, async.} =
+    proc handlePayment(peer: PeerId, received: SignedState) {.gcsafe, async.} =
       check received == payment
       done.complete()
     network.handlers.onPayment = handlePayment
     let message = Message(payment: StateChannelUpdate.init(payment))
-    await buffer.pushData(lenPrefix(ProtobufEncode(message)))
+    await buffer.pushData(lenPrefix(protobufEncode(message)))
     await done.wait(100.millis)
@@ -179,7 +179,7 @@ suite "Network - Senders":
      switch2.stop())
   test "Send want list":
-    proc wantListHandler(peer: PeerID, wantList: WantList) {.gcsafe, async.} =
+    proc wantListHandler(peer: PeerId, wantList: Wantlist) {.gcsafe, async.} =
       # check that we got the correct amount of entries
       check wantList.entries.len == 4
@@ -203,7 +203,7 @@ suite "Network - Senders":
     await done.wait(500.millis)
   test "send blocks":
-    proc blocksHandler(peer: PeerID, blks: seq[bt.Block]) {.gcsafe, async.} =
+    proc blocksHandler(peer: PeerId, blks: seq[bt.Block]) {.gcsafe, async.} =
       check blks == blocks
       done.complete()
@@ -216,7 +216,7 @@ suite "Network - Senders":
   test "send presence":
     proc presenceHandler(
-      peer: PeerID,
+      peer: PeerId,
       precense: seq[BlockPresence]) {.gcsafe, async.} =
       for b in blocks:
         check:
@@ -239,7 +239,7 @@ suite "Network - Senders":
   test "send account":
     let account = Account(address: EthAddress.example)
-    proc handleAccount(peer: PeerID, received: Account) {.gcsafe, async.} =
+    proc handleAccount(peer: PeerId, received: Account) {.gcsafe, async.} =
      check received == account
      done.complete()
@@ -251,7 +251,7 @@ suite "Network - Senders":
   test "send payment":
     let payment = SignedState.example
-    proc handlePayment(peer: PeerID, received: SignedState) {.gcsafe, async.} =
+    proc handlePayment(peer: PeerId, received: SignedState) {.gcsafe, async.} =
      check received == payment
      done.complete()
@@ -295,7 +295,7 @@ suite "Network - Test Limits":
   test "Concurrent Sends":
     let account = Account(address: EthAddress.example)
     network2.handlers.onAccount =
-      proc(peer: PeerID, received: Account) {.gcsafe, async.} =
+      proc(peer: PeerId, received: Account) {.gcsafe, async.} =
        check false
     let fut = network1.send(

View File

@@ -43,12 +43,12 @@ proc example*(_: type bt.Block): bt.Block =
   let bytes = newSeqWith(length, rand(uint8))
   bt.Block.new(bytes).tryGet()
-proc example*(_: type PeerId): PeerID =
+proc example*(_: type PeerId): PeerId =
   let key = PrivateKey.random(Rng.instance[]).get
   PeerId.init(key.getPublicKey().get).get
 proc example*(_: type BlockExcPeerCtx): BlockExcPeerCtx =
-  BlockExcPeerCtx(id: PeerID.example)
+  BlockExcPeerCtx(id: PeerId.example)
 proc example*(_: type Cid): Cid =
   bt.Block.example.cid

View File

@@ -29,7 +29,7 @@ proc new*(T: type MockDiscovery): T =
 proc findPeer*(
   d: Discovery,
-  peerId: PeerID): Future[?PeerRecord] {.async.} =
+  peerId: PeerId): Future[?PeerRecord] {.async.} =
   return none(PeerRecord)
 method find*(

View File

@@ -88,7 +88,7 @@ proc commonBlockStoreTests*(
     for handle in putHandles:
       check not handle.failed
-      check handle.read.isOK
+      check handle.read.isOk
     let
       cids = (await store.listBlocks(blockType = BlockType.Block)).tryGet()
@@ -111,7 +111,7 @@ proc commonBlockStoreTests*(
     for handle in putHandles:
       check not handle.failed
-      check handle.read.isOK
+      check handle.read.isOk
     let
       cids = (await store.listBlocks(blockType = BlockType.Manifest)).tryGet()
@@ -135,7 +135,7 @@ proc commonBlockStoreTests*(
     for handle in putHandles:
      check not handle.failed
-      check handle.read.isOK
+      check handle.read.isOk
     let
       cids = (await store.listBlocks(blockType = BlockType.Both)).tryGet()

View File

@@ -152,7 +152,7 @@ suite "BlockMaintainer":
   test "Should handle new blocks":
     proc invokeTimerManyTimes(): Future[void] {.async.} =
-      for i in countUp(0, 10):
+      for i in countup(0, 10):
         await mockTimer.invokeCallback()
     blockMaintainer.start()

View File

@@ -64,7 +64,7 @@ suite "Erasure encode/decode":
       column = rng.rand(encoded.len div encoded.steps) # random column
       dropped: seq[Cid]
-    for _ in 0..<encoded.M:
+    for _ in 0..<encoded.ecM:
       dropped.add(encoded[column])
       (await store.delBlock(encoded[column])).tryGet()
       column.inc(encoded.steps)
@@ -92,7 +92,7 @@ suite "Erasure encode/decode":
       column = rng.rand(encoded.len div encoded.steps) # random column
       dropped: seq[Cid]
-    for _ in 0..<encoded.M + 1:
+    for _ in 0..<encoded.ecM + 1:
       dropped.add(encoded[column])
       (await store.delBlock(encoded[column])).tryGet()
       column.inc(encoded.steps)
@@ -122,7 +122,7 @@ suite "Erasure encode/decode":
     let
       blockIdx = toSeq(countup(offset, encoded.len - 1, encoded.steps))
-    for _ in 0..<encoded.M:
+    for _ in 0..<encoded.ecM:
       blocks.add(rng.sample(blockIdx, blocks))
       offset.inc
@@ -150,7 +150,7 @@ suite "Erasure encode/decode":
     let
       blockIdx = toSeq(countup(offset, encoded.len - 1, encoded.steps))
-    for _ in 0..<encoded.M + 1: # NOTE: the +1
+    for _ in 0..<encoded.ecM + 1: # NOTE: the +1
       var idx: int
       while true:
         idx = rng.sample(blockIdx, blocks)
@@ -176,7 +176,7 @@ suite "Erasure encode/decode":
     let encoded = await encode(buffers, parity)
-    for b in encoded.blocks[0..<encoded.steps * encoded.M]:
+    for b in encoded.blocks[0..<encoded.steps * encoded.ecM]:
       (await store.delBlock(b)).tryGet()
     discard (await erasure.decode(encoded)).tryGet()
@@ -192,7 +192,7 @@ suite "Erasure encode/decode":
     let encoded = await encode(buffers, parity)
-    for b in encoded.blocks[^(encoded.steps * encoded.M)..^1]:
+    for b in encoded.blocks[^(encoded.steps * encoded.ecM)..^1]:
       (await store.delBlock(b)).tryGet()
     discard (await erasure.decode(encoded)).tryGet()

View File

@@ -86,8 +86,8 @@ suite "Manifest":
       decoded.protected == true
       decoded.originalLen == manifest.len
-      decoded.K == protected.K
-      decoded.M == protected.M
+      decoded.ecK == protected.ecK
+      decoded.ecM == protected.ecM
       decoded.originalCid == protected.originalCid
       decoded.originalCid == manifest.cid.tryGet()

View File

@@ -17,7 +17,7 @@ suite "StoreStream":
     stream: StoreStream
   # Check that `buf` contains `size` bytes with values start, start+1...
-  proc sequential_bytes(buf: seq[byte], size: int, start: int): bool =
+  proc sequentialBytes(buf: seq[byte], size: int, start: int): bool =
     for i in 0..<size:
       if int(buf[i]) != start+i:
         return false
@@ -60,7 +60,7 @@ suite "StoreStream":
       else:
         check read == 4
-      check sequential_bytes(buf,read,n)
+      check sequentialBytes(buf,read,n)
       n += read
   test "Read all blocks == blockSize":
@@ -71,7 +71,7 @@ suite "StoreStream":
     while not stream.atEof:
       let read = (await stream.readOnce(addr buf[0], buf.len))
       check read == 10
-      check sequential_bytes(buf,read,n)
+      check sequentialBytes(buf,read,n)
       n += read
   test "Read all blocks > blockSize":
@@ -87,7 +87,7 @@ suite "StoreStream":
      else:
        check read == 1
-      check sequential_bytes(buf,read,n)
+      check sequentialBytes(buf,read,n)
      n += read
   test "Read exact bytes within block boundary":
@@ -95,11 +95,11 @@ suite "StoreStream":
      buf = newSeq[byte](5)
     await stream.readExactly(addr buf[0], 5)
-    check sequential_bytes(buf,5,0)
+    check sequentialBytes(buf,5,0)
   test "Read exact bytes outside of block boundary":
     var
       buf = newSeq[byte](15)
     await stream.readExactly(addr buf[0], 15)
-    check sequential_bytes(buf,15,0)
+    check sequentialBytes(buf,15,0)

View File

@@ -37,7 +37,7 @@ proc example*(_: type StorageRequest): StorageRequest =
     erasure: StorageErasure(
       totalChunks: 12,
     ),
-    por: StoragePor(
+    por: StoragePoR(
       u: @(array[480, byte].example),
       publicKey: @(array[96, byte].example),
      name: @(array[512, byte].example)

vendor/nim-blscurve vendored
@@ -1 +1 @@
-Subproject commit 0237e4e0e914fc19359c18a66406d33bc942775c
+Subproject commit 48d8668c5a9a350d3a7ee0c3713ef9a11980a40d

@@ -1 +1 @@
-Subproject commit 44c198b96a2d687f94c9971f4a3ece02b330347b
+Subproject commit 0cde8aeb67c59fd0ac95496dc6b5e1168d6632aa

vendor/nim-ethers vendored
@@ -1 +1 @@
-Subproject commit e462649aecc9dedc1cdc1319a9089d2e40578284
+Subproject commit 577e02b8a25198d6897c1f4871b5fd8e1f859e5a

vendor/nim-leopard vendored
@@ -1 +1 @@
-Subproject commit ae043fd262d2cc2f46db4a9f2f8054e73167a970
+Subproject commit 1a6f2ab7252426a6ac01482a68b75d0c3b134cf0

@@ -1 +1 @@
-Subproject commit 8d408ac6cfc9c24ec8b7b65d5993e85050dcbaa9
+Subproject commit b3673c7a7a959ccacb393bd9b47e997bbd177f5a