Enable stylecheck (#353)

* applying styleCheck

* stuck on vendor folder

* Applies style check

* Turns styleCheck back off

* switches to stylecheck:usages

* Fixes empty template casing

* rolls up nim-blscurve, nim-datastore, nim-ethers, nim-leopard, and nim-taskpools.

* bumps nim-confutils and removes unused import from fileutils.nim

* Unused using in fileutils.nim is required by CI

* Reverts bump of nim-confutils module
Ben Bierens 2023-03-10 08:02:54 +01:00 committed by GitHub
parent 7a0a48e4a5
commit da79660f8e
39 changed files with 176 additions and 174 deletions
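
Many of the renames in the hunks below (PeerID to PeerId, WantList to Wantlist, codex_inflight_discovery to codexInflightDiscovery) are spelling-only: Nim compares identifiers case-insensitively after the first character and ignores underscores, so both spellings already resolve to the same symbol, and the new --styleCheck settings only pin down which spelling may appear at use sites. Others, such as ProtobufEncode to protobufEncode or K/M to ecK/ecM, change the first character or the name outright and are genuine renames. A minimal standalone sketch of the equivalence rule (hypothetical type, not libp2p's PeerId):

type PeerId = object          # declared spelling
  bytes: seq[byte]

var a: PeerId                 # spelled as declared
var b: Peer_ID                # same type: after the first character, case and
                              # underscores are ignored, so Peer_ID resolves to
                              # PeerId; legal Nim, but exactly the drift this
                              # commit sets out to eliminate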

View File

@ -30,7 +30,7 @@ import ./pendingblocks
logScope:
topics = "codex discoveryengine"
declareGauge(codex_inflight_discovery, "inflight discovery requests")
declareGauge(codexInflightDiscovery, "inflight discovery requests")
const
DefaultConcurrentDiscRequests = 10
@ -109,13 +109,13 @@ proc advertiseTaskLoop(b: DiscoveryEngine) {.async.} =
request = b.discovery.provide(cid)
b.inFlightAdvReqs[cid] = request
codex_inflight_discovery.set(b.inFlightAdvReqs.len.int64)
codexInflightDiscovery.set(b.inFlightAdvReqs.len.int64)
trace "Advertising block", cid, inflight = b.inFlightAdvReqs.len
await request
finally:
b.inFlightAdvReqs.del(cid)
codex_inflight_discovery.set(b.inFlightAdvReqs.len.int64)
codexInflightDiscovery.set(b.inFlightAdvReqs.len.int64)
trace "Advertised block", cid, inflight = b.inFlightAdvReqs.len
except CatchableError as exc:
trace "Exception in advertise task runner", exc = exc.msg
@ -148,7 +148,7 @@ proc discoveryTaskLoop(b: DiscoveryEngine) {.async.} =
.wait(DefaultDiscoveryTimeout)
b.inFlightDiscReqs[cid] = request
codex_inflight_discovery.set(b.inFlightAdvReqs.len.int64)
codexInflightDiscovery.set(b.inFlightAdvReqs.len.int64)
let
peers = await request
@ -163,7 +163,7 @@ proc discoveryTaskLoop(b: DiscoveryEngine) {.async.} =
finally:
b.inFlightDiscReqs.del(cid)
codex_inflight_discovery.set(b.inFlightAdvReqs.len.int64)
codexInflightDiscovery.set(b.inFlightAdvReqs.len.int64)
except CatchableError as exc:
trace "Exception in discovery task runner", exc = exc.msg
@ -217,16 +217,16 @@ proc stop*(b: DiscoveryEngine) {.async.} =
return
b.discEngineRunning = false
for t in b.advertiseTasks:
if not t.finished:
for task in b.advertiseTasks:
if not task.finished:
trace "Awaiting advertise task to stop"
await t.cancelAndWait()
await task.cancelAndWait()
trace "Advertise task stopped"
for t in b.discoveryTasks:
if not t.finished:
for task in b.discoveryTasks:
if not task.finished:
trace "Awaiting discovery task to stop"
await t.cancelAndWait()
await task.cancelAndWait()
trace "Discovery task stopped"
if not b.advertiseLoop.isNil and not b.advertiseLoop.finished:

View File

@ -108,10 +108,10 @@ proc stop*(b: BlockExcEngine) {.async.} =
return
b.blockexcRunning = false
for t in b.blockexcTasks:
if not t.finished:
for task in b.blockexcTasks:
if not task.finished:
trace "Awaiting task to stop"
await t.cancelAndWait()
await task.cancelAndWait()
trace "Task stopped"
trace "NetworkStore stopped"
@ -316,7 +316,7 @@ proc blocksHandler*(
proc wantListHandler*(
b: BlockExcEngine,
peer: PeerId,
wantList: WantList) {.async.} =
wantList: Wantlist) {.async.} =
## Handle incoming want lists
##
@ -530,7 +530,7 @@ proc new*(
proc blockWantListHandler(
peer: PeerId,
wantList: WantList): Future[void] {.gcsafe.} =
wantList: Wantlist): Future[void] {.gcsafe.} =
engine.wantListHandler(peer, wantList)
proc blockPresenceHandler(

View File

@ -34,13 +34,13 @@ const
MaxInflight* = 100
type
WantListHandler* = proc(peer: PeerID, wantList: WantList): Future[void] {.gcsafe.}
BlocksHandler* = proc(peer: PeerID, blocks: seq[bt.Block]): Future[void] {.gcsafe.}
BlockPresenceHandler* = proc(peer: PeerID, precense: seq[BlockPresence]): Future[void] {.gcsafe.}
AccountHandler* = proc(peer: PeerID, account: Account): Future[void] {.gcsafe.}
PaymentHandler* = proc(peer: PeerID, payment: SignedState): Future[void] {.gcsafe.}
WantListHandler* = proc(peer: PeerId, wantList: Wantlist): Future[void] {.gcsafe.}
BlocksHandler* = proc(peer: PeerId, blocks: seq[bt.Block]): Future[void] {.gcsafe.}
BlockPresenceHandler* = proc(peer: PeerId, precense: seq[BlockPresence]): Future[void] {.gcsafe.}
AccountHandler* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
PaymentHandler* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}
WantListSender* = proc(
id: PeerID,
id: PeerId,
cids: seq[Cid],
priority: int32 = 0,
cancel: bool = false,
@ -55,10 +55,10 @@ type
onAccount*: AccountHandler
onPayment*: PaymentHandler
BlocksSender* = proc(peer: PeerID, presence: seq[bt.Block]): Future[void] {.gcsafe.}
PresenceSender* = proc(peer: PeerID, presence: seq[BlockPresence]): Future[void] {.gcsafe.}
AccountSender* = proc(peer: PeerID, account: Account): Future[void] {.gcsafe.}
PaymentSender* = proc(peer: PeerID, payment: SignedState): Future[void] {.gcsafe.}
BlocksSender* = proc(peer: PeerId, presence: seq[bt.Block]): Future[void] {.gcsafe.}
PresenceSender* = proc(peer: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.}
AccountSender* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
PaymentSender* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}
BlockExcRequest* = object
sendWantList*: WantListSender
@ -68,7 +68,7 @@ type
sendPayment*: PaymentSender
BlockExcNetwork* = ref object of LPProtocol
peers*: Table[PeerID, NetworkPeer]
peers*: Table[PeerId, NetworkPeer]
switch*: Switch
handlers*: BlockExcHandlers
request*: BlockExcRequest
@ -92,7 +92,7 @@ proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} =
proc handleWantList(
b: BlockExcNetwork,
peer: NetworkPeer,
list: WantList) {.async.} =
list: Wantlist) {.async.} =
## Handle incoming want list
##
@ -107,8 +107,8 @@ proc makeWantList*(
cancel: bool = false,
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false): WantList =
WantList(
sendDontHave: bool = false): Wantlist =
Wantlist(
entries: cids.mapIt(
Entry(
`block`: it.data.buffer,
@ -120,7 +120,7 @@ proc makeWantList*(
proc sendWantList*(
b: BlockExcNetwork,
id: PeerID,
id: PeerId,
cids: seq[Cid],
priority: int32 = 0,
cancel: bool = false,
@ -175,7 +175,7 @@ template makeBlocks*(blocks: seq[bt.Block]): seq[pb.Block] =
proc sendBlocks*(
b: BlockExcNetwork,
id: PeerID,
id: PeerId,
blocks: seq[bt.Block]): Future[void] =
## Send blocks to remote
##
@ -195,7 +195,7 @@ proc handleBlockPresence(
proc sendBlockPresence*(
b: BlockExcNetwork,
id: PeerID,
id: PeerId,
presence: seq[BlockPresence]): Future[void] =
## Send presence to remote
##
@ -260,7 +260,7 @@ proc rpcHandler(b: BlockExcNetwork, peer: NetworkPeer, msg: Message) {.async.} =
except CatchableError as exc:
trace "Exception in blockexc rpc handler", exc = exc.msg
proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerID): NetworkPeer =
proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer =
## Creates or retrieves a BlockExcNetwork Peer
##
@ -287,7 +287,7 @@ proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerID): NetworkPeer =
return blockExcPeer
proc setupPeer*(b: BlockExcNetwork, peer: PeerID) =
proc setupPeer*(b: BlockExcNetwork, peer: PeerId) =
## Perform initial setup, such as want
## list exchange
##
@ -297,7 +297,7 @@ proc setupPeer*(b: BlockExcNetwork, peer: PeerID) =
proc dialPeer*(b: BlockExcNetwork, peer: PeerRecord) {.async.} =
await b.switch.connect(peer.peerId, peer.addresses.mapIt(it.address))
proc dropPeer*(b: BlockExcNetwork, peer: PeerID) =
proc dropPeer*(b: BlockExcNetwork, peer: PeerId) =
## Cleanup disconnected peer
##
@ -307,7 +307,7 @@ method init*(b: BlockExcNetwork) =
## Perform protocol initialization
##
proc peerEventHandler(peerId: PeerID, event: PeerEvent) {.async.} =
proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} =
if event.kind == PeerEventKind.Joined:
b.setupPeer(peerId)
else:
@ -339,7 +339,7 @@ proc new*(
inflightSema: newAsyncSemaphore(maxInflight))
proc sendWantList(
id: PeerID,
id: PeerId,
cids: seq[Cid],
priority: int32 = 0,
cancel: bool = false,
@ -350,16 +350,16 @@ proc new*(
id, cids, priority, cancel,
wantType, full, sendDontHave)
proc sendBlocks(id: PeerID, blocks: seq[bt.Block]): Future[void] {.gcsafe.} =
proc sendBlocks(id: PeerId, blocks: seq[bt.Block]): Future[void] {.gcsafe.} =
self.sendBlocks(id, blocks)
proc sendPresence(id: PeerID, presence: seq[BlockPresence]): Future[void] {.gcsafe.} =
proc sendPresence(id: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.} =
self.sendBlockPresence(id, presence)
proc sendAccount(id: PeerID, account: Account): Future[void] {.gcsafe.} =
proc sendAccount(id: PeerId, account: Account): Future[void] {.gcsafe.} =
self.sendAccount(id, account)
proc sendPayment(id: PeerID, payment: SignedState): Future[void] {.gcsafe.} =
proc sendPayment(id: PeerId, payment: SignedState): Future[void] {.gcsafe.} =
self.sendPayment(id, payment)
self.request = BlockExcRequest(

View File

@ -46,7 +46,7 @@ proc readLoop*(b: NetworkPeer, conn: Connection) {.async.} =
while not conn.atEof or not conn.closed:
let
data = await conn.readLp(MaxMessageSize)
msg = Message.ProtobufDecode(data).mapFailure().tryGet()
msg = Message.protobufDecode(data).mapFailure().tryGet()
trace "Got message for peer", peer = b.id
await b.handler(b, msg)
except CatchableError as exc:
@ -70,7 +70,7 @@ proc send*(b: NetworkPeer, msg: Message) {.async.} =
return
trace "Sending message to remote", peer = b.id
await conn.writeLp(ProtobufEncode(msg))
await conn.writeLp(protobufEncode(msg))
proc broadcast*(b: NetworkPeer, msg: Message) =
proc sendAwaiter() {.async.} =

View File

@ -27,7 +27,7 @@ logScope:
type
BlockExcPeerCtx* = ref object of RootObj
id*: PeerID
id*: PeerId
blocks*: Table[Cid, Presence] # remote peer have list including price
peerWants*: seq[Entry] # remote peers want lists
exchanged*: int # times peer has exchanged with us

View File

@ -29,30 +29,30 @@ logScope:
type
PeerCtxStore* = ref object of RootObj
peers*: OrderedTable[PeerID, BlockExcPeerCtx]
peers*: OrderedTable[PeerId, BlockExcPeerCtx]
iterator items*(self: PeerCtxStore): BlockExcPeerCtx =
for p in self.peers.values:
yield p
proc contains*(a: openArray[BlockExcPeerCtx], b: PeerID): bool =
proc contains*(a: openArray[BlockExcPeerCtx], b: PeerId): bool =
## Convenience method to check for peer precense
##
a.anyIt( it.id == b )
func contains*(self: PeerCtxStore, peerId: PeerID): bool =
func contains*(self: PeerCtxStore, peerId: PeerId): bool =
peerId in self.peers
func add*(self: PeerCtxStore, peer: BlockExcPeerCtx) =
trace "Adding peer to peer context store", peer = peer.id
self.peers[peer.id] = peer
func remove*(self: PeerCtxStore, peerId: PeerID) =
func remove*(self: PeerCtxStore, peerId: PeerId) =
trace "Removing peer from peer context store", peer = peerId
self.peers.del(peerId)
func get*(self: PeerCtxStore, peerId: PeerID): BlockExcPeerCtx =
func get*(self: PeerCtxStore, peerId: PeerId): BlockExcPeerCtx =
trace "Retrieving peer from peer context store", peer = peerId
self.peers.getOrDefault(peerId, nil)
@ -94,4 +94,4 @@ func selectCheapest*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
proc new*(T: type PeerCtxStore): PeerCtxStore =
T(
peers: initOrderedTable[PeerID, BlockExcPeerCtx]())
peers: initOrderedTable[PeerId, BlockExcPeerCtx]())

View File

@ -13,7 +13,7 @@ import pkg/libp2p
import message
export Message, ProtobufEncode, ProtobufDecode
export Message, protobufEncode, protobufDecode
export Wantlist, WantType, Entry
export Block, BlockPresenceType, BlockPresence
export AccountMessage, StateChannelUpdate

View File

@ -98,7 +98,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: StateChannelUpdate) =
ipb.finish()
pb.write(field, ipb)
proc ProtobufEncode*(value: Message): seq[byte] =
proc protobufEncode*(value: Message): seq[byte] =
var ipb = initProtoBuffer()
ipb.write(1, value.wantlist)
for v in value.payload:
@ -172,7 +172,7 @@ proc decode*(_: type StateChannelUpdate, pb: ProtoBuffer): ProtoResult[StateChan
discard ? pb.getField(1, value.update)
ok(value)
proc ProtobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
var
value = Message()
pb = initProtoBuffer(msg)

View File

@ -37,11 +37,11 @@ type
template EmptyCid*: untyped =
var
emptyCid {.global, threadvar.}:
EmptyCid {.global, threadvar.}:
array[CIDv0..CIDv1, Table[MultiCodec, Cid]]
once:
emptyCid = [
EmptyCid = [
CIDv0: {
multiCodec("sha2-256"): Cid
.init("QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n")
@ -54,15 +54,15 @@ template EmptyCid*: untyped =
}.toTable,
]
emptyCid
EmptyCid
template EmptyDigests*: untyped =
var
emptyDigests {.global, threadvar.}:
EmptyDigests {.global, threadvar.}:
array[CIDv0..CIDv1, Table[MultiCodec, MultiHash]]
once:
emptyDigests = [
EmptyDigests = [
CIDv0: {
multiCodec("sha2-256"): EmptyCid[CIDv0]
.catch
@ -83,15 +83,15 @@ template EmptyDigests*: untyped =
}.toTable,
]
emptyDigests
EmptyDigests
template EmptyBlock*: untyped =
var
emptyBlock {.global, threadvar.}:
EmptyBlock {.global, threadvar.}:
array[CIDv0..CIDv1, Table[MultiCodec, Block]]
once:
emptyBlock = [
EmptyBlock = [
CIDv0: {
multiCodec("sha2-256"): Block(
cid: EmptyCid[CIDv0][multiCodec("sha2-256")])
@ -102,7 +102,7 @@ template EmptyBlock*: untyped =
}.toTable,
]
emptyBlock
EmptyBlock
proc isEmpty*(cid: Cid): bool =
cid == EmptyCid[cid.cidver]

View File

@ -57,7 +57,7 @@ proc toNodeId*(host: ca.Address): NodeId =
proc findPeer*(
d: Discovery,
peerId: PeerID): Future[?PeerRecord] {.async.} =
peerId: PeerId): Future[?PeerRecord] {.async.} =
let
node = await d.protocol.resolve(toNodeId(peerId))

View File

@ -168,7 +168,7 @@ proc decode*(
new_manifest = encoded.len
var
decoder = self.decoderProvider(encoded.blockSize, encoded.K, encoded.M)
decoder = self.decoderProvider(encoded.blockSize, encoded.ecK, encoded.ecM)
try:
for i in 0..<encoded.steps:
@ -187,9 +187,9 @@ proc decode*(
await sleepAsync(10.millis)
var
data = newSeq[seq[byte]](encoded.K) # number of blocks to encode
parityData = newSeq[seq[byte]](encoded.M)
recovered = newSeqWith[seq[byte]](encoded.K, newSeq[byte](encoded.blockSize))
data = newSeq[seq[byte]](encoded.ecK) # number of blocks to encode
parityData = newSeq[seq[byte]](encoded.ecM)
recovered = newSeqWith[seq[byte]](encoded.ecK, newSeq[byte](encoded.blockSize))
idxPendingBlocks = pendingBlocks # copy futures to make using with `one` easier
emptyBlock = newSeq[byte](encoded.blockSize)
resolved = 0
@ -197,7 +197,7 @@ proc decode*(
while true:
# Continue to receive blocks until we have just enough for decoding
# or no more blocks can arrive
if (resolved >= encoded.K) or (idxPendingBlocks.len == 0):
if (resolved >= encoded.ecK) or (idxPendingBlocks.len == 0):
break
let
@ -210,9 +210,9 @@ proc decode*(
trace "Failed retrieving block", error = error.msg
continue
if idx >= encoded.K:
if idx >= encoded.ecK:
trace "Retrieved parity block", cid = blk.cid, idx
shallowCopy(parityData[idx - encoded.K], if blk.isEmpty: emptyBlock else: blk.data)
shallowCopy(parityData[idx - encoded.ecK], if blk.isEmpty: emptyBlock else: blk.data)
else:
trace "Retrieved data block", cid = blk.cid, idx
shallowCopy(data[idx], if blk.isEmpty: emptyBlock else: blk.data)
@ -223,7 +223,7 @@ proc decode*(
dataPieces = data.filterIt( it.len > 0 ).len
parityPieces = parityData.filterIt( it.len > 0 ).len
if dataPieces >= encoded.K:
if dataPieces >= encoded.ecK:
trace "Retrieved all the required data blocks", data = dataPieces, parity = parityPieces
continue
@ -234,7 +234,7 @@ proc decode*(
trace "Unable to decode manifest!", err = $err.error
return failure($err.error)
for i in 0..<encoded.K:
for i in 0..<encoded.ecK:
if data[i].len <= 0:
without blk =? bt.Block.new(recovered[i]), error:
trace "Unable to create block!", exc = error.msg

View File

@ -68,8 +68,8 @@ func encode*(_: DagPBCoder, manifest: Manifest): ?!seq[byte] =
header.write(5, manifest.originalBytes.uint64)
if manifest.protected:
var erasureInfo = initProtoBuffer()
erasureInfo.write(1, manifest.K.uint32)
erasureInfo.write(2, manifest.M.uint32)
erasureInfo.write(1, manifest.ecK.uint32)
erasureInfo.write(2, manifest.ecM.uint32)
erasureInfo.write(3, manifest.originalCid.data.buffer)
erasureInfo.write(4, manifest.originalLen.uint32)
erasureInfo.finish()
@ -95,7 +95,7 @@ func decode*(_: DagPBCoder, data: openArray[byte]): ?!Manifest =
blockSize: uint32
blocksLen: uint32
originalLen: uint32
K, M: uint32
ecK, ecM: uint32
blocks: seq[Cid]
# Decode `Header` message
@ -119,10 +119,10 @@ func decode*(_: DagPBCoder, data: openArray[byte]): ?!Manifest =
return failure("Unable to decode `erasureInfo` from manifest!")
if pbErasureInfo.buffer.len > 0:
if pbErasureInfo.getField(1, K).isErr:
if pbErasureInfo.getField(1, ecK).isErr:
return failure("Unable to decode `K` from manifest!")
if pbErasureInfo.getField(2, M).isErr:
if pbErasureInfo.getField(2, ecM).isErr:
return failure("Unable to decode `M` from manifest!")
if pbErasureInfo.getField(3, originalCid).isErr:
@ -157,8 +157,8 @@ func decode*(_: DagPBCoder, data: openArray[byte]): ?!Manifest =
protected: pbErasureInfo.buffer.len > 0)
if self.protected:
self.K = K.int
self.M = M.int
self.ecK = ecK.int
self.ecM = ecM.int
self.originalCid = ? Cid.init(originalCid).mapFailure
self.originalLen = originalLen.int

View File

@ -83,11 +83,11 @@ func bytes*(self: Manifest, pad = true): int =
func rounded*(self: Manifest): int =
## Number of data blocks in *protected* manifest including padding at the end
roundUp(self.originalLen, self.K)
roundUp(self.originalLen, self.ecK)
func steps*(self: Manifest): int =
## Number of EC groups in *protected* manifest
divUp(self.originalLen, self.K)
divUp(self.originalLen, self.ecK)
func verify*(self: Manifest): ?!void =
## Check manifest correctness
@ -97,7 +97,7 @@ func verify*(self: Manifest): ?!void =
if divUp(self.originalBytes, self.blockSize) != originalLen:
return failure newException(CodexError, "Broken manifest: wrong originalBytes")
if self.protected and (self.len != self.steps * (self.K + self.M)):
if self.protected and (self.len != self.steps * (self.ecK + self.ecM)):
return failure newException(CodexError, "Broken manifest: wrong originalLen")
return success()
@ -184,7 +184,7 @@ proc new*(
proc new*(
T: type Manifest,
manifest: Manifest,
K, M: int): ?!Manifest =
ecK, ecM: int): ?!Manifest =
## Create an erasure protected dataset from an
## un-protected one
##
@ -197,12 +197,12 @@ proc new*(
originalBytes: manifest.originalBytes,
blockSize: manifest.blockSize,
protected: true,
K: K, M: M,
ecK: ecK, ecM: ecM,
originalCid: ? manifest.cid,
originalLen: manifest.len)
let
encodedLen = self.rounded + (self.steps * M)
encodedLen = self.rounded + (self.steps * ecM)
self.blocks = newSeq[Cid](encodedLen)
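
The protected-manifest arithmetic above reads more easily with a concrete case; a hedged sketch with invented numbers, not values taken from this diff:

let (originalLen, ecK, ecM) = (10, 4, 2)          # 10 blocks, protected as 4 data + 2 parity per group
let steps      = (originalLen + ecK - 1) div ecK  # divUp(10, 4)   -> 3 EC groups
let rounded    = steps * ecK                      # roundUp(10, 4) -> 12 data blocks incl. padding
let encodedLen = rounded + steps * ecM            # 12 + 3 * 2 = 18 blocks in the protected manifest
assert encodedLen == steps * (ecK + ecM)          # the invariant verify() checks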

View File

@ -37,8 +37,8 @@ type
codec*: MultiCodec # Data set codec
case protected*: bool # Protected datasets have erasure coded info
of true:
K*: int # Number of blocks to encode
M*: int # Number of resulting parity blocks
ecK*: int # Number of blocks to encode
ecM*: int # Number of resulting parity blocks
originalCid*: Cid # The original Cid of the dataset being erasure coded
originalLen*: int # The length of the original manifest
else:

View File

@ -45,7 +45,7 @@ type
CodexNodeRef* = ref object
switch*: Switch
networkId*: PeerID
networkId*: PeerId
blockStore*: BlockStore
engine*: BlockExcEngine
erasure*: Erasure
@ -54,12 +54,12 @@ type
proc findPeer*(
node: CodexNodeRef,
peerId: PeerID): Future[?PeerRecord] {.async.} =
peerId: PeerId): Future[?PeerRecord] {.async.} =
return await node.discovery.findPeer(peerId)
proc connect*(
node: CodexNodeRef,
peerId: PeerID,
peerId: PeerId,
addrs: seq[MultiAddress]): Future[void] =
node.switch.connect(peerId, addrs)
@ -288,7 +288,7 @@ proc requestStorage*(self: CodexNodeRef,
erasure: StorageErasure(
totalChunks: encoded.len.uint64,
),
por: StoragePor(
por: StoragePoR(
u: @[], # TODO: PoR setup
publicKey: @[], # TODO: PoR setup
name: @[] # TODO: PoR setup

View File

@ -50,7 +50,7 @@ proc initRestApi*(node: CodexNodeRef, conf: CodexConf): RestRouter =
router.api(
MethodGet,
"/api/codex/v1/connect/{peerId}") do (
peerId: PeerID,
peerId: PeerId,
addrs: seq[MultiAddress]) -> RestApiResponse:
## Connect to a peer
##

View File

@ -37,7 +37,7 @@ proc encodeString*(peerId: PeerId): Result[string, cstring] =
ok($peerId)
proc decodeString*(T: type PeerId, value: string): Result[PeerId, cstring] =
PeerID.init(value)
PeerId.init(value)
proc encodeString*(address: MultiAddress): Result[string, cstring] =
ok($address)

View File

@ -39,7 +39,7 @@ proc fromJson*(_: type StorageRequestParams,
func `%`*(address: Address): JsonNode =
% $address
func `%`*(stint: StInt|StUInt): JsonNode =
func `%`*(stint: StInt|StUint): JsonNode =
%("0x" & stint.toHex)
func `%`*(arr: openArray[byte]): JsonNode =

View File

@ -125,8 +125,8 @@ type
# PoR query element
QElement* = object
I*: int64
V*: blst_scalar
i*: int64
v*: blst_scalar
PoR* = object
ssk*: SecretKey
@ -157,7 +157,7 @@ proc getSector(
##
var res: ZChar
stream.setPos(((blockid * spb + sectorid) * ZChar.len).int)
stream.setPos(((blockId * spb + sectorId) * ZChar.len).int)
discard await stream.readOnce(addr res[0], ZChar.len)
return res
@ -165,8 +165,8 @@ proc rndScalar(): blst_scalar =
## Generate random scalar within the subroup order r
##
var scal {.noInit.}: array[32, byte]
var scalar {.noInit.}: blst_scalar
var scal : array[32, byte]
var scalar : blst_scalar
while true:
for val in scal.mitems:
@ -183,7 +183,7 @@ proc rndP2(): (blst_p2, blst_scalar) =
##
var
x {.noInit.}: blst_p2
x : blst_p2
x.blst_p2_from_affine(BLS12_381_G2) # init from generator
let
@ -195,7 +195,7 @@ proc rndP2(): (blst_p2, blst_scalar) =
proc rndP1(): (blst_p1, blst_scalar) =
## Generate random point on G1
var
x {.noInit.}: blst_p1
x : blst_p1
x.blst_p1_from_affine(BLS12_381_G1) # init from generator
let
@ -316,8 +316,8 @@ proc generateQuery*(tau: Tau, l: int): seq[QElement] =
for i in 0..<l:
var q: QElement
q.I = Rng.instance.rand(n-1) #TODO: dedup
q.V = rndScalar() #TODO: fix range
q.i = Rng.instance.rand(n-1) #TODO: dedup
q.v = rndScalar() #TODO: fix range
result.add(q)
proc generateProof*(
@ -337,13 +337,13 @@ proc generateProof*(
for qelem in q:
let
sect = fromBytesBE((await stream.getSector(qelem.I, j, s)))
sect = fromBytesBE((await stream.getSector(qelem.i, j, s)))
var
x, v, sector: blst_fr
sector.blst_fr_from_scalar(sect)
v.blst_fr_from_scalar(qelem.V)
v.blst_fr_from_scalar(qelem.v)
x.blst_fr_mul(v, sector)
muj.blst_fr_add(muj, x)
@ -360,7 +360,7 @@ proc generateProof*(
var
prod: blst_p1
prod.blst_p1_mult(authenticators[qelem.I], qelem.V, 255)
prod.blst_p1_mult(authenticators[qelem.i], qelem.v, 255)
sigma.blst_p1_add_or_double(sigma, prod)
return Proof(mu: mu, sigma: sigma)
@ -411,7 +411,7 @@ proc verifyProof*(
var first: blst_p1
for qelem in q:
var prod: blst_p1
prod.blst_p1_mult(hashNameI(self.tau.t.name, qelem.I), qelem.V, 255)
prod.blst_p1_mult(hashNameI(self.tau.t.name, qelem.i), qelem.v, 255)
first.blst_p1_add_or_double(first, prod)
doAssert(blst_p1_on_curve(first).bool)
@ -426,7 +426,7 @@ proc verifyProof*(
var sum: blst_p1
sum.blst_p1_add_or_double(first, second)
var g {.noInit.}: blst_p2
var g : blst_p2
g.blst_p2_from_affine(BLS12_381_G2)
return verifyPairings(sum, self.spk.key, sigma, g)

View File

@ -73,7 +73,7 @@ method listBlocks*(
raiseAssert("Not implemented!")
method close*(self: Blockstore): Future[void] {.base.} =
method close*(self: BlockStore): Future[void] {.base.} =
## Close the blockstore, cleaning up resources managed by it.
## For some implementations this may be a no-op
##

View File

@ -49,7 +49,7 @@ proc secureWriteFile*[T: byte|char](path: string,
else:
writeFile(path, data, 0o600)
proc checkSecureFile*(path: string): IOResult[bool] =
proc checkSecureFile*(path: string): IoResult[bool] =
when defined(windows):
checkCurrentUserOnlyACL(path)
else:

View File

@ -69,6 +69,8 @@ else:
--define:metrics
# for heap-usage-by-instance-type metrics and object base-type strings
--define:nimTypeNames
--styleCheck:usages
--styleCheck:error
when (NimMajor, NimMinor) >= (1, 4):
--warning:"ObservableStores:off"
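
The two switches added above are what the rest of this commit is satisfying: usages requires every use of an identifier to match the spelling at its declaration, and error turns violations into compile errors instead of hints. A small sketch of a usage that now fails the build (hypothetical proc, not from the codebase):

proc fetchBlock(cid: string): string =
  "block for " & cid

# fetch_block names the same proc as fetchBlock, but the spelling differs from
# the declaration; with --styleCheck:usages --styleCheck:error the compiler
# rejects this call instead of merely hinting about it.
echo fetch_block("some-cid")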

View File

@ -149,7 +149,7 @@ suite "Block Advertising and Discovery":
pendingBlocks = blocks.mapIt(
engine.pendingBlocks.getWantHandle(it.cid)
)
peerId = PeerID.example
peerId = PeerId.example
haves = collect(initTable()):
for blk in blocks:
{ blk.cid: Presence(cid: blk.cid, price: 0.u256) }

View File

@ -153,7 +153,7 @@ suite "Test Discovery Engine":
check cid == blocks[0].cid
check peerStore.len < minPeers
var
peerCtx = BlockExcPeerCtx(id: PeerID.example)
peerCtx = BlockExcPeerCtx(id: PeerId.example)
peerCtx.blocks[cid] = Presence(cid: cid, price: 0.u256)
peerStore.add(peerCtx)

View File

@ -24,7 +24,7 @@ suite "NetworkStore engine basic":
var
rng: Rng
seckey: PrivateKey
peerId: PeerID
peerId: PeerId
chunker: Chunker
wallet: WalletRef
blockDiscovery: Discovery
@ -36,7 +36,7 @@ suite "NetworkStore engine basic":
setup:
rng = Rng.instance()
seckey = PrivateKey.random(rng[]).tryGet()
peerId = PeerID.init(seckey.getPublicKey().tryGet()).tryGet()
peerId = PeerId.init(seckey.getPublicKey().tryGet()).tryGet()
chunker = RandomChunker.new(Rng.instance(), size = 1024, chunkSize = 256)
wallet = WalletRef.example
blockDiscovery = Discovery.new()
@ -54,7 +54,7 @@ suite "NetworkStore engine basic":
test "Should send want list to new peers":
proc sendWantList(
id: PeerID,
id: PeerId,
cids: seq[Cid],
priority: int32 = 0,
cancel: bool = false,
@ -94,7 +94,7 @@ suite "NetworkStore engine basic":
test "Should send account to new peers":
let pricing = Pricing.example
proc sendAccount(peer: PeerID, account: Account) {.gcsafe, async.} =
proc sendAccount(peer: PeerId, account: Account) {.gcsafe, async.} =
check account.address == pricing.address
done.complete()
@ -129,7 +129,7 @@ suite "NetworkStore engine handlers":
var
rng: Rng
seckey: PrivateKey
peerId: PeerID
peerId: PeerId
chunker: Chunker
wallet: WalletRef
blockDiscovery: Discovery
@ -154,7 +154,7 @@ suite "NetworkStore engine handlers":
blocks.add(bt.Block.new(chunk).tryGet())
seckey = PrivateKey.random(rng[]).tryGet()
peerId = PeerID.init(seckey.getPublicKey().tryGet()).tryGet()
peerId = PeerId.init(seckey.getPublicKey().tryGet()).tryGet()
wallet = WalletRef.example
blockDiscovery = Discovery.new()
peerStore = PeerCtxStore.new()
@ -204,7 +204,7 @@ suite "NetworkStore engine handlers":
done = newFuture[void]()
wantList = makeWantList(blocks.mapIt( it.cid ))
proc sendPresence(peerId: PeerID, presence: seq[BlockPresence]) {.gcsafe, async.} =
proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} =
check presence.mapIt( it.cid ) == wantList.entries.mapIt( it.`block` )
done.complete()
@ -226,7 +226,7 @@ suite "NetworkStore engine handlers":
blocks.mapIt( it.cid ),
sendDontHave = true)
proc sendPresence(peerId: PeerID, presence: seq[BlockPresence]) {.gcsafe, async.} =
proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} =
check presence.mapIt( it.cid ) == wantList.entries.mapIt( it.`block` )
for p in presence:
check:
@ -248,7 +248,7 @@ suite "NetworkStore engine handlers":
blocks.mapIt( it.cid ),
sendDontHave = true)
proc sendPresence(peerId: PeerID, presence: seq[BlockPresence]) {.gcsafe, async.} =
proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} =
let
cid1Buf = blocks[0].cid.data.buffer
cid2Buf = blocks[1].cid.data.buffer
@ -297,7 +297,7 @@ suite "NetworkStore engine handlers":
engine.network = BlockExcNetwork(
request: BlockExcRequest(
sendPayment: proc(receiver: PeerID, payment: SignedState) {.gcsafe, async.} =
sendPayment: proc(receiver: PeerId, payment: SignedState) {.gcsafe, async.} =
let
amount =
blocks.mapIt(
@ -319,7 +319,7 @@ suite "NetworkStore engine handlers":
handles: Table[Cid, Future[bt.Block]]
proc sendWantList(
id: PeerID,
id: PeerId,
cids: seq[Cid],
priority: int32 = 0,
cancel: bool = false,
@ -356,7 +356,7 @@ suite "Task Handler":
var
rng: Rng
seckey: PrivateKey
peerId: PeerID
peerId: PeerId
chunker: Chunker
wallet: WalletRef
blockDiscovery: Discovery
@ -368,7 +368,7 @@ suite "Task Handler":
localStore: BlockStore
peersCtx: seq[BlockExcPeerCtx]
peers: seq[PeerID]
peers: seq[PeerId]
blocks: seq[bt.Block]
setup:
@ -382,7 +382,7 @@ suite "Task Handler":
blocks.add(bt.Block.new(chunk).tryGet())
seckey = PrivateKey.random(rng[]).tryGet()
peerId = PeerID.init(seckey.getPublicKey().tryGet()).tryGet()
peerId = PeerId.init(seckey.getPublicKey().tryGet()).tryGet()
wallet = WalletRef.example
blockDiscovery = Discovery.new()
peerStore = PeerCtxStore.new()
@ -409,7 +409,7 @@ suite "Task Handler":
for i in 0..3:
let seckey = PrivateKey.random(rng[]).tryGet()
peers.add(PeerID.init(seckey.getPublicKey().tryGet()).tryGet())
peers.add(PeerId.init(seckey.getPublicKey().tryGet()).tryGet())
peersCtx.add(BlockExcPeerCtx(
id: peers[i]
@ -420,7 +420,7 @@ suite "Task Handler":
test "Should send want-blocks in priority order":
proc sendBlocks(
id: PeerID,
id: PeerId,
blks: seq[bt.Block]) {.gcsafe, async.} =
check blks.len == 2
check:
@ -458,7 +458,7 @@ suite "Task Handler":
let missing = @[bt.Block.new("missing".toBytes).tryGet()]
let price = (!engine.pricing).price
proc sendPresence(id: PeerID, presence: seq[BlockPresence]) {.gcsafe, async.} =
proc sendPresence(id: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} =
check presence.mapIt(!Presence.init(it)) == @[
Presence(cid: present[0].cid, have: true, price: price),
Presence(cid: present[1].cid, have: true, price: price),

View File

@ -18,7 +18,7 @@ suite "Network - Handlers":
let
rng = Rng.instance()
seckey = PrivateKey.random(rng[]).tryGet()
peerId = PeerID.init(seckey.getPublicKey().tryGet()).tryGet()
peerId = PeerId.init(seckey.getPublicKey().tryGet()).tryGet()
chunker = RandomChunker.new(Rng.instance(), size = 1024, chunkSize = 256)
var
@ -49,7 +49,7 @@ suite "Network - Handlers":
discard await networkPeer.connect()
test "Want List handler":
proc wantListHandler(peer: PeerID, wantList: WantList) {.gcsafe, async.} =
proc wantListHandler(peer: PeerId, wantList: Wantlist) {.gcsafe, async.} =
# check that we got the correct amount of entries
check wantList.entries.len == 4
@ -71,25 +71,25 @@ suite "Network - Handlers":
true, true)
let msg = Message(wantlist: wantList)
await buffer.pushData(lenPrefix(ProtobufEncode(msg)))
await buffer.pushData(lenPrefix(protobufEncode(msg)))
await done.wait(500.millis)
test "Blocks Handler":
proc blocksHandler(peer: PeerID, blks: seq[bt.Block]) {.gcsafe, async.} =
proc blocksHandler(peer: PeerId, blks: seq[bt.Block]) {.gcsafe, async.} =
check blks == blocks
done.complete()
network.handlers.onBlocks = blocksHandler
let msg = Message(payload: makeBlocks(blocks))
await buffer.pushData(lenPrefix(ProtobufEncode(msg)))
await buffer.pushData(lenPrefix(protobufEncode(msg)))
await done.wait(500.millis)
test "Presence Handler":
proc presenceHandler(
peer: PeerID,
peer: PeerId,
precense: seq[BlockPresence]) {.gcsafe, async.} =
for b in blocks:
check:
@ -105,35 +105,35 @@ suite "Network - Handlers":
cid: it.cid.data.buffer,
type: BlockPresenceType.Have
)))
await buffer.pushData(lenPrefix(ProtobufEncode(msg)))
await buffer.pushData(lenPrefix(protobufEncode(msg)))
await done.wait(500.millis)
test "Handles account messages":
let account = Account(address: EthAddress.example)
proc handleAccount(peer: PeerID, received: Account) {.gcsafe, async.} =
proc handleAccount(peer: PeerId, received: Account) {.gcsafe, async.} =
check received == account
done.complete()
network.handlers.onAccount = handleAccount
let message = Message(account: AccountMessage.init(account))
await buffer.pushData(lenPrefix(ProtobufEncode(message)))
await buffer.pushData(lenPrefix(protobufEncode(message)))
await done.wait(100.millis)
test "Handles payment messages":
let payment = SignedState.example
proc handlePayment(peer: PeerID, received: SignedState) {.gcsafe, async.} =
proc handlePayment(peer: PeerId, received: SignedState) {.gcsafe, async.} =
check received == payment
done.complete()
network.handlers.onPayment = handlePayment
let message = Message(payment: StateChannelUpdate.init(payment))
await buffer.pushData(lenPrefix(ProtobufEncode(message)))
await buffer.pushData(lenPrefix(protobufEncode(message)))
await done.wait(100.millis)
@ -179,7 +179,7 @@ suite "Network - Senders":
switch2.stop())
test "Send want list":
proc wantListHandler(peer: PeerID, wantList: WantList) {.gcsafe, async.} =
proc wantListHandler(peer: PeerId, wantList: Wantlist) {.gcsafe, async.} =
# check that we got the correct amount of entries
check wantList.entries.len == 4
@ -203,7 +203,7 @@ suite "Network - Senders":
await done.wait(500.millis)
test "send blocks":
proc blocksHandler(peer: PeerID, blks: seq[bt.Block]) {.gcsafe, async.} =
proc blocksHandler(peer: PeerId, blks: seq[bt.Block]) {.gcsafe, async.} =
check blks == blocks
done.complete()
@ -216,7 +216,7 @@ suite "Network - Senders":
test "send presence":
proc presenceHandler(
peer: PeerID,
peer: PeerId,
precense: seq[BlockPresence]) {.gcsafe, async.} =
for b in blocks:
check:
@ -239,7 +239,7 @@ suite "Network - Senders":
test "send account":
let account = Account(address: EthAddress.example)
proc handleAccount(peer: PeerID, received: Account) {.gcsafe, async.} =
proc handleAccount(peer: PeerId, received: Account) {.gcsafe, async.} =
check received == account
done.complete()
@ -251,7 +251,7 @@ suite "Network - Senders":
test "send payment":
let payment = SignedState.example
proc handlePayment(peer: PeerID, received: SignedState) {.gcsafe, async.} =
proc handlePayment(peer: PeerId, received: SignedState) {.gcsafe, async.} =
check received == payment
done.complete()
@ -295,7 +295,7 @@ suite "Network - Test Limits":
test "Concurrent Sends":
let account = Account(address: EthAddress.example)
network2.handlers.onAccount =
proc(peer: PeerID, received: Account) {.gcsafe, async.} =
proc(peer: PeerId, received: Account) {.gcsafe, async.} =
check false
let fut = network1.send(

View File

@ -43,12 +43,12 @@ proc example*(_: type bt.Block): bt.Block =
let bytes = newSeqWith(length, rand(uint8))
bt.Block.new(bytes).tryGet()
proc example*(_: type PeerId): PeerID =
proc example*(_: type PeerId): PeerId =
let key = PrivateKey.random(Rng.instance[]).get
PeerId.init(key.getPublicKey().get).get
proc example*(_: type BlockExcPeerCtx): BlockExcPeerCtx =
BlockExcPeerCtx(id: PeerID.example)
BlockExcPeerCtx(id: PeerId.example)
proc example*(_: type Cid): Cid =
bt.Block.example.cid

View File

@ -29,7 +29,7 @@ proc new*(T: type MockDiscovery): T =
proc findPeer*(
d: Discovery,
peerId: PeerID): Future[?PeerRecord] {.async.} =
peerId: PeerId): Future[?PeerRecord] {.async.} =
return none(PeerRecord)
method find*(

View File

@ -88,7 +88,7 @@ proc commonBlockStoreTests*(
for handle in putHandles:
check not handle.failed
check handle.read.isOK
check handle.read.isOk
let
cids = (await store.listBlocks(blockType = BlockType.Block)).tryGet()
@ -111,7 +111,7 @@ proc commonBlockStoreTests*(
for handle in putHandles:
check not handle.failed
check handle.read.isOK
check handle.read.isOk
let
cids = (await store.listBlocks(blockType = BlockType.Manifest)).tryGet()
@ -135,7 +135,7 @@ proc commonBlockStoreTests*(
for handle in putHandles:
check not handle.failed
check handle.read.isOK
check handle.read.isOk
let
cids = (await store.listBlocks(blockType = BlockType.Both)).tryGet()

View File

@ -152,7 +152,7 @@ suite "BlockMaintainer":
test "Should handle new blocks":
proc invokeTimerManyTimes(): Future[void] {.async.} =
for i in countUp(0, 10):
for i in countup(0, 10):
await mockTimer.invokeCallback()
blockMaintainer.start()

View File

@ -64,7 +64,7 @@ suite "Erasure encode/decode":
column = rng.rand(encoded.len div encoded.steps) # random column
dropped: seq[Cid]
for _ in 0..<encoded.M:
for _ in 0..<encoded.ecM:
dropped.add(encoded[column])
(await store.delBlock(encoded[column])).tryGet()
column.inc(encoded.steps)
@ -92,7 +92,7 @@ suite "Erasure encode/decode":
column = rng.rand(encoded.len div encoded.steps) # random column
dropped: seq[Cid]
for _ in 0..<encoded.M + 1:
for _ in 0..<encoded.ecM + 1:
dropped.add(encoded[column])
(await store.delBlock(encoded[column])).tryGet()
column.inc(encoded.steps)
@ -122,7 +122,7 @@ suite "Erasure encode/decode":
let
blockIdx = toSeq(countup(offset, encoded.len - 1, encoded.steps))
for _ in 0..<encoded.M:
for _ in 0..<encoded.ecM:
blocks.add(rng.sample(blockIdx, blocks))
offset.inc
@ -150,7 +150,7 @@ suite "Erasure encode/decode":
let
blockIdx = toSeq(countup(offset, encoded.len - 1, encoded.steps))
for _ in 0..<encoded.M + 1: # NOTE: the +1
for _ in 0..<encoded.ecM + 1: # NOTE: the +1
var idx: int
while true:
idx = rng.sample(blockIdx, blocks)
@ -176,7 +176,7 @@ suite "Erasure encode/decode":
let encoded = await encode(buffers, parity)
for b in encoded.blocks[0..<encoded.steps * encoded.M]:
for b in encoded.blocks[0..<encoded.steps * encoded.ecM]:
(await store.delBlock(b)).tryGet()
discard (await erasure.decode(encoded)).tryGet()
@ -192,7 +192,7 @@ suite "Erasure encode/decode":
let encoded = await encode(buffers, parity)
for b in encoded.blocks[^(encoded.steps * encoded.M)..^1]:
for b in encoded.blocks[^(encoded.steps * encoded.ecM)..^1]:
(await store.delBlock(b)).tryGet()
discard (await erasure.decode(encoded)).tryGet()

View File

@ -86,8 +86,8 @@ suite "Manifest":
decoded.protected == true
decoded.originalLen == manifest.len
decoded.K == protected.K
decoded.M == protected.M
decoded.ecK == protected.ecK
decoded.ecM == protected.ecM
decoded.originalCid == protected.originalCid
decoded.originalCid == manifest.cid.tryGet()

View File

@ -17,7 +17,7 @@ suite "StoreStream":
stream: StoreStream
# Check that `buf` contains `size` bytes with values start, start+1...
proc sequential_bytes(buf: seq[byte], size: int, start: int): bool =
proc sequentialBytes(buf: seq[byte], size: int, start: int): bool =
for i in 0..<size:
if int(buf[i]) != start+i:
return false
@ -60,7 +60,7 @@ suite "StoreStream":
else:
check read == 4
check sequential_bytes(buf,read,n)
check sequentialBytes(buf,read,n)
n += read
test "Read all blocks == blockSize":
@ -71,7 +71,7 @@ suite "StoreStream":
while not stream.atEof:
let read = (await stream.readOnce(addr buf[0], buf.len))
check read == 10
check sequential_bytes(buf,read,n)
check sequentialBytes(buf,read,n)
n += read
test "Read all blocks > blockSize":
@ -87,7 +87,7 @@ suite "StoreStream":
else:
check read == 1
check sequential_bytes(buf,read,n)
check sequentialBytes(buf,read,n)
n += read
test "Read exact bytes within block boundary":
@ -95,11 +95,11 @@ suite "StoreStream":
buf = newSeq[byte](5)
await stream.readExactly(addr buf[0], 5)
check sequential_bytes(buf,5,0)
check sequentialBytes(buf,5,0)
test "Read exact bytes outside of block boundary":
var
buf = newSeq[byte](15)
await stream.readExactly(addr buf[0], 15)
check sequential_bytes(buf,15,0)
check sequentialBytes(buf,15,0)

View File

@ -37,7 +37,7 @@ proc example*(_: type StorageRequest): StorageRequest =
erasure: StorageErasure(
totalChunks: 12,
),
por: StoragePor(
por: StoragePoR(
u: @(array[480, byte].example),
publicKey: @(array[96, byte].example),
name: @(array[512, byte].example)

2
vendor/nim-blscurve vendored

@ -1 +1 @@
Subproject commit 0237e4e0e914fc19359c18a66406d33bc942775c
Subproject commit 48d8668c5a9a350d3a7ee0c3713ef9a11980a40d

2
vendor/nim-datastore vendored

@ -1 +1 @@
Subproject commit 44c198b96a2d687f94c9971f4a3ece02b330347b
Subproject commit 0cde8aeb67c59fd0ac95496dc6b5e1168d6632aa

2
vendor/nim-ethers vendored

@ -1 +1 @@
Subproject commit e462649aecc9dedc1cdc1319a9089d2e40578284
Subproject commit 577e02b8a25198d6897c1f4871b5fd8e1f859e5a

2
vendor/nim-leopard vendored

@ -1 +1 @@
Subproject commit ae043fd262d2cc2f46db4a9f2f8054e73167a970
Subproject commit 1a6f2ab7252426a6ac01482a68b75d0c3b134cf0

2
vendor/nim-taskpools vendored

@ -1 +1 @@
Subproject commit 8d408ac6cfc9c24ec8b7b65d5993e85050dcbaa9
Subproject commit b3673c7a7a959ccacb393bd9b47e997bbd177f5a