Fix warnings (drops Nim 1.2) (#348)

* [build] disable XCannotRaiseY hint

There are too many {.raises: [Defect].} annotations in
the libraries that we use, drowning out all other
warnings and hints.
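
For illustration only (hypothetical code, not from this repo):
on Nim 1.6 and later the effect system no longer tracks Defect,
so a raises list that still mentions it produces an
XCannotRaiseY hint at every such declaration.

    import std/strutils

    # Hypothetical example: the `Defect` entry below is what triggers
    # "Hint: 'parse' cannot raise 'Defect' [XCannotRaiseY]" on newer Nim.
    proc parse(s: string): int {.raises: [Defect, ValueError].} =
      parseInt(s)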

* [build] disable BareExcept warning

The warning is not yet enabled in a released version of
Nim, so the libraries that we depend on have not fixed
it yet, drowning out our own hints and warnings.
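
A hypothetical sketch of the pattern the warning targets: a bare
`except:` that catches everything, including Defects.

    # Hypothetical example: the untyped `except:` below is what
    # warning[BareExcept] flags on recent Nim (gated at 1.6.11 in
    # config.nims later in this change).
    proc readOrEmpty(path: string): string =
      try:
        readFile(path)
      except:
        ""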

* [build] disable DotLikeOps warning

Dot-like ops were an experiment that is not going to
land in Nim.
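
A common source of the warning in this codebase is the questionable
package's `.?` operator; the snippet below is a hypothetical
illustration of what triggers it.

    import std/options
    import pkg/questionable

    type Peer = object
      address: string

    let peer: ?Peer = some Peer(address: "127.0.0.1")
    # Using a dot-like operator such as `.?` emits warning[DotLikeOps]
    # on Nim 1.6, even though the parsing change was never adopted.
    echo peer.?address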

* [build] compile log statements in tests

When running tests, all log statements are compiled.
They are filtered out at runtime during a test run.
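
The mechanism is the one added to tests/config.nims and
tests/logging.nim further down in this diff: a compile-time level of
NONE keeps every statement, and a dynamic textlines sink lets the
test harness replace the writer at runtime.

    import pkg/chronicles

    # Built with:
    #   -d:chronicles_log_level:"NONE"            # compile all log statements
    #   -d:chronicles_sinks:"textlines[dynamic]"  # writer replaceable at runtime
    # a test run can drop (or filter) the output like this:
    proc ignoreLogging(level: LogLevel, message: LogOutputStr) =
      discard

    defaultChroniclesStream.output.writer = ignoreLogging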

* [build] do not build executable when running unit test

It's already built in the integration test

* [build] Fix warnings

- remove unused code
- remove unused imports
- stop using deprecated functionality

* [build] Put compiler flags behind nim version checks
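
The pattern, as added to config.nims in this change: only pass the
warning and hint switches that the active compiler understands.

    when (NimMajor, NimMinor) >= (1, 4):
      --warning:"ObservableStores:off"
      --warning:"LockLevel:off"
      --hint:"XCannotRaiseY:off"

    when (NimMajor, NimMinor) >= (1, 6):
      --warning:"DotLikeOps:off"

    when (NimMajor, NimMinor, NimPatch) >= (1, 6, 11):
      --warning:"BareExcept:off"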

* [CI] remove Nim 1.2 compatibility
markspanbroek 2023-03-09 12:23:45 +01:00 committed by GitHub
parent 9c8a59d150
commit 7a0a48e4a5
51 changed files with 232 additions and 344 deletions

View File

@@ -90,20 +90,3 @@ jobs:
           flags: unittests
           name: codecov-umbrella
           verbose: true
-  nim_1_2:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@v3
-        with:
-          submodules: recursive
-      - name: Setup Nimbus Build System
-        uses: ./.github/actions/nimbus-build-system
-        with:
-          os: linux
-          nim_version: version-1-2
-      - name: Unit tests
-        run: make -j${ncpu} test

View File

@@ -59,7 +59,7 @@ when isMainModule:
   trace "Data dir initialized", dir = $config.dataDir
-  if not(checkAndCreateDataDir((config.dataDir / "repo").string)):
+  if not(checkAndCreateDataDir((config.dataDir / "repo"))):
     # We are unable to access/create data folder or data folder's
     # permissions are insecure.
     quit QuitFailure
@@ -72,10 +72,10 @@ when isMainModule:
   let
     keyPath =
-      if isAbsolute(string config.netPrivKeyFile):
-        string config.netPrivKeyFile
+      if isAbsolute(config.netPrivKeyFile):
+        config.netPrivKeyFile
       else:
-        string config.dataDir / string config.netPrivKeyFile
+        config.dataDir / config.netPrivKeyFile
     privateKey = setupKey(keyPath).expect("Should setup private key!")
     server = CodexServer.new(config, privateKey)

View File

@@ -40,12 +40,12 @@
   DefaultMaxPeersPerRequest* = 10
   DefaultTaskQueueSize = 100
   DefaultConcurrentTasks = 10
-  DefaultMaxRetries = 3
-  DefaultConcurrentDiscRequests = 10
-  DefaultConcurrentAdvertRequests = 10
-  DefaultDiscoveryTimeout = 1.minutes
-  DefaultMaxQueriedBlocksCache = 1000
-  DefaultMinPeersPerBlock = 3
+  # DefaultMaxRetries = 3
+  # DefaultConcurrentDiscRequests = 10
+  # DefaultConcurrentAdvertRequests = 10
+  # DefaultDiscoveryTimeout = 1.minutes
+  # DefaultMaxQueriedBlocksCache = 1000
+  # DefaultMinPeersPerBlock = 3
 type
   TaskHandler* = proc(task: BlockExcPeerCtx): Future[void] {.gcsafe.}

View File

@@ -9,7 +9,6 @@
 import std/sequtils
 import std/os
-import std/sugar
 import std/tables
 import pkg/chronicles

View File

@@ -245,11 +245,11 @@ proc defaultDataDir*(): string =
   getHomeDir() / dataDir
-func parseCmdArg*(T: type MultiAddress, input: TaintedString): T
+func parseCmdArg*(T: type MultiAddress, input: string): T
     {.raises: [ValueError, LPError, Defect].} =
   MultiAddress.init($input).tryGet()
-proc parseCmdArg*(T: type SignedPeerRecord, uri: TaintedString): T =
+proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T =
   var res: SignedPeerRecord
   try:
     if not res.fromURI(uri):
@@ -260,11 +260,11 @@ proc parseCmdArg*(T: type SignedPeerRecord, uri: TaintedString): T =
       quit QuitFailure
   res
-func parseCmdArg*(T: type EthAddress, address: TaintedString): T =
+func parseCmdArg*(T: type EthAddress, address: string): T =
   EthAddress.init($address).get()
 # no idea why confutils needs this:
-proc completeCmdArg*(T: type EthAddress; val: TaintedString): seq[string] =
+proc completeCmdArg*(T: type EthAddress; val: string): seq[string] =
   discard
 # silly chronicles, colors is a compile-time property

View File

@@ -136,7 +136,6 @@ func decode*(_: DagPBCoder, data: openArray[byte]): ?!Manifest =
   if pbNode.getRepeatedField(2, linksBuf).isOk:
     for pbLinkBuf in linksBuf:
       var
-        blocksBuf: seq[seq[byte]]
         blockBuf: seq[byte]
         pbLink = initProtoBuffer(pbLinkBuf)

View File

@@ -131,18 +131,18 @@ proc retrieve*(
       without res =? (await node.erasure.decode(manifest)), error:
         trace "Unable to erasure decode manifest", cid, exc = error.msg
     except CatchableError as exc:
-      trace "Exception decoding manifest", cid
+      trace "Exception decoding manifest", cid, exc = exc.msg
   #
   asyncSpawn erasureJob()
-  else:
-    # Prefetch the entire dataset into the local store
-    proc prefetchBlocks() {.async, raises: [Defect].} =
-      try:
-        discard await node.fetchBatched(manifest)
-      except CatchableError as exc:
-        trace "Exception prefetching blocks", exc = exc.msg
-    #
-    # asyncSpawn prefetchBlocks() - temporarily commented out
-    #
+  # else:
+  #   # Prefetch the entire dataset into the local store
+  #   proc prefetchBlocks() {.async, raises: [Defect].} =
+  #     try:
+  #       discard await node.fetchBatched(manifest)
+  #     except CatchableError as exc:
+  #       trace "Exception prefetching blocks", exc = exc.msg
+  #   #
+  #   # asyncSpawn prefetchBlocks() - temporarily commented out
+  #   #
   # Retrieve all blocks of the dataset sequentially from the local store or network
   trace "Creating store stream for manifest", cid
@@ -158,7 +158,7 @@ proc retrieve*(
     try:
       await stream.pushData(blk.data)
     except CatchableError as exc:
-      trace "Unable to send block", cid
+      trace "Unable to send block", cid, exc = exc.msg
       discard
     finally:
       await stream.pushEof()

View File

@@ -35,7 +35,6 @@ proc waitUntilPeriod(proving: Proving, period: Period) {.async.} =
   await proving.clock.waitUntil(periodicity.periodStart(period).truncate(int64))
 proc removeEndedContracts(proving: Proving) {.async.} =
-  let now = proving.clock.now().u256
   var ended: HashSet[SlotId]
   for id in proving.slots:
     let state = await proving.proofs.slotState(id)

View File

@@ -76,9 +76,9 @@ proc initRestApi*(node: CodexNodeRef, conf: CodexConf): RestRouter =
       try:
         await node.connect(peerId.get(), addresses)
         return RestApiResponse.response("Successfully connected to peer")
-      except DialFailedError as e:
+      except DialFailedError:
         return RestApiResponse.error(Http400, "Unable to dial peer")
-      except CatchableError as e:
+      except CatchableError:
         return RestApiResponse.error(Http400, "Unknown error dialling peer")
   router.api(
@@ -187,7 +187,7 @@ proc initRestApi*(node: CodexNodeRef, conf: CodexConf): RestRouter =
       trace "Uploaded file", cid
       return RestApiResponse.response($cid)
-    except CancelledError as exc:
+    except CancelledError:
       return RestApiResponse.error(Http500)
     except AsyncStreamError:
       return RestApiResponse.error(Http500)

View File

@@ -1,5 +1,4 @@
 import pkg/chronos
-import pkg/upraises
 import pkg/stint
 import ../contracts/requests
 import ../utils/asyncspawn

View File

@@ -3,7 +3,6 @@ import ../statemachine
 import ../salesagent
 import ./errorhandling
 import ./filled
-import ./errored
 import ./cancelled
 import ./failed

View File

@@ -134,13 +134,6 @@
   tau*: Tau
   authenticators*: seq[blst_p1]
-proc fromBytesBE(a: array[32, byte]): blst_scalar =
-  ## Convert data to blst native form
-  ##
-  blst_scalar_from_bendian(result, a)
-  doAssert(blst_scalar_fr_check(result).bool)
 proc fromBytesBE(a: openArray[byte]): blst_scalar =
   ## Convert data to blst native form
   ##
@@ -269,26 +262,6 @@ proc hashNameI(name: array[Namelen, byte], i: int64): blst_p1 =
   bigEndian64(addr(namei[sizeof(name)]), unsafeAddr(i))
   return hashToG1(namei)
-proc generateAuthenticatorNaive(
-  stream: SeekableStream,
-  ssk: SecretKey,
-  i: int64,
-  s: int64,
-  t: TauZero): Future[blst_p1] {.async.} =
-  ## Naive implementation of authenticator as in the S&W paper.
-  ## With the paper's multiplicative notation:
-  ## \sigma_i = (H(file || i) \cdot \prod_{j=0}^{s-1}{u_j^{m[i][j]}})^{\alpha}
-  ##
-  var sum: blst_p1
-  for j in 0..<s:
-    var prod: blst_p1
-    prod.blst_p1_mult(t.u[j], fromBytesBE((await stream.getSector(i, j, s))), 255)
-    sum.blst_p1_add_or_double(sum, prod)
-  blst_p1_add_or_double(result, hashNameI(t.name, i), sum)
-  result.blst_p1_mult(result, ssk.key, 255)
 proc generateAuthenticatorOpt(
   stream: SeekableStream,
   ssk: SecretKey,
@@ -412,32 +385,6 @@ proc verifyPairingsNaive(a1: blst_p1, a2: blst_p2, b1: blst_p1, b2: blst_p2) : bool =
   let e2 = pairing(b1, b2)
   return e1 == e2
-proc verifyPairingsNeg(a1: blst_p1, a2: blst_p2, b1: blst_p1, b2: blst_p2) : bool =
-  ## Faster pairing verification using 2 miller loops but ony one final exponentiation
-  ## based on https://github.com/benjaminion/c-kzg/blob/main/src/bls12_381.c
-  ##
-  var
-    loop0, loop1, gt_point: blst_fp12
-    aa1, bb1: blst_p1_affine
-    aa2, bb2: blst_p2_affine
-  var a1neg = a1
-  blst_p1_cneg(a1neg, 1)
-  blst_p1_to_affine(aa1, a1neg)
-  blst_p1_to_affine(bb1, b1)
-  blst_p2_to_affine(aa2, a2)
-  blst_p2_to_affine(bb2, b2)
-  blst_miller_loop(loop0, aa2, aa1)
-  blst_miller_loop(loop1, bb2, bb1)
-  blst_fp12_mul(gt_point, loop0, loop1)
-  blst_final_exp(gt_point, gt_point)
-  return blst_fp12_is_one(gt_point).bool
 proc verifyPairings(a1: blst_p1, a2: blst_p2, b1: blst_p1, b2: blst_p2) : bool =
   ## Wrapper to select verify pairings implementation
   ##

View File

@@ -1,6 +1,5 @@
 import pkg/chronos
 import pkg/stint
-import pkg/questionable
 import pkg/upraises
 import ./periods
 import ../../contracts/requests

View File

@@ -7,7 +7,6 @@
 ## This file may not be copied, modified, or distributed except according to
 ## those terms.
-import std/sequtils
 import pkg/upraises
 push: {.upraises: [].}

View File

@@ -10,7 +10,6 @@
 import pkg/upraises
 push: {.upraises: [].}
-import pkg/chronicles
 import pkg/questionable/results
 import pkg/datastore
 import pkg/libp2p

View File

@@ -7,7 +7,6 @@
 ## This file may not be copied, modified, or distributed except according to
 ## those terms.
-import std/sequtils
 import pkg/upraises
 push: {.upraises: [].}
@@ -23,7 +22,6 @@ import pkg/stew/endians2
 import ./blockstore
 import ./keyutils
 import ../blocktype
-import ../namespaces
 import ../clock
 import ../systemclock

View File

@@ -58,7 +58,7 @@ proc wakeupNext(waiters: var seq[Future[void]]) {.inline.} =
       break
   if i > 0:
-    waiters.delete(0, i - 1)
+    waiters.delete(0..(i-1))
 proc heapCmp[T](x, y: T, max: bool = false): bool {.inline.} =
   if max:
View File

@@ -13,15 +13,10 @@ push: {.upraises: [].}
 import pkg/chronicles
 import pkg/questionable/results
 import pkg/libp2p
-import pkg/datastore
 import ./fileutils
 import ../errors
 import ../rng
-import ../namespaces
-const
-  SafePermissions = {UserRead, UserWrite}
 type
   CodexKeyError = object of CodexError

View File

@@ -70,6 +70,15 @@ else:
 # for heap-usage-by-instance-type metrics and object base-type strings
 --define:nimTypeNames
+when (NimMajor, NimMinor) >= (1, 4):
+  --warning:"ObservableStores:off"
+  --warning:"LockLevel:off"
+  --hint:"XCannotRaiseY:off"
+when (NimMajor, NimMinor) >= (1, 6):
+  --warning:"DotLikeOps:off"
+when (NimMajor, NimMinor, NimPatch) >= (1, 6, 11):
+  --warning:"BareExcept:off"
 switch("define", "withoutPCRE")

View File

@@ -1,3 +0,0 @@
--d:"chronicles_log_level=INFO"
---warning:LockLevel:off
---warning:ObservableStores:off

View File

@@ -231,15 +231,15 @@ suite "E2E - Multiple Nodes Discovery":
     MockDiscovery(blockexc[1].engine.discovery.discovery)
       .publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} =
-        advertised.add(cid, switch[1].peerInfo.signedPeerRecord)
+        advertised[cid] = switch[1].peerInfo.signedPeerRecord
     MockDiscovery(blockexc[2].engine.discovery.discovery)
       .publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} =
-        advertised.add(cid, switch[2].peerInfo.signedPeerRecord)
+        advertised[cid] = switch[2].peerInfo.signedPeerRecord
     MockDiscovery(blockexc[3].engine.discovery.discovery)
       .publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} =
-        advertised.add(cid, switch[3].peerInfo.signedPeerRecord)
+        advertised[cid] = switch[3].peerInfo.signedPeerRecord
     await blockexc[1].engine.blocksHandler(switch[0].peerInfo.peerId, blocks[0..5])
     await blockexc[2].engine.blocksHandler(switch[0].peerInfo.peerId, blocks[4..10])

View File

@@ -28,7 +28,6 @@ suite "Test Discovery Engine":
     peerStore: PeerCtxStore
     blockDiscovery: MockDiscovery
     pendingBlocks: PendingBlocksManager
-    localStore: CacheStore
     network: BlockExcNetwork
   setup:

View File

@@ -169,13 +169,10 @@ suite "NetworkStore engine - 2 nodes":
         nodeCmps2.networkStore.putBlock(it)
       ))
-    let
-      blocks = await allFinished(
-        blocks2[4..7].mapIt(
-          nodeCmps1.networkStore.getBlock(it.cid)
-      ))
-    # await sleepAsync(10.seconds)
+    discard await allFinished(
+      blocks2[4..7].mapIt(
+        nodeCmps1.networkStore.getBlock(it.cid)
+    ))
     let
       channel = !peerCtx1.paymentChannel

View File

@@ -140,7 +140,6 @@ suite "NetworkStore engine handlers":
     discovery: DiscoveryEngine
     peerCtx: BlockExcPeerCtx
     localStore: BlockStore
-    done: Future[void]
     blocks: seq[bt.Block]
   setup:
@@ -366,7 +365,6 @@ suite "Task Handler":
     network: BlockExcNetwork
     engine: BlockExcEngine
     discovery: DiscoveryEngine
-    peerCtx: BlockExcPeerCtx
     localStore: BlockStore
     peersCtx: seq[BlockExcPeerCtx]

View File

@@ -264,7 +264,6 @@ suite "Network - Test Limits":
   var
     switch1, switch2: Switch
     network1, network2: BlockExcNetwork
-    blocks: seq[bt.Block]
     done: Future[void]
   setup:

View File

@@ -9,14 +9,13 @@ import pkg/stew/byteutils
 import pkg/codex/blocktype as bt
 import pkg/codex/blockexchange
-import ../examples
 suite "Pending Blocks":
   test "Should add want handle":
     let
       pendingBlocks = PendingBlocksManager.new()
       blk = bt.Block.new("Hello".toBytes).tryGet
-      handle = pendingBlocks.getWantHandle(blk.cid)
+    discard pendingBlocks.getWantHandle(blk.cid)
     check pendingBlocks.pending(blk.cid)
@@ -59,7 +58,8 @@
     let
       pendingBlocks = PendingBlocksManager.new()
       blks = (0..9).mapIt( bt.Block.new(("Hello " & $it).toBytes).tryGet )
-      handles = blks.mapIt( pendingBlocks.getWantHandle( it.cid ) )
+    discard blks.mapIt( pendingBlocks.getWantHandle( it.cid ) )
     check:
       blks.mapIt( $it.cid ).sorted(cmp[string]) ==

View File

@@ -1,5 +1,3 @@
-import std/options
 import pkg/chronos
 import pkg/libp2p
 import pkg/libp2p/varint

View File

@@ -10,8 +10,6 @@
 import pkg/chronos
 import pkg/libp2p
 import pkg/questionable
-import pkg/questionable/results
-import pkg/stew/shims/net
 import pkg/codex/discovery
 import pkg/contractabi/address as ca

View File

@@ -12,7 +12,6 @@ import pkg/chronos
 import pkg/libp2p
 import pkg/questionable
 import pkg/questionable/results
-import pkg/codex/blocktype as bt
 import codex/stores/repostore

View File

@@ -1,4 +1,3 @@
-import std/os
 import std/sequtils
 import pkg/asynctest
@@ -22,17 +21,11 @@ import ../helpers
 const
   BlockSize = 31 * 64
-  SectorSize = 31
-  SectorsPerBlock = BlockSize div SectorSize
   DataSetSize = BlockSize * 100
 suite "Storage Proofs Network":
   let
-    rng = Rng.instance()
-    seckey1 = PrivateKey.random(rng[]).tryGet()
-    seckey2 = PrivateKey.random(rng[]).tryGet()
-    hostAddr1 = ca.Address.example
-    hostAddr2 = ca.Address.example
+    hostAddr = ca.Address.example
     blocks = toSeq([1, 5, 10, 14, 20, 12, 22]) # TODO: maybe make them random
   var
@@ -48,7 +41,6 @@ suite "Storage Proofs Network":
     store: BlockStore
     ssk: st.SecretKey
     spk: st.PublicKey
-    stpstore: st.StpStore
     porMsg: PorMessage
     cid: Cid
     por: PoR
@@ -104,7 +96,7 @@
     discovery1.findHostProvidersHandler = proc(d: MockDiscovery, host: ca.Address):
       Future[seq[SignedPeerRecord]] {.async, gcsafe.} =
-        check hostAddr2 == host
+        check hostAddr == host
         return @[switch2.peerInfo.signedPeerRecord]
   proc tagsHandler(msg: TagsMessage) {.async, gcsafe.} =
@@ -119,6 +111,6 @@
       cid,
       blocks,
       porMsg.authenticators,
-      hostAddr2)).tryGet()
+      hostAddr)).tryGet()
   await done.wait(1.seconds)

View File

@@ -13,8 +13,6 @@ import ../helpers
 const
   BlockSize = 31 * 64
-  SectorSize = 31
-  SectorsPerBlock = BlockSize div SectorSize
   DataSetSize = BlockSize * 100
 suite "Test PoR store":
suite "Test PoR store": suite "Test PoR store":

View File

@@ -1,5 +1,4 @@
 import std/strutils
-import std/options
 import pkg/chronos
 import pkg/asynctest

View File

@@ -10,7 +10,6 @@
 import pkg/chronos
 import pkg/libp2p
 import pkg/asynctest
-import pkg/questionable
 import pkg/questionable/results
 import pkg/codex/blocktype as bt
 import pkg/codex/stores/repostore
import pkg/codex/stores/repostore import pkg/codex/stores/repostore

View File

@@ -1,5 +1,4 @@
 import std/os
-import std/options
 import std/strutils
 import std/sequtils
@@ -127,7 +126,7 @@ suite "RepoStore":
     discard (await metaDs.get(QuotaReservedKey)).tryGet
   test "Should release bytes":
-    let blk = createTestBlock(100)
+    discard createTestBlock(100)
     check repo.totalUsed == 0
     (await repo.reserve(100)).tryGet
@@ -179,7 +178,7 @@
     check:
       response.len == 1
-      response[0].key.get == expectedKey
+      !response[0].key == expectedKey
       response[0].data == expectedExpiration.toBytes
   test "Should store block with default expiration timestamp when not provided":
@@ -196,7 +195,7 @@
     check:
       response.len == 1
-      response[0].key.get == expectedKey
+      !response[0].key == expectedKey
       response[0].data == expectedExpiration.toBytes
   test "delBlock should remove expiration metadata":

View File

@@ -3,7 +3,6 @@ import std/sequtils
 import pkg/asynctest
 import pkg/chronos
 import pkg/libp2p
-import pkg/questionable
 import pkg/questionable/results
 import pkg/codex/erasure

View File

@@ -1,7 +1,6 @@
 import std/sequtils
 import pkg/chronos
-import pkg/questionable
 import pkg/questionable/results
 import pkg/asynctest
 import pkg/libp2p

View File

@@ -4,7 +4,6 @@ import std/math
 import pkg/asynctest
 import pkg/chronos
-import pkg/chronicles
 import pkg/stew/byteutils
 import pkg/nitro

View File

@@ -1,10 +1,9 @@
 import std/times
 import pkg/asynctest
 import pkg/chronos
-import pkg/upraises
 import pkg/stint
 import pkg/codex/purchasing
-import pkg/codex/purchasing/states/[finished, failed, error, started, submitted, unknown]
+import pkg/codex/purchasing/states/[finished, error, started, submitted, unknown]
 import ./helpers/mockmarket
 import ./helpers/mockclock
 import ./helpers/eventually

View File

@@ -7,8 +7,6 @@
 ## This file may not be copied, modified, or distributed except according to
 ## those terms.
-import pkg/questionable
 import pkg/chronos
 import pkg/asynctest

View File

@@ -31,12 +31,12 @@ ethersuite "On-Chain Clock":
     check clock.now() > past
   test "raises when not started":
-    expect AssertionError:
+    expect AssertionDefect:
       discard OnChainClock.new(provider).now()
   test "raises when stopped":
     await clock.stop()
-    expect AssertionError:
+    expect AssertionDefect:
       discard clock.now()
   test "handles starting multiple times":

View File

@@ -1,6 +1,5 @@
 import std/options
 import pkg/chronos
-import pkg/ethers/testing
 import pkg/stew/byteutils
 import codex/contracts
 import codex/contracts/testtoken
@@ -49,7 +48,7 @@ ethersuite "On-Chain Market":
   test "fails to instantiate when contract does not have a signer":
     let storageWithoutSigner = marketplace.connect(provider)
-    expect AssertionError:
+    expect AssertionDefect:
       discard OnChainMarket.new(storageWithoutSigner)
   test "knows signer address":
@@ -119,7 +118,7 @@ ethersuite "On-Chain Market":
     proc onSlotFilled(requestId: RequestId, slotIndex: UInt256) =
       receivedSlotIndices.add(slotIndex)
     let subscription = await market.subscribeSlotFilled(request.id, slotIndex, onSlotFilled)
-    await market.fillSlot(request.id, slotIndex - 1, proof)
+    await market.fillSlot(request.id, otherSlot, proof)
     check receivedSlotIndices.len == 0
     await market.fillSlot(request.id, slotIndex, proof)
     check receivedSlotIndices == @[slotIndex]

View File

@@ -1,8 +1,6 @@
 import codex/contracts
-import codex/contracts/testtoken
 import ../ethertest
 import ./examples
-import ./time
 ethersuite "On-Chain Proofs":

View File

@@ -0,0 +1,156 @@
import std/os
import std/httpclient
import std/json
import std/strutils
import pkg/chronos
import ../ethertest
import ../contracts/time
import ../codex/helpers/eventually
import ./nodes
import ./tokens
ethersuite "Integration tests":
var node1, node2: NodeProcess
var baseurl1, baseurl2: string
var client: HttpClient
let dataDir1 = getTempDir() / "Codex1"
let dataDir2 = getTempDir() / "Codex2"
setup:
await provider.getSigner(accounts[0]).mint()
await provider.getSigner(accounts[1]).mint()
await provider.getSigner(accounts[1]).deposit()
baseurl1 = "http://localhost:8080/api/codex/v1"
baseurl2 = "http://localhost:8081/api/codex/v1"
client = newHttpClient()
node1 = startNode([
"--api-port=8080",
"--data-dir=" & dataDir1,
"--nat=127.0.0.1",
"--disc-ip=127.0.0.1",
"--disc-port=8090",
"--persistence",
"--eth-account=" & $accounts[0]
], debug = false)
let
bootstrap = strip(
$(parseJson(client.get(baseurl1 & "/debug/info").body)["spr"]),
chars = {'"'})
node2 = startNode([
"--api-port=8081",
"--data-dir=" & dataDir2,
"--nat=127.0.0.1",
"--disc-ip=127.0.0.1",
"--disc-port=8091",
"--bootstrap-node=" & bootstrap,
"--persistence",
"--eth-account=" & $accounts[1]
], debug = false)
teardown:
client.close()
node1.stop()
node2.stop()
dataDir1.removeDir()
dataDir2.removeDir()
test "nodes can print their peer information":
let info1 = client.get(baseurl1 & "/debug/info").body
let info2 = client.get(baseurl2 & "/debug/info").body
check info1 != info2
test "nodes should set chronicles log level":
client.headers = newHttpHeaders({ "Content-Type": "text/plain" })
let filter = "/debug/chronicles/loglevel?level=DEBUG;TRACE:codex"
check client.request(baseurl1 & filter, httpMethod = HttpPost, body = "").status == "200 OK"
test "node accepts file uploads":
let url = baseurl1 & "/upload"
let response = client.post(url, "some file contents")
check response.status == "200 OK"
test "node handles new storage availability":
let url = baseurl1 & "/sales/availability"
let json = %*{"size": "0x1", "duration": "0x2", "minPrice": "0x3"}
check client.post(url, $json).status == "200 OK"
test "node lists storage that is for sale":
let url = baseurl1 & "/sales/availability"
let json = %*{"size": "0x1", "duration": "0x2", "minPrice": "0x3"}
let availability = parseJson(client.post(url, $json).body)
let response = client.get(url)
check response.status == "200 OK"
check %*availability in parseJson(response.body)
test "node handles storage request":
let cid = client.post(baseurl1 & "/upload", "some file contents").body
let url = baseurl1 & "/storage/request/" & cid
let json = %*{"duration": "0x1", "reward": "0x2"}
let response = client.post(url, $json)
check response.status == "200 OK"
test "node retrieves purchase status":
let cid = client.post(baseurl1 & "/upload", "some file contents").body
let request = %*{"duration": "0x1", "reward": "0x2"}
let id = client.post(baseurl1 & "/storage/request/" & cid, $request).body
let response = client.get(baseurl1 & "/storage/purchases/" & id)
check response.status == "200 OK"
let json = parseJson(response.body)
check json["request"]["ask"]["duration"].getStr == "0x1"
check json["request"]["ask"]["reward"].getStr == "0x2"
test "node remembers purchase status after restart":
let cid = client.post(baseurl1 & "/upload", "some file contents").body
let request = %*{"duration": "0x1", "reward": "0x2"}
let id = client.post(baseurl1 & "/storage/request/" & cid, $request).body
proc getPurchase(id: string): JsonNode =
let response = client.get(baseurl1 & "/storage/purchases/" & id)
return parseJson(response.body).catch |? nil
check eventually getPurchase(id){"state"}.getStr == "submitted"
node1.restart()
client.close()
client = newHttpClient()
check eventually (not isNil getPurchase(id){"request"}{"ask"})
check getPurchase(id){"request"}{"ask"}{"duration"}.getStr == "0x1"
check getPurchase(id){"request"}{"ask"}{"reward"}.getStr == "0x2"
test "nodes negotiate contracts on the marketplace":
proc sell =
let json = %*{"size": "0xFFFFF", "duration": "0x200", "minPrice": "0x300"}
discard client.post(baseurl2 & "/sales/availability", $json)
proc available: JsonNode =
client.get(baseurl2 & "/sales/availability").body.parseJson
proc upload: string =
client.post(baseurl1 & "/upload", "some file contents").body
proc buy(cid: string): string =
let expiry = ((waitFor provider.currentTime()) + 30).toHex
let json = %*{"duration": "0x1", "reward": "0x400", "expiry": expiry}
client.post(baseurl1 & "/storage/request/" & cid, $json).body
proc finish(purchase: string): Future[JsonNode] {.async.} =
while true:
let response = client.get(baseurl1 & "/storage/purchases/" & purchase)
let json = parseJson(response.body)
if json["state"].getStr == "finished": return json
await sleepAsync(1.seconds)
sell()
let purchase = waitFor upload().buy().finish()
check purchase["error"].getStr == ""
check available().len == 0

View File

@@ -1,15 +1,9 @@
-import std/osproc
 import std/os
 import std/httpclient
-import std/strutils
-import std/times
 import pkg/chronos
 import ../ethertest
-import ../contracts/time
-import ../codex/helpers/eventually
 import ./nodes
-import ./tokens
 ethersuite "Node block expiration tests":
@@ -59,7 +53,7 @@ ethersuite "Node block expiration tests":
     let contentId = uploadTestFile()
-    await sleepAsync(10 * 1000)
+    await sleepAsync(10.seconds)
     let response = downloadTestFile(contentId)
     check:
@@ -71,7 +65,7 @@
     let contentId = uploadTestFile()
-    await sleepAsync(10 * 1000)
+    await sleepAsync(10.seconds)
     expect TimeoutError:
       discard downloadTestFile(contentId)

tests/logging.nim (new file)
View File

@@ -0,0 +1,6 @@
+import pkg/chronicles
+
+proc ignoreLogging(level: LogLevel, message: LogOutputStr) =
+  discard
+
+defaultChroniclesStream.output.writer = ignoreLogging

View File

@@ -1,4 +1,6 @@
 --path:".."
 --threads:on
 --tlsEmulation:off
--d:chronicles_enabled=off
+-d:chronicles_log_level:"NONE" # compile all log statements
+-d:chronicles_sinks:"textlines[dynamic]" # allow logs to be filtered by tests

View File

@@ -1,3 +1,4 @@
+import ./logging
 import ./codex/teststores
 import ./codex/testblockexchange
 import ./codex/teststorageproofs
@@ -14,7 +15,4 @@ import ./codex/testutils
 import ./codex/testclock
 import ./codex/testsystemclock
-# to check that everything compiles
-import ../codex
 {.warning[UnusedImport]: off.}

View File

@@ -1 +0,0 @@
--d:chronicles_log_level=WARN

View File

@@ -1,3 +1,4 @@
+import ./logging
 import ./contracts/testCollateral
 import ./contracts/testContracts
 import ./contracts/testMarket

View File

@@ -1 +0,0 @@
--d:chronicles_log_level=WARN

View File

@@ -1,159 +1,5 @@
-import std/osproc
-import std/os
-import std/httpclient
-import std/json
-import std/strutils
-import pkg/chronos
-import ./ethertest
-import ./contracts/time
-import ./integration/nodes
-import ./integration/tokens
+import ./logging
+import ./integration/testIntegration
 import ./integration/testblockexpiration
-import ./codex/helpers/eventually
-ethersuite "Integration tests":
+{.warning[UnusedImport]:off.}
var node1, node2: NodeProcess
var baseurl1, baseurl2: string
var client: HttpClient
let dataDir1 = getTempDir() / "Codex1"
let dataDir2 = getTempDir() / "Codex2"
setup:
await provider.getSigner(accounts[0]).mint()
await provider.getSigner(accounts[1]).mint()
await provider.getSigner(accounts[1]).deposit()
baseurl1 = "http://localhost:8080/api/codex/v1"
baseurl2 = "http://localhost:8081/api/codex/v1"
client = newHttpClient()
node1 = startNode([
"--api-port=8080",
"--data-dir=" & dataDir1,
"--nat=127.0.0.1",
"--disc-ip=127.0.0.1",
"--disc-port=8090",
"--persistence",
"--eth-account=" & $accounts[0]
], debug = false)
let
bootstrap = strip(
$(parseJson(client.get(baseurl1 & "/debug/info").body)["spr"]),
chars = {'"'})
node2 = startNode([
"--api-port=8081",
"--data-dir=" & dataDir2,
"--nat=127.0.0.1",
"--disc-ip=127.0.0.1",
"--disc-port=8091",
"--bootstrap-node=" & bootstrap,
"--persistence",
"--eth-account=" & $accounts[1]
], debug = false)
teardown:
client.close()
node1.stop()
node2.stop()
dataDir1.removeDir()
dataDir2.removeDir()
test "nodes can print their peer information":
let info1 = client.get(baseurl1 & "/debug/info").body
let info2 = client.get(baseurl2 & "/debug/info").body
check info1 != info2
test "nodes should set chronicles log level":
client.headers = newHttpHeaders({ "Content-Type": "text/plain" })
let filter = "/debug/chronicles/loglevel?level=DEBUG;TRACE:codex"
check client.request(baseurl1 & filter, httpMethod = HttpPost, body = "").status == "200 OK"
test "node accepts file uploads":
let url = baseurl1 & "/upload"
let response = client.post(url, "some file contents")
check response.status == "200 OK"
test "node handles new storage availability":
let url = baseurl1 & "/sales/availability"
let json = %*{"size": "0x1", "duration": "0x2", "minPrice": "0x3"}
check client.post(url, $json).status == "200 OK"
test "node lists storage that is for sale":
let url = baseurl1 & "/sales/availability"
let json = %*{"size": "0x1", "duration": "0x2", "minPrice": "0x3"}
let availability = parseJson(client.post(url, $json).body)
let response = client.get(url)
check response.status == "200 OK"
check %*availability in parseJson(response.body)
test "node handles storage request":
let cid = client.post(baseurl1 & "/upload", "some file contents").body
let url = baseurl1 & "/storage/request/" & cid
let json = %*{"duration": "0x1", "reward": "0x2"}
let response = client.post(url, $json)
check response.status == "200 OK"
test "node retrieves purchase status":
let cid = client.post(baseurl1 & "/upload", "some file contents").body
let request = %*{"duration": "0x1", "reward": "0x2"}
let id = client.post(baseurl1 & "/storage/request/" & cid, $request).body
let response = client.get(baseurl1 & "/storage/purchases/" & id)
check response.status == "200 OK"
let json = parseJson(response.body)
check json["request"]["ask"]["duration"].getStr == "0x1"
check json["request"]["ask"]["reward"].getStr == "0x2"
test "node remembers purchase status after restart":
let cid = client.post(baseurl1 & "/upload", "some file contents").body
let request = %*{"duration": "0x1", "reward": "0x2"}
let id = client.post(baseurl1 & "/storage/request/" & cid, $request).body
proc getPurchase(id: string): JsonNode =
let response = client.get(baseurl1 & "/storage/purchases/" & id)
return parseJson(response.body).catch |? nil
check eventually getPurchase(id){"state"}.getStr == "submitted"
node1.restart()
client.close()
client = newHttpClient()
check eventually (not isNil getPurchase(id){"request"}{"ask"})
check getPurchase(id){"request"}{"ask"}{"duration"}.getStr == "0x1"
check getPurchase(id){"request"}{"ask"}{"reward"}.getStr == "0x2"
test "nodes negotiate contracts on the marketplace":
proc sell =
let json = %*{"size": "0xFFFFF", "duration": "0x200", "minPrice": "0x300"}
discard client.post(baseurl2 & "/sales/availability", $json)
proc available: JsonNode =
client.get(baseurl2 & "/sales/availability").body.parseJson
proc upload: string =
client.post(baseurl1 & "/upload", "some file contents").body
proc buy(cid: string): string =
let expiry = ((waitFor provider.currentTime()) + 30).toHex
let json = %*{"duration": "0x1", "reward": "0x400", "expiry": expiry}
client.post(baseurl1 & "/storage/request/" & cid, $json).body
proc finish(purchase: string): Future[JsonNode] {.async.} =
while true:
let response = client.get(baseurl1 & "/storage/purchases/" & purchase)
let json = parseJson(response.body)
if json["state"].getStr == "finished": return json
await sleepAsync(1.seconds)
sell()
let purchase = waitFor upload().buy().finish()
check purchase["error"].getStr == ""
check available().len == 0