fix: bump nph and refactor build process in makefile (#1410)

This commit is contained in:
Eric 2026-02-19 13:12:45 +11:00 committed by GitHub
parent fef46aee35
commit 4068bcb2ed
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
14 changed files with 90 additions and 102 deletions

View File

@@ -214,9 +214,11 @@ NPH:=$(shell dirname $(NIM_BINARY))/nph
build-nph:
ifeq ("$(wildcard $(NPH))","")
$(ENV_SCRIPT) nim c vendor/nph/src/nph.nim && \
mv vendor/nph/src/nph $(shell dirname $(NPH))
echo "nph utility is available at " $(NPH)
cd vendor/nph && \
nimble setup -l && \
nimble build && \
mv ./nph ../../$(shell dirname $(NPH)) && \
echo "nph utility is available at " $(NPH)
endif
GIT_PRE_COMMIT_HOOK := .git/hooks/pre-commit

View File

@@ -20,7 +20,7 @@ proc buildBinary(
# allow something like "nim nimbus --verbosity:0 --hints:off nimbus.nims"
var extra_params = params
when compiles(commandLineParams):
when defined(commandLineParams):
for param in commandLineParams():
extra_params &= " " & param
else:

View File

@@ -96,13 +96,12 @@ proc stop*(s: CodexServer) {.async.} =
notice "Stopping Storage node"
var futures =
@[
s.codexNode.switch.stop(),
s.codexNode.stop(),
s.repoStore.stop(),
s.maintenance.stop(),
]
var futures = @[
s.codexNode.switch.stop(),
s.codexNode.stop(),
s.repoStore.stop(),
s.maintenance.stop(),
]
if s.restServer != nil:
futures.add(s.restServer.stop())

View File

@@ -375,13 +375,12 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
return RestApiResponse.response($json, contentType = "application/json")
router.api(MethodGet, "/api/storage/v1/space") do() -> RestApiResponse:
let json =
%RestRepoStore(
totalBlocks: repoStore.totalBlocks,
quotaMaxBytes: repoStore.quotaMaxBytes,
quotaUsedBytes: repoStore.quotaUsedBytes,
quotaReservedBytes: repoStore.quotaReservedBytes,
)
let json = %RestRepoStore(
totalBlocks: repoStore.totalBlocks,
quotaMaxBytes: repoStore.quotaMaxBytes,
quotaUsedBytes: repoStore.quotaUsedBytes,
quotaReservedBytes: repoStore.quotaReservedBytes,
)
return RestApiResponse.response($json, contentType = "application/json")
proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
@@ -477,20 +476,16 @@ proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
try:
let table = RestRoutingTable.init(node.discovery.protocol.routingTable)
let json =
%*{
"id": $node.switch.peerInfo.peerId,
"addrs": node.switch.peerInfo.addrs.mapIt($it),
"repo": $conf.dataDir,
"spr":
if node.discovery.dhtRecord.isSome:
node.discovery.dhtRecord.get.toURI
else:
"",
"announceAddresses": node.discovery.announceAddrs,
"table": table,
"storage": {"version": $codexVersion, "revision": $codexRevision},
}
let json = %*{
"id": $node.switch.peerInfo.peerId,
"addrs": node.switch.peerInfo.addrs.mapIt($it),
"repo": $conf.dataDir,
"spr":
if node.discovery.dhtRecord.isSome: node.discovery.dhtRecord.get.toURI else: "",
"announceAddresses": node.discovery.announceAddrs,
"table": table,
"storage": {"version": $codexVersion, "revision": $codexRevision},
}
# return pretty json for human readability
return RestApiResponse.response(

View File

@@ -156,7 +156,7 @@ switch("define", "nimOldCaseObjects")
# Enable compat mode for Chronos V4
switch("define", "chronosHandleException")
# begin Nimble config (version 1)
when system.fileExists("nimble.paths"):
# begin Nimble config (version 2)
when withDir(thisDir(), system.fileExists("nimble.paths")):
include "nimble.paths"
# end Nimble config

View File

@@ -52,15 +52,14 @@ proc getDebug(
let node = storage[].node
let table = RestRoutingTable.init(node.discovery.protocol.routingTable)
let json =
%*{
"id": $node.switch.peerInfo.peerId,
"addrs": node.switch.peerInfo.addrs.mapIt($it),
"spr":
if node.discovery.dhtRecord.isSome: node.discovery.dhtRecord.get.toURI else: "",
"announceAddresses": node.discovery.announceAddrs,
"table": table,
}
let json = %*{
"id": $node.switch.peerInfo.peerId,
"addrs": node.switch.peerInfo.addrs.mapIt($it),
"spr":
if node.discovery.dhtRecord.isSome: node.discovery.dhtRecord.get.toURI else: "",
"announceAddresses": node.discovery.announceAddrs,
"table": table,
}
return ok($json)

View File

@@ -57,14 +57,14 @@ asyncchecksuite "NetworkStore engine - 2 nodes":
check:
(await allFinished(blocks1[0 .. 3].mapIt(nodeCmps2.localStore.getBlock(it.cid))))
.filterIt(it.completed and it.read.isOk)
.mapIt($it.read.get.cid)
.sorted(cmp[string]) == blocks1[0 .. 3].mapIt($it.cid).sorted(cmp[string])
.filterIt(it.completed and it.read.isOk)
.mapIt($it.read.get.cid)
.sorted(cmp[string]) == blocks1[0 .. 3].mapIt($it.cid).sorted(cmp[string])
(await allFinished(blocks2[0 .. 3].mapIt(nodeCmps1.localStore.getBlock(it.cid))))
.filterIt(it.completed and it.read.isOk)
.mapIt($it.read.get.cid)
.sorted(cmp[string]) == blocks2[0 .. 3].mapIt($it.cid).sorted(cmp[string])
.filterIt(it.completed and it.read.isOk)
.mapIt($it.read.get.cid)
.sorted(cmp[string]) == blocks2[0 .. 3].mapIt($it.cid).sorted(cmp[string])
test "Should send want-have for block":
let blk = bt.Block.new("Block 1".toBytes).tryGet()
@@ -138,9 +138,9 @@ asyncchecksuite "NetworkStore - multiple nodes":
check:
(await allFinished(downloadCids.mapIt(downloader.localStore.getBlock(it))))
.filterIt(it.completed and it.read.isOk)
.mapIt($it.read.get.cid)
.sorted(cmp[string]) == downloadCids.mapIt($it).sorted(cmp[string])
.filterIt(it.completed and it.read.isOk)
.mapIt($it.read.get.cid)
.sorted(cmp[string]) == downloadCids.mapIt($it).sorted(cmp[string])
test "Should exchange blocks with multiple nodes":
let

View File

@@ -97,10 +97,9 @@ template setupAndTearDown*() {.dirty.} =
blockDiscovery = Discovery.new(
switch.peerInfo.privateKey,
announceAddrs =
@[
MultiAddress.init("/ip4/127.0.0.1/tcp/0").expect("Should return multiaddress")
],
announceAddrs = @[
MultiAddress.init("/ip4/127.0.0.1/tcp/0").expect("Should return multiaddress")
],
)
peerStore = PeerCtxStore.new()
pendingBlocks = PendingBlocksManager.new()

View File

@@ -205,11 +205,10 @@ checksuite "Test logging output":
check loggedJson("ma", "\"/ip4/127.0.0.1/tcp/0\"")
test "logs seq[MultiAddress] correctly":
let ma =
@[
MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet,
MultiAddress.init("/ip4/127.0.0.2/tcp/1").tryGet,
]
let ma = @[
MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet,
MultiAddress.init("/ip4/127.0.0.2/tcp/1").tryGet,
]
log ma
check logged("ma", "\"@[/ip4/127.0.0.1/tcp/0, /ip4/127.0.0.2/tcp/1]\"")
check loggedJson("ma", "[\"/ip4/127.0.0.1/tcp/0\",\"/ip4/127.0.0.2/tcp/1\"]")

View File

@@ -22,18 +22,16 @@ suite "NAT Address Tests":
# Expected results
let
expectedDiscoveryAddrs =
@[
MultiAddress.init("/ip4/8.8.8.8/udp/1234").expect("valid multiaddr"),
MultiAddress.init("/ip4/8.8.8.8/udp/1234").expect("valid multiaddr"),
MultiAddress.init("/ip4/8.8.8.8/udp/1234").expect("valid multiaddr"),
]
expectedlibp2pAddrs =
@[
MultiAddress.init("/ip4/8.8.8.8/tcp/5000").expect("valid multiaddr"),
MultiAddress.init("/ip4/8.8.8.8/tcp/5000").expect("valid multiaddr"),
MultiAddress.init("/ip4/8.8.8.8/tcp/5000").expect("valid multiaddr"),
]
expectedDiscoveryAddrs = @[
MultiAddress.init("/ip4/8.8.8.8/udp/1234").expect("valid multiaddr"),
MultiAddress.init("/ip4/8.8.8.8/udp/1234").expect("valid multiaddr"),
MultiAddress.init("/ip4/8.8.8.8/udp/1234").expect("valid multiaddr"),
]
expectedlibp2pAddrs = @[
MultiAddress.init("/ip4/8.8.8.8/tcp/5000").expect("valid multiaddr"),
MultiAddress.init("/ip4/8.8.8.8/tcp/5000").expect("valid multiaddr"),
MultiAddress.init("/ip4/8.8.8.8/tcp/5000").expect("valid multiaddr"),
]
#ipv6Addr = MultiAddress.init("/ip6/::1/tcp/5000").expect("valid multiaddr")
addrs = @[localAddr, anyAddr, publicAddr]

View File

@@ -7,13 +7,12 @@ suite "findIt":
type AnObject = object
attribute1*: int
var objList =
@[
AnObject(attribute1: 1),
AnObject(attribute1: 3),
AnObject(attribute1: 5),
AnObject(attribute1: 3),
]
var objList = @[
AnObject(attribute1: 1),
AnObject(attribute1: 3),
AnObject(attribute1: 5),
AnObject(attribute1: 3),
]
test "should retur index of first object matching predicate":
assert objList.findIt(it.attribute1 == 3) == 1

View File

@@ -58,11 +58,10 @@ twonodessuite "REST API":
check (await response.body) != ""
test "node retrieve the metadata", twoNodesConfig:
let headers =
@[
("Content-Type", "text/plain"),
("Content-Disposition", "attachment; filename=\"example.txt\""),
]
let headers = @[
("Content-Type", "text/plain"),
("Content-Disposition", "attachment; filename=\"example.txt\""),
]
let uploadResponse = await client1.uploadRaw("some file contents", headers)
let cid = await uploadResponse.body
let listResponse = await client1.listRaw()
@@ -83,11 +82,10 @@ twonodessuite "REST API":
check manifest["mimetype"].getStr() == "text/plain"
test "node set the headers when for download", twoNodesConfig:
let headers =
@[
("Content-Disposition", "attachment; filename=\"example.txt\""),
("Content-Type", "text/plain"),
]
let headers = @[
("Content-Disposition", "attachment; filename=\"example.txt\""),
("Content-Type", "text/plain"),
]
let uploadResponse = await client1.uploadRaw("some file contents", headers)
let cid = await uploadResponse.body

View File

@@ -33,17 +33,17 @@ proc request(
async: (raw: true, raises: [CancelledError, HttpError])
.} =
HttpClientRequestRef
.new(
self.session,
url,
httpMethod,
version = HttpVersion11,
flags = {},
maxResponseHeadersSize = HttpMaxHeadersSize,
headers = headers,
body = body.toOpenArrayByte(0, len(body) - 1),
).get
.send()
.new(
self.session,
url,
httpMethod,
version = HttpVersion11,
flags = {},
maxResponseHeadersSize = HttpMaxHeadersSize,
headers = headers,
body = body.toOpenArrayByte(0, len(body) - 1),
).get
.send()
proc post*(
self: CodexClient,

2
vendor/nph vendored

@@ -1 +1 @@
Subproject commit c6e03162dc2820d3088660f644818d7040e95791
Subproject commit 2cacf6cc28116e4046e0b67a13545af5c4e756bd