Merge branch 'master' into feature/blockexc-prevent-retransmit

Ben Bierens, 2025-02-10 09:48:28 +01:00, committed by GitHub
commit bc0b46e699
11 changed files with 70 additions and 49 deletions

View File

@@ -97,6 +97,33 @@ runs:
# Set GCC-14 as the default
sudo update-alternatives --set gcc /usr/bin/gcc-14
- name: Install ccache on Linux/Mac
if: inputs.os == 'linux' || inputs.os == 'macos'
uses: hendrikmuhs/ccache-action@v1.2
with:
create-symlink: true
key: ${{ matrix.os }}-${{ matrix.builder }}-${{ matrix.cpu }}-${{ matrix.tests }}-${{ matrix.nim_version }}
evict-old-files: 7d
- name: Install ccache on Windows
if: inputs.os == 'windows'
uses: hendrikmuhs/ccache-action@v1.2
with:
key: ${{ matrix.os }}-${{ matrix.builder }}-${{ matrix.cpu }}-${{ matrix.tests }}-${{ matrix.nim_version }}
evict-old-files: 7d
- name: Enable ccache on Windows
if: inputs.os == 'windows'
shell: ${{ inputs.shell }} {0}
run: |
CCACHE_DIR=$(dirname $(which ccache))/ccached
mkdir ${CCACHE_DIR}
ln -s $(which ccache) ${CCACHE_DIR}/gcc.exe
ln -s $(which ccache) ${CCACHE_DIR}/g++.exe
ln -s $(which ccache) ${CCACHE_DIR}/cc.exe
ln -s $(which ccache) ${CCACHE_DIR}/c++.exe
echo "export PATH=${CCACHE_DIR}:\$PATH" >> $HOME/.bash_profile # prefix path in MSYS2
- name: Derive environment variables
shell: ${{ inputs.shell }} {0}
run: |
@@ -154,8 +181,11 @@ runs:
llvm_bin_dir="${llvm_dir}/bin"
llvm_lib_dir="${llvm_dir}/lib"
echo "${llvm_bin_dir}" >> ${GITHUB_PATH}
# Make sure ccache has precedence over the LLVM bin dir added to GITHUB_PATH above
echo "$(brew --prefix)/opt/ccache/libexec" >> ${GITHUB_PATH}
echo $PATH
echo "LDFLAGS=${LDFLAGS} -L${libomp_lib_dir} -L${llvm_lib_dir} -Wl,-rpath,${llvm_lib_dir}" >> ${GITHUB_ENV}
NIMFLAGS="${NIMFLAGS} $(quote "-d:LeopardCmakeFlags='-DCMAKE_BUILD_TYPE=Release -DCMAKE_C_COMPILER=${llvm_bin_dir}/clang -DCMAKE_CXX_COMPILER=${llvm_bin_dir}/clang++' -d:LeopardExtraCompilerlags='-fopenmp' -d:LeopardExtraLinkerFlags='-fopenmp -L${libomp_lib_dir}'")"
NIMFLAGS="${NIMFLAGS} $(quote "-d:LeopardCmakeFlags='-DCMAKE_BUILD_TYPE=Release' -d:LeopardExtraCompilerFlags='-fopenmp' -d:LeopardExtraLinkerFlags='-fopenmp -L${libomp_lib_dir}'")"
echo "NIMFLAGS=${NIMFLAGS}" >> $GITHUB_ENV
fi
@@ -191,6 +221,7 @@ runs:
- name: Build Nim and Codex dependencies
shell: ${{ inputs.shell }} {0}
run: |
which gcc
gcc --version
make -j${ncpu} CI_CACHE=NimBinaries ${ARCH_OVERRIDE} QUICK_AND_DIRTY_COMPILER=1 update
echo

View File

@@ -2,24 +2,8 @@ mode = ScriptMode.Verbose
import std/os except commandLineParams
const VendorPath = "vendor/nim-nat-traversal/vendor/libnatpmp-upstream"
let
oldVersionFile = joinPath(VendorPath, "VERSION")
newVersionFile = joinPath(VendorPath, "VERSION_temp")
proc renameFile(oldName, newName: string) =
if fileExists(oldName):
mvFile(oldName, newName)
else:
echo "File ", oldName, " does not exist"
### Helper functions
proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
# This is a quick workaround to avoid VERSION file conflict on macOS
# More details here: https://github.com/codex-storage/nim-codex/issues/1059
if defined(macosx):
renameFile(oldVersionFile, newVersionFile)
if not dirExists "build":
mkDir "build"
@@ -37,11 +21,8 @@ proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
# Place build output in 'build' folder, even if name includes a longer path.
outName = os.lastPathPart(name)
cmd = "nim " & lang & " --out:build/" & outName & " " & extra_params & " " & srcDir & name & ".nim"
try:
exec(cmd)
finally:
if defined(macosx):
renameFile(newVersionFile, oldVersionFile)
exec(cmd)
proc test(name: string, srcDir = "tests/", params = "", lang = "c") =
buildBinary name, srcDir, params

View File

@@ -402,7 +402,8 @@ proc wantListHandler*(b: BlockExcEngine, peer: PeerId, wantList: WantList) {.asy
have = await e.address in b.localStore
price = @(b.pricing.get(Pricing(price: 0.u256)).price.toBytesBE)
if e.wantType == WantType.WantHave:
case e.wantType:
of WantType.WantHave:
if have:
presence.add(
BlockPresence(
@@ -419,17 +420,19 @@ proc wantListHandler*(b: BlockExcEngine, peer: PeerId, wantList: WantList) {.asy
peerCtx.peerWants.add(e)
codex_block_exchange_want_have_lists_received.inc()
elif e.wantType == WantType.WantBlock:
of WantType.WantBlock:
peerCtx.peerWants.add(e)
schedulePeer = true
codex_block_exchange_want_block_lists_received.inc()
else: # Updating existing entry in peer wants
# peer doesn't want this block anymore
if e.cancel:
trace "Canceling want for block", address = e.address
peerCtx.peerWants.del(idx)
else:
# peer might want to ask for the same cid with
# different want params
trace "Updating want for block", address = e.address
peerCtx.peerWants[idx] = e # update entry
if presence.len > 0:

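For readers following the control-flow change above, here is a self-contained sketch of the resulting want-list handling. The types and helpers are simplified stand-ins: the real engine works on WantListEntry, answers with BlockPresence (and price), schedules peers, and updates metrics, none of which is modeled here.

```nim
# Simplified, illustrative model of the want-list update flow after this change.
type
  WantType = enum
    WantHave
    WantBlock

  Entry = object
    address: string
    wantType: WantType
    cancel: bool

proc handleWant(wants: var seq[Entry], e: Entry) =
  var idx = -1
  for i, w in wants:
    if w.address == e.address:
      idx = i
      break
  if idx < 0:
    # New want: an exhaustive `case` over the enum replaces the old
    # `if/elif` chain, so a new WantType cannot be silently ignored.
    case e.wantType
    of WantType.WantHave:
      # real engine: answer with a BlockPresence, then remember the want
      wants.add(e)
    of WantType.WantBlock:
      # real engine: remember the want and schedule the peer for delivery
      wants.add(e)
  else:
    # Existing entry for this address
    if e.cancel:
      wants.del(idx)   # peer no longer wants this block
    else:
      wants[idx] = e   # same address, updated want parameters

when isMainModule:
  var wants: seq[Entry]
  wants.handleWant(Entry(address: "blk1", wantType: WantType.WantBlock))
  wants.handleWant(Entry(address: "blk1", cancel: true))  # cancels the pending want
  assert wants.len == 0
```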
View File

@@ -86,8 +86,7 @@ proc example(_: type G2Point): G2Point =
proc example*(_: type Groth16Proof): Groth16Proof =
Groth16Proof(a: G1Point.example, b: G2Point.example, c: G1Point.example)
proc example*(_: type RandomChunker, blocks: int): Future[string] {.async.} =
# doAssert blocks >= 3, "must be more than 3 blocks"
proc example*(_: type RandomChunker, blocks: int): Future[seq[byte]] {.async.} =
let rng = Rng.instance()
let chunker = RandomChunker.new(
rng, size = DefaultBlockSize * blocks.NBytes, chunkSize = DefaultBlockSize
@@ -95,7 +94,7 @@ proc example*(_: type RandomChunker, blocks: int): Future[string] {.async.} =
var data: seq[byte]
while (let moar = await chunker.getBytes(); moar != []):
data.add moar
return byteutils.toHex(data)
return data
proc example*(_: type RandomChunker): Future[string] {.async.} =
await RandomChunker.example(3)

View File

@@ -44,6 +44,9 @@ proc upload*(client: CodexClient, contents: string): ?!Cid =
assert response.status == "200 OK"
Cid.init(response.body).mapFailure
proc upload*(client: CodexClient, bytes: seq[byte]): ?!Cid =
client.upload(string.fromBytes(bytes))
proc download*(client: CodexClient, cid: Cid, local = false): ?!string =
let response = client.http.get(
client.baseurl & "/data/" & $cid & (if local: "" else: "/network/stream")

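The new seq[byte] overload simply reinterprets the bytes as a string before delegating to the existing upload. A minimal sketch of that conversion, assuming stew/byteutils (already used elsewhere in the test suite) is available:

```nim
import pkg/stew/byteutils  # provides string.fromBytes and toBytes

let payload = @[byte 0x68, 0x69]          # the bytes for "hi"
let asString = string.fromBytes(payload)  # lossless byte-for-byte copy into a string
assert asString == "hi"
assert asString.toBytes == payload        # round-trips back to seq[byte]
```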
View File

@@ -4,6 +4,7 @@ from pkg/libp2p import Cid
import pkg/codex/contracts/marketplace as mp
import pkg/codex/periods
import pkg/codex/utils/json
from pkg/codex/utils import roundUp, divUp
import ./multinodes
import ../contracts/time
import ../contracts/deployment
@@ -45,11 +46,14 @@ template marketplacesuite*(name: string, body: untyped) =
proc periods(p: int): uint64 =
p.uint64 * period
proc slotSize(blocks: int): UInt256 =
(DefaultBlockSize * blocks.NBytes).Natural.u256
proc slotSize(blocks, nodes, tolerance: int): UInt256 =
let ecK = nodes - tolerance
let blocksRounded = roundUp(blocks, ecK)
let blocksPerSlot = divUp(blocksRounded, ecK)
(DefaultBlockSize * blocksPerSlot.NBytes).Natural.u256
proc datasetSize(blocks, nodes, tolerance: int): UInt256 =
(nodes + tolerance).u256 * slotSize(blocks)
return nodes.u256 * slotSize(blocks, nodes, tolerance)
proc createAvailabilities(
datasetSize: UInt256,

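As a sanity check of the new per-slot arithmetic, a small standalone sketch with local roundUp/divUp helpers, assumed here to have the usual ceiling semantics of the helpers imported from pkg/codex/utils:

```nim
# Standalone sketch of the slot-size arithmetic above.
# roundUp/divUp are local stand-ins assumed to match the imported helpers.
proc divUp(a, b: int): int =
  (a + b - 1) div b            # integer division, rounded up

proc roundUp(a, b: int): int =
  divUp(a, b) * b              # round a up to the next multiple of b

when isMainModule:
  let
    blocks = 7
    nodes = 3
    tolerance = 1
    ecK = nodes - tolerance                   # 2 data shards per EC group
    blocksRounded = roundUp(blocks, ecK)      # 7 -> 8
    blocksPerSlot = divUp(blocksRounded, ecK) # 8 div 2 = 4 blocks per slot
  assert blocksPerSlot == 4
  # slotSize    = DefaultBlockSize * blocksPerSlot
  # datasetSize = nodes * slotSize, i.e. 3 slots of 4 blocks each
```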
View File

@@ -112,7 +112,7 @@ marketplacesuite "Marketplace":
await ethProvider.advanceTime(duration)
# Checking that the hosting node received reward for at least the time between <expiry;end>
let slotSize = slotSize(blocks)
let slotSize = slotSize(blocks, ecNodes, ecTolerance)
let pricePerSlotPerSecond = minPricePerBytePerSecond * slotSize
check eventually (await token.balanceOf(hostAccount)) - startBalanceHost >=
(duration - 5 * 60) * pricePerSlotPerSecond * ecNodes.u256
@@ -197,7 +197,7 @@ marketplacesuite "Marketplace payouts":
await advanceToNextPeriod()
let slotSize = slotSize(blocks)
let slotSize = slotSize(blocks, ecNodes, ecTolerance)
let pricePerSlotPerSecond = minPricePerBytePerSecond * slotSize
check eventually (

View File

@@ -1,7 +1,6 @@
from std/times import inMilliseconds
import pkg/questionable
import pkg/codex/logutils
import pkg/stew/byteutils
import ../contracts/time
import ../contracts/deployment
import ../codex/helpers
@@ -60,8 +59,7 @@ marketplacesuite "Hosts submit regular proofs":
let purchase = client0.getPurchase(purchaseId).get
check purchase.error == none string
let request = purchase.request.get
let slotSize = request.ask.slotSize
let slotSize = slotSize(blocks, ecNodes, ecTolerance)
check eventually(
client0.purchaseStateIs(purchaseId, "started"), timeout = expiry.int * 1000

View File

@@ -1,5 +1,6 @@
import std/httpclient
import std/sequtils
import std/strformat
from pkg/libp2p import `==`
import pkg/codex/units
import ./twonodes
@@ -144,18 +145,19 @@ twonodessuite "REST API":
check responseBefore.body ==
"Invalid parameters: `tolerance` cannot be greater than `nodes`"
test "request storage succeeds if nodes and tolerance within range", twoNodesConfig:
let data = await RandomChunker.example(blocks = 2)
let cid = client1.upload(data).get
let duration = 100.u256
let pricePerBytePerSecond = 1.u256
let proofProbability = 3.u256
let expiry = 30.uint
let collateralPerByte = 1.u256
let ecParams = @[(3, 1), (5, 2)]
for ecParam in ecParams:
let (nodes, tolerance) = ecParam
for ecParams in @[
(minBlocks: 2, nodes: 3, tolerance: 1), (minBlocks: 3, nodes: 5, tolerance: 2)
]:
let (minBlocks, nodes, tolerance) = ecParams
test "request storage succeeds if nodes and tolerance within range " &
fmt"({minBlocks=}, {nodes=}, {tolerance=})", twoNodesConfig:
let data = await RandomChunker.example(blocks = minBlocks)
let cid = client1.upload(data).get
let duration = 100.u256
let pricePerBytePerSecond = 1.u256
let proofProbability = 3.u256
let expiry = 30.uint
let collateralPerByte = 1.u256
var responseBefore = client1.requestStorageRaw(
cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte,

View File

@@ -88,7 +88,7 @@ twonodessuite "Uploads and downloads":
let cid = a.upload(data).get
let response = b.download(cid).get
check:
response == data
@response.mapIt(it.byte) == data
for run in 0 .. 10:
await transferTest(client1, client2)

@@ -1 +1 @@
Subproject commit 5e4059746e9095e1731b02eeaecd62a70fbe664d
Subproject commit 6508ce75060878dfcdfa21f94721672c69a1823b