mirror of
https://github.com/logos-storage/logos-storage-nim.git
synced 2026-01-05 06:53:06 +00:00
Merge branch 'master' into feat/async-profiler-enabled
# Conflicts: # .gitmodules
This commit is contained in:
commit
c444df9a5a
14
.github/actions/nimbus-build-system/action.yml
vendored
14
.github/actions/nimbus-build-system/action.yml
vendored
@ -92,10 +92,16 @@ runs:
|
||||
if : ${{ inputs.os == 'linux' && inputs.coverage != 'true' }}
|
||||
shell: ${{ inputs.shell }} {0}
|
||||
run: |
|
||||
# Add GCC-14 to alternatives
|
||||
sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-14 14
|
||||
# Set GCC-14 as the default
|
||||
sudo update-alternatives --set gcc /usr/bin/gcc-14
|
||||
# Skip for older Ubuntu versions
|
||||
if [[ $(lsb_release -r | awk -F '[^0-9]+' '{print $2}') -ge 24 ]]; then
|
||||
# Install GCC-14
|
||||
sudo apt-get update -qq
|
||||
sudo apt-get install -yq gcc-14
|
||||
# Add GCC-14 to alternatives
|
||||
sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-14 14
|
||||
# Set GCC-14 as the default
|
||||
sudo update-alternatives --set gcc /usr/bin/gcc-14
|
||||
fi
|
||||
|
||||
- name: Install ccache on Linux/Mac
|
||||
if: inputs.os == 'linux' || inputs.os == 'macos'
|
||||
|
||||
6
.github/workflows/ci.yml
vendored
6
.github/workflows/ci.yml
vendored
@ -61,11 +61,7 @@ jobs:
|
||||
suggest: true
|
||||
|
||||
coverage:
|
||||
# Force to stick to ubuntu 20.04 for coverage because
|
||||
# lcov was updated to 2.x version in ubuntu-latest
|
||||
# and cause a lot of issues.
|
||||
# See https://github.com/linux-test-project/lcov/issues/238
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v4
|
||||
|
||||
16
.github/workflows/docker-dist-tests.yml
vendored
16
.github/workflows/docker-dist-tests.yml
vendored
@ -26,13 +26,29 @@ on:
|
||||
|
||||
|
||||
jobs:
|
||||
get-contracts-hash:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
hash: ${{ steps.get-hash.outputs.hash }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: true
|
||||
|
||||
- name: Get submodule short hash
|
||||
id: get-hash
|
||||
run: |
|
||||
hash=$(git rev-parse --short HEAD:vendor/codex-contracts-eth)
|
||||
echo "hash=$hash" >> $GITHUB_OUTPUT
|
||||
build-and-push:
|
||||
name: Build and Push
|
||||
uses: ./.github/workflows/docker-reusable.yml
|
||||
needs: get-contracts-hash
|
||||
with:
|
||||
nimflags: '-d:disableMarchNative -d:codex_enable_api_debug_peers=true -d:codex_enable_proof_failures=true -d:codex_enable_log_counter=true -d:verify_circuit=true'
|
||||
nat_ip_auto: true
|
||||
tag_latest: ${{ github.ref_name == github.event.repository.default_branch || startsWith(github.ref, 'refs/tags/') }}
|
||||
tag_suffix: dist-tests
|
||||
contract_image: "codexstorage/codex-contracts-eth:sha-${{ needs.get-contracts-hash.outputs.hash }}-dist-tests"
|
||||
run_release_tests: ${{ inputs.run_release_tests }}
|
||||
secrets: inherit
|
||||
|
||||
36
.github/workflows/docker-reusable.yml
vendored
36
.github/workflows/docker-reusable.yml
vendored
@ -59,6 +59,10 @@ on:
|
||||
required: false
|
||||
type: string
|
||||
default: false
|
||||
contract_image:
|
||||
description: Specifies compatible smart contract image
|
||||
required: false
|
||||
type: string
|
||||
|
||||
|
||||
env:
|
||||
@ -71,6 +75,7 @@ env:
|
||||
TAG_LATEST: ${{ inputs.tag_latest }}
|
||||
TAG_SHA: ${{ inputs.tag_sha }}
|
||||
TAG_SUFFIX: ${{ inputs.tag_suffix }}
|
||||
CONTRACT_IMAGE: ${{ inputs.contract_image }}
|
||||
# Tests
|
||||
TESTS_SOURCE: codex-storage/cs-codex-dist-tests
|
||||
TESTS_BRANCH: master
|
||||
@ -80,8 +85,19 @@ env:
|
||||
|
||||
|
||||
jobs:
|
||||
compute:
|
||||
name: Compute build ID
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
build_id: ${{ steps.build_id.outputs.build_id }}
|
||||
steps:
|
||||
- name: Generate unique build id
|
||||
id: build_id
|
||||
run: echo "build_id=$(openssl rand -hex 5)" >> $GITHUB_OUTPUT
|
||||
|
||||
# Build platform specific image
|
||||
build:
|
||||
needs: compute
|
||||
strategy:
|
||||
fail-fast: true
|
||||
matrix:
|
||||
@ -108,11 +124,19 @@ jobs:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Docker - Variables
|
||||
run: |
|
||||
# Create contract label for compatible contract image if specified
|
||||
if [[ -n "${{ env.CONTRACT_IMAGE }}" ]]; then
|
||||
echo "CONTRACT_LABEL=storage.codex.nim-codex.blockchain-image=${{ env.CONTRACT_IMAGE }}" >>$GITHUB_ENV
|
||||
fi
|
||||
|
||||
- name: Docker - Meta
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: ${{ env.DOCKER_REPO }}
|
||||
labels: ${{ env.CONTRACT_LABEL }}
|
||||
|
||||
- name: Docker - Set up Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
@ -147,7 +171,7 @@ jobs:
|
||||
- name: Docker - Upload digest
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: digests-${{ matrix.target.arch }}
|
||||
name: digests-${{ needs.compute.outputs.build_id }}-${{ matrix.target.arch }}
|
||||
path: /tmp/digests/*
|
||||
if-no-files-found: error
|
||||
retention-days: 1
|
||||
@ -159,7 +183,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
version: ${{ steps.meta.outputs.version }}
|
||||
needs: build
|
||||
needs: [build, compute]
|
||||
steps:
|
||||
- name: Docker - Variables
|
||||
run: |
|
||||
@ -183,11 +207,16 @@ jobs:
|
||||
else
|
||||
echo "TAG_RAW=false" >>$GITHUB_ENV
|
||||
fi
|
||||
|
||||
# Create contract label for compatible contract image if specified
|
||||
if [[ -n "${{ env.CONTRACT_IMAGE }}" ]]; then
|
||||
echo "CONTRACT_LABEL=storage.codex.nim-codex.blockchain-image=${{ env.CONTRACT_IMAGE }}" >>$GITHUB_ENV
|
||||
fi
|
||||
|
||||
- name: Docker - Download digests
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
pattern: digests-*
|
||||
pattern: digests-${{ needs.compute.outputs.build_id }}-*
|
||||
merge-multiple: true
|
||||
path: /tmp/digests
|
||||
|
||||
@ -199,6 +228,7 @@ jobs:
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: ${{ env.DOCKER_REPO }}
|
||||
labels: ${{ env.CONTRACT_LABEL }}
|
||||
flavor: |
|
||||
latest=${{ env.TAG_LATEST }}
|
||||
suffix=${{ env.TAG_SUFFIX }},onlatest=true
|
||||
|
||||
18
.github/workflows/docker.yml
vendored
18
.github/workflows/docker.yml
vendored
@ -20,9 +20,25 @@ on:
|
||||
|
||||
|
||||
jobs:
|
||||
get-contracts-hash:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
hash: ${{ steps.get-hash.outputs.hash }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: true
|
||||
|
||||
- name: Get submodule short hash
|
||||
id: get-hash
|
||||
run: |
|
||||
hash=$(git rev-parse --short HEAD:vendor/codex-contracts-eth)
|
||||
echo "hash=$hash" >> $GITHUB_OUTPUT
|
||||
build-and-push:
|
||||
name: Build and Push
|
||||
uses: ./.github/workflows/docker-reusable.yml
|
||||
needs: get-contracts-hash
|
||||
with:
|
||||
tag_latest: ${{ github.ref_name == github.event.repository.default_branch || startsWith(github.ref, 'refs/tags/') }}
|
||||
secrets: inherit
|
||||
contract_image: "codexstorage/codex-contracts-eth:sha-${{ needs.get-contracts-hash.outputs.hash }}"
|
||||
secrets: inherit
|
||||
11
.github/workflows/release.yml
vendored
11
.github/workflows/release.yml
vendored
@ -28,7 +28,7 @@ jobs:
|
||||
uses: fabiocaccamo/create-matrix-action@v5
|
||||
with:
|
||||
matrix: |
|
||||
os {linux}, cpu {amd64}, builder {ubuntu-20.04}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
|
||||
os {linux}, cpu {amd64}, builder {ubuntu-22.04}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
|
||||
os {linux}, cpu {arm64}, builder {ubuntu-22.04-arm}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
|
||||
os {macos}, cpu {amd64}, builder {macos-13}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
|
||||
os {macos}, cpu {arm64}, builder {macos-14}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
|
||||
@ -177,3 +177,12 @@ jobs:
|
||||
files: |
|
||||
/tmp/release/*
|
||||
make_latest: true
|
||||
|
||||
- name: Generate Python SDK
|
||||
uses: peter-evans/repository-dispatch@v3
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
with:
|
||||
token: ${{ secrets.DISPATCH_PAT }}
|
||||
repository: codex-storage/py-codex-api-client
|
||||
event-type: generate
|
||||
client-payload: '{"openapi_url": "https://raw.githubusercontent.com/codex-storage/nim-codex/${{ github.ref }}/openapi.yaml"}'
|
||||
|
||||
10
.gitmodules
vendored
10
.gitmodules
vendored
@ -224,3 +224,13 @@
|
||||
[submodule "vendor/nim-chroprof"]
|
||||
path = vendor/nim-chroprof
|
||||
url = https://github.com/codex-storage/nim-chroprof.git
|
||||
[submodule "vendor/nim-quic"]
|
||||
path = vendor/nim-quic
|
||||
url = https://github.com/vacp2p/nim-quic.git
|
||||
ignore = untracked
|
||||
branch = master
|
||||
[submodule "vendor/nim-ngtcp2"]
|
||||
path = vendor/nim-ngtcp2
|
||||
url = https://github.com/vacp2p/nim-ngtcp2.git
|
||||
ignore = untracked
|
||||
branch = master
|
||||
|
||||
6
Makefile
6
Makefile
@ -179,11 +179,11 @@ coverage:
|
||||
$(MAKE) NIMFLAGS="$(NIMFLAGS) --lineDir:on --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage" test
|
||||
cd nimcache/release/testCodex && rm -f *.c
|
||||
mkdir -p coverage
|
||||
lcov --capture --directory nimcache/release/testCodex --output-file coverage/coverage.info
|
||||
lcov --capture --keep-going --directory nimcache/release/testCodex --output-file coverage/coverage.info
|
||||
shopt -s globstar && ls $$(pwd)/codex/{*,**/*}.nim
|
||||
shopt -s globstar && lcov --extract coverage/coverage.info $$(pwd)/codex/{*,**/*}.nim --output-file coverage/coverage.f.info
|
||||
shopt -s globstar && lcov --extract coverage/coverage.info --keep-going $$(pwd)/codex/{*,**/*}.nim --output-file coverage/coverage.f.info
|
||||
echo -e $(BUILD_MSG) "coverage/report/index.html"
|
||||
genhtml coverage/coverage.f.info --output-directory coverage/report
|
||||
genhtml coverage/coverage.f.info --keep-going --output-directory coverage/report
|
||||
|
||||
show-coverage:
|
||||
if which open >/dev/null; then (echo -e "\e[92mOpening\e[39m HTML coverage report in browser..." && open coverage/report/index.html) || true; fi
|
||||
|
||||
@ -107,14 +107,14 @@ task coverage, "generates code coverage report":
|
||||
mkDir("coverage")
|
||||
echo " ======== Running LCOV ======== "
|
||||
exec(
|
||||
"lcov --capture --directory nimcache/coverage --output-file coverage/coverage.info"
|
||||
"lcov --capture --keep-going --directory nimcache/coverage --output-file coverage/coverage.info"
|
||||
)
|
||||
exec(
|
||||
"lcov --extract coverage/coverage.info --output-file coverage/coverage.f.info " &
|
||||
"lcov --extract coverage/coverage.info --keep-going --output-file coverage/coverage.f.info " &
|
||||
nimSrcs
|
||||
)
|
||||
echo " ======== Generating HTML coverage report ======== "
|
||||
exec("genhtml coverage/coverage.f.info --output-directory coverage/report ")
|
||||
exec("genhtml coverage/coverage.f.info --keep-going --output-directory coverage/report ")
|
||||
echo " ======== Coverage report Done ======== "
|
||||
|
||||
task showCoverage, "open coverage html":
|
||||
|
||||
@ -678,7 +678,9 @@ proc new*(
|
||||
advertiser: advertiser,
|
||||
)
|
||||
|
||||
proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} =
|
||||
proc peerEventHandler(
|
||||
peerId: PeerId, event: PeerEvent
|
||||
): Future[void] {.gcsafe, async: (raises: [CancelledError]).} =
|
||||
if event.kind == PeerEventKind.Joined:
|
||||
await self.setupPeer(peerId)
|
||||
else:
|
||||
|
||||
@ -323,7 +323,9 @@ method init*(self: BlockExcNetwork) =
|
||||
## Perform protocol initialization
|
||||
##
|
||||
|
||||
proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} =
|
||||
proc peerEventHandler(
|
||||
peerId: PeerId, event: PeerEvent
|
||||
): Future[void] {.gcsafe, async: (raises: [CancelledError]).} =
|
||||
if event.kind == PeerEventKind.Joined:
|
||||
self.setupPeer(peerId)
|
||||
else:
|
||||
@ -332,7 +334,9 @@ method init*(self: BlockExcNetwork) =
|
||||
self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined)
|
||||
self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left)
|
||||
|
||||
proc handler(conn: Connection, proto: string) {.async.} =
|
||||
proc handler(
|
||||
conn: Connection, proto: string
|
||||
): Future[void] {.async: (raises: [CancelledError]).} =
|
||||
let peerId = conn.peerId
|
||||
let blockexcPeer = self.getOrCreatePeer(peerId)
|
||||
await blockexcPeer.readLoop(conn) # attach read loop
|
||||
|
||||
@ -97,7 +97,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: WantList) =
|
||||
pb.write(field, ipb)
|
||||
|
||||
proc write*(pb: var ProtoBuffer, field: int, value: BlockDelivery) =
|
||||
var ipb = initProtoBuffer(maxSize = MaxBlockSize)
|
||||
var ipb = initProtoBuffer()
|
||||
ipb.write(1, value.blk.cid.data.buffer)
|
||||
ipb.write(2, value.blk.data)
|
||||
ipb.write(3, value.address)
|
||||
@ -128,7 +128,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: StateChannelUpdate) =
|
||||
pb.write(field, ipb)
|
||||
|
||||
proc protobufEncode*(value: Message): seq[byte] =
|
||||
var ipb = initProtoBuffer(maxSize = MaxMessageSize)
|
||||
var ipb = initProtoBuffer()
|
||||
ipb.write(1, value.wantList)
|
||||
for v in value.payload:
|
||||
ipb.write(3, v)
|
||||
@ -254,16 +254,14 @@ proc decode*(
|
||||
proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
|
||||
var
|
||||
value = Message()
|
||||
pb = initProtoBuffer(msg, maxSize = MaxMessageSize)
|
||||
pb = initProtoBuffer(msg)
|
||||
ipb: ProtoBuffer
|
||||
sublist: seq[seq[byte]]
|
||||
if ?pb.getField(1, ipb):
|
||||
value.wantList = ?WantList.decode(ipb)
|
||||
if ?pb.getRepeatedField(3, sublist):
|
||||
for item in sublist:
|
||||
value.payload.add(
|
||||
?BlockDelivery.decode(initProtoBuffer(item, maxSize = MaxBlockSize))
|
||||
)
|
||||
value.payload.add(?BlockDelivery.decode(initProtoBuffer(item)))
|
||||
if ?pb.getRepeatedField(4, sublist):
|
||||
for item in sublist:
|
||||
value.blockPresences.add(?BlockPresence.decode(initProtoBuffer(item)))
|
||||
|
||||
@ -488,17 +488,22 @@ proc getCodexRevision(): string =
|
||||
var res = strip(staticExec("git rev-parse --short HEAD"))
|
||||
return res
|
||||
|
||||
proc getCodexContractsRevision(): string =
|
||||
let res = strip(staticExec("git rev-parse --short HEAD:vendor/codex-contracts-eth"))
|
||||
return res
|
||||
|
||||
proc getNimBanner(): string =
|
||||
staticExec("nim --version | grep Version")
|
||||
|
||||
const
|
||||
codexVersion* = getCodexVersion()
|
||||
codexRevision* = getCodexRevision()
|
||||
codexContractsRevision* = getCodexContractsRevision()
|
||||
nimBanner* = getNimBanner()
|
||||
|
||||
codexFullVersion* =
|
||||
"Codex version: " & codexVersion & "\p" & "Codex revision: " & codexRevision & "\p" &
|
||||
nimBanner
|
||||
"Codex contracts revision: " & codexContractsRevision & "\p" & nimBanner
|
||||
|
||||
proc parseCmdArg*(
|
||||
T: typedesc[MultiAddress], input: string
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
import pkg/contractabi
|
||||
import pkg/ethers/fields
|
||||
import pkg/ethers/contracts/fields
|
||||
import pkg/questionable/results
|
||||
|
||||
export contractabi
|
||||
|
||||
@ -18,9 +18,9 @@ const knownAddresses = {
|
||||
# Taiko Alpha-3 Testnet
|
||||
"167005":
|
||||
{"Marketplace": Address.init("0x948CF9291b77Bd7ad84781b9047129Addf1b894F")}.toTable,
|
||||
# Codex Testnet - Feb 25 2025 07:24:19 AM (+00:00 UTC)
|
||||
# Codex Testnet - Apr 22 2025 12:42:16 PM (+00:00 UTC)
|
||||
"789987":
|
||||
{"Marketplace": Address.init("0xfFaF679D5Cbfdd5Dbc9Be61C616ed115DFb597ed")}.toTable,
|
||||
{"Marketplace": Address.init("0xDB2908d724a15d05c0B6B8e8441a8b36E67476d3")}.toTable,
|
||||
}.toTable
|
||||
|
||||
proc getKnownAddress(T: type, chainId: UInt256): ?Address =
|
||||
|
||||
@ -76,7 +76,9 @@ proc config(
|
||||
|
||||
return resolvedConfig
|
||||
|
||||
proc approveFunds(market: OnChainMarket, amount: UInt256) {.async.} =
|
||||
proc approveFunds(
|
||||
market: OnChainMarket, amount: UInt256
|
||||
) {.async: (raises: [CancelledError, MarketError]).} =
|
||||
debug "Approving tokens", amount
|
||||
convertEthersError("Failed to approve funds"):
|
||||
let tokenAddress = await market.contract.token()
|
||||
@ -105,7 +107,9 @@ method getZkeyHash*(
|
||||
let config = await market.config()
|
||||
return some config.proofs.zkeyHash
|
||||
|
||||
method getSigner*(market: OnChainMarket): Future[Address] {.async.} =
|
||||
method getSigner*(
|
||||
market: OnChainMarket
|
||||
): Future[Address] {.async: (raises: [CancelledError, MarketError]).} =
|
||||
convertEthersError("Failed to get signer address"):
|
||||
return await market.signer.getAddress()
|
||||
|
||||
@ -159,7 +163,9 @@ method mySlots*(market: OnChainMarket): Future[seq[SlotId]] {.async.} =
|
||||
|
||||
return slots
|
||||
|
||||
method requestStorage(market: OnChainMarket, request: StorageRequest) {.async.} =
|
||||
method requestStorage(
|
||||
market: OnChainMarket, request: StorageRequest
|
||||
) {.async: (raises: [CancelledError, MarketError]).} =
|
||||
convertEthersError("Failed to request storage"):
|
||||
debug "Requesting storage"
|
||||
await market.approveFunds(request.totalPrice())
|
||||
@ -215,7 +221,7 @@ method requestExpiresAt*(
|
||||
|
||||
method getHost(
|
||||
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
|
||||
): Future[?Address] {.async.} =
|
||||
): Future[?Address] {.async: (raises: [CancelledError, MarketError]).} =
|
||||
convertEthersError("Failed to get slot's host"):
|
||||
let slotId = slotId(requestId, slotIndex)
|
||||
let address = await market.contract.getHost(slotId)
|
||||
@ -226,7 +232,7 @@ method getHost(
|
||||
|
||||
method currentCollateral*(
|
||||
market: OnChainMarket, slotId: SlotId
|
||||
): Future[UInt256] {.async.} =
|
||||
): Future[UInt256] {.async: (raises: [MarketError, CancelledError]).} =
|
||||
convertEthersError("Failed to get slot's current collateral"):
|
||||
return await market.contract.currentCollateral(slotId)
|
||||
|
||||
@ -243,37 +249,76 @@ method fillSlot(
|
||||
slotIndex: uint64,
|
||||
proof: Groth16Proof,
|
||||
collateral: UInt256,
|
||||
) {.async.} =
|
||||
) {.async: (raises: [CancelledError, MarketError]).} =
|
||||
convertEthersError("Failed to fill slot"):
|
||||
logScope:
|
||||
requestId
|
||||
slotIndex
|
||||
|
||||
await market.approveFunds(collateral)
|
||||
trace "calling fillSlot on contract"
|
||||
discard await market.contract.fillSlot(requestId, slotIndex, proof).confirm(1)
|
||||
trace "fillSlot transaction completed"
|
||||
try:
|
||||
await market.approveFunds(collateral)
|
||||
|
||||
method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} =
|
||||
# Add 10% to gas estimate to deal with different evm code flow when we
|
||||
# happen to be the last one to fill a slot in this request
|
||||
trace "estimating gas for fillSlot"
|
||||
let gas = await market.contract.estimateGas.fillSlot(requestId, slotIndex, proof)
|
||||
let overrides = TransactionOverrides(gasLimit: some (gas * 110) div 100)
|
||||
|
||||
trace "calling fillSlot on contract"
|
||||
discard await market.contract
|
||||
.fillSlot(requestId, slotIndex, proof, overrides)
|
||||
.confirm(1)
|
||||
trace "fillSlot transaction completed"
|
||||
except Marketplace_SlotNotFree as parent:
|
||||
raise newException(
|
||||
SlotStateMismatchError, "Failed to fill slot because the slot is not free",
|
||||
parent,
|
||||
)
|
||||
|
||||
method freeSlot*(
|
||||
market: OnChainMarket, slotId: SlotId
|
||||
) {.async: (raises: [CancelledError, MarketError]).} =
|
||||
convertEthersError("Failed to free slot"):
|
||||
var freeSlot: Future[Confirmable]
|
||||
if rewardRecipient =? market.rewardRecipient:
|
||||
# If --reward-recipient specified, use it as the reward recipient, and use
|
||||
# the SP's address as the collateral recipient
|
||||
let collateralRecipient = await market.getSigner()
|
||||
freeSlot = market.contract.freeSlot(
|
||||
slotId,
|
||||
rewardRecipient, # --reward-recipient
|
||||
collateralRecipient,
|
||||
) # SP's address
|
||||
else:
|
||||
# Otherwise, use the SP's address as both the reward and collateral
|
||||
# recipient (the contract will use msg.sender for both)
|
||||
freeSlot = market.contract.freeSlot(slotId)
|
||||
try:
|
||||
var freeSlot: Future[Confirmable]
|
||||
if rewardRecipient =? market.rewardRecipient:
|
||||
# If --reward-recipient specified, use it as the reward recipient, and use
|
||||
# the SP's address as the collateral recipient
|
||||
let collateralRecipient = await market.getSigner()
|
||||
|
||||
discard await freeSlot.confirm(1)
|
||||
# Add 10% to gas estimate to deal with different evm code flow when we
|
||||
# happen to be the one to make the request fail
|
||||
let gas = await market.contract.estimateGas.freeSlot(
|
||||
slotId, rewardRecipient, collateralRecipient
|
||||
)
|
||||
let overrides = TransactionOverrides(gasLimit: some (gas * 110) div 100)
|
||||
|
||||
method withdrawFunds(market: OnChainMarket, requestId: RequestId) {.async.} =
|
||||
freeSlot = market.contract.freeSlot(
|
||||
slotId,
|
||||
rewardRecipient, # --reward-recipient
|
||||
collateralRecipient, # SP's address
|
||||
overrides,
|
||||
)
|
||||
else:
|
||||
# Otherwise, use the SP's address as both the reward and collateral
|
||||
# recipient (the contract will use msg.sender for both)
|
||||
|
||||
# Add 10% to gas estimate to deal with different evm code flow when we
|
||||
# happen to be the one to make the request fail
|
||||
let gas = await market.contract.estimateGas.freeSlot(slotId)
|
||||
let overrides = TransactionOverrides(gasLimit: some (gas * 110) div 100)
|
||||
|
||||
freeSlot = market.contract.freeSlot(slotId, overrides)
|
||||
|
||||
discard await freeSlot.confirm(1)
|
||||
except Marketplace_SlotIsFree as parent:
|
||||
raise newException(
|
||||
SlotStateMismatchError, "Failed to free slot, slot is already free", parent
|
||||
)
|
||||
|
||||
method withdrawFunds(
|
||||
market: OnChainMarket, requestId: RequestId
|
||||
) {.async: (raises: [CancelledError, MarketError]).} =
|
||||
convertEthersError("Failed to withdraw funds"):
|
||||
discard await market.contract.withdrawFunds(requestId).confirm(1)
|
||||
|
||||
@ -300,15 +345,22 @@ method getChallenge*(
|
||||
let overrides = CallOverrides(blockTag: some BlockTag.pending)
|
||||
return await market.contract.getChallenge(id, overrides)
|
||||
|
||||
method submitProof*(market: OnChainMarket, id: SlotId, proof: Groth16Proof) {.async.} =
|
||||
method submitProof*(
|
||||
market: OnChainMarket, id: SlotId, proof: Groth16Proof
|
||||
) {.async: (raises: [CancelledError, MarketError]).} =
|
||||
convertEthersError("Failed to submit proof"):
|
||||
discard await market.contract.submitProof(id, proof).confirm(1)
|
||||
|
||||
method markProofAsMissing*(
|
||||
market: OnChainMarket, id: SlotId, period: Period
|
||||
) {.async.} =
|
||||
) {.async: (raises: [CancelledError, MarketError]).} =
|
||||
convertEthersError("Failed to mark proof as missing"):
|
||||
discard await market.contract.markProofAsMissing(id, period).confirm(1)
|
||||
# Add 10% to gas estimate to deal with different evm code flow when we
|
||||
# happen to be the one to make the request fail
|
||||
let gas = await market.contract.estimateGas.markProofAsMissing(id, period)
|
||||
let overrides = TransactionOverrides(gasLimit: some (gas * 110) div 100)
|
||||
|
||||
discard await market.contract.markProofAsMissing(id, period, overrides).confirm(1)
|
||||
|
||||
method canProofBeMarkedAsMissing*(
|
||||
market: OnChainMarket, id: SlotId, period: Period
|
||||
@ -325,16 +377,21 @@ method canProofBeMarkedAsMissing*(
|
||||
|
||||
method reserveSlot*(
|
||||
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
|
||||
) {.async.} =
|
||||
) {.async: (raises: [CancelledError, MarketError]).} =
|
||||
convertEthersError("Failed to reserve slot"):
|
||||
discard await market.contract
|
||||
.reserveSlot(
|
||||
requestId,
|
||||
slotIndex,
|
||||
# reserveSlot runs out of gas for unknown reason, but 100k gas covers it
|
||||
TransactionOverrides(gasLimit: some 100000.u256),
|
||||
)
|
||||
.confirm(1)
|
||||
try:
|
||||
# Add 10% to gas estimate to deal with different evm code flow when we
|
||||
# happen to be the last one that is allowed to reserve the slot
|
||||
let gas = await market.contract.estimateGas.reserveSlot(requestId, slotIndex)
|
||||
let overrides = TransactionOverrides(gasLimit: some (gas * 110) div 100)
|
||||
|
||||
discard
|
||||
await market.contract.reserveSlot(requestId, slotIndex, overrides).confirm(1)
|
||||
except SlotReservations_ReservationNotAllowed:
|
||||
raise newException(
|
||||
SlotReservationNotAllowedError,
|
||||
"Failed to reserve slot because reservation is not allowed",
|
||||
)
|
||||
|
||||
method canReserveSlot*(
|
||||
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
|
||||
|
||||
@ -51,8 +51,8 @@ type
|
||||
Proofs_ProofNotMissing* = object of SolidityError
|
||||
Proofs_ProofNotRequired* = object of SolidityError
|
||||
Proofs_ProofAlreadyMarkedMissing* = object of SolidityError
|
||||
Proofs_InvalidProbability* = object of SolidityError
|
||||
Periods_InvalidSecondsPerPeriod* = object of SolidityError
|
||||
SlotReservations_ReservationNotAllowed* = object of SolidityError
|
||||
|
||||
proc configuration*(marketplace: Marketplace): MarketplaceConfig {.contract, view.}
|
||||
proc token*(marketplace: Marketplace): Address {.contract, view.}
|
||||
@ -67,7 +67,9 @@ proc requestStorage*(
|
||||
errors: [
|
||||
Marketplace_InvalidClientAddress, Marketplace_RequestAlreadyExists,
|
||||
Marketplace_InvalidExpiry, Marketplace_InsufficientSlots,
|
||||
Marketplace_InvalidMaxSlotLoss,
|
||||
Marketplace_InvalidMaxSlotLoss, Marketplace_InsufficientDuration,
|
||||
Marketplace_InsufficientProofProbability, Marketplace_InsufficientCollateral,
|
||||
Marketplace_InsufficientReward, Marketplace_InvalidCid,
|
||||
]
|
||||
.}
|
||||
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
import pkg/stint
|
||||
import pkg/contractabi
|
||||
import pkg/ethers/fields
|
||||
import pkg/ethers/contracts/fields
|
||||
|
||||
type
|
||||
Groth16Proof* = object
|
||||
|
||||
@ -3,13 +3,12 @@ import std/sequtils
|
||||
import std/typetraits
|
||||
import pkg/contractabi
|
||||
import pkg/nimcrypto
|
||||
import pkg/ethers/fields
|
||||
import pkg/ethers/contracts/fields
|
||||
import pkg/questionable/results
|
||||
import pkg/stew/byteutils
|
||||
import pkg/libp2p/[cid, multicodec]
|
||||
import ../logutils
|
||||
import ../utils/json
|
||||
import ../clock
|
||||
from ../errors import mapFailure
|
||||
|
||||
export contractabi
|
||||
|
||||
@ -8,6 +8,8 @@
|
||||
## those terms.
|
||||
|
||||
import std/options
|
||||
import std/sugar
|
||||
import std/sequtils
|
||||
|
||||
import pkg/results
|
||||
import pkg/chronos
|
||||
@ -42,7 +44,9 @@ func toFailure*[T](exp: Option[T]): Result[T, ref CatchableError] {.inline.} =
|
||||
else:
|
||||
T.failure("Option is None")
|
||||
|
||||
proc allFinishedFailed*[T](futs: seq[Future[T]]): Future[FinishedFailed[T]] {.async.} =
|
||||
proc allFinishedFailed*[T](
|
||||
futs: seq[Future[T]]
|
||||
): Future[FinishedFailed[T]] {.async: (raises: [CancelledError]).} =
|
||||
## Check if all futures have finished or failed
|
||||
##
|
||||
## TODO: wip, not sure if we want this - at the minimum,
|
||||
@ -57,3 +61,26 @@ proc allFinishedFailed*[T](futs: seq[Future[T]]): Future[FinishedFailed[T]] {.as
|
||||
res.success.add f
|
||||
|
||||
return res
|
||||
|
||||
proc allFinishedValues*[T](
|
||||
futs: seq[Future[T]]
|
||||
): Future[?!seq[T]] {.async: (raises: [CancelledError]).} =
|
||||
## If all futures have finished, return corresponding values,
|
||||
## otherwise return failure
|
||||
##
|
||||
|
||||
# wait for all futures to be either completed, failed or canceled
|
||||
await allFutures(futs)
|
||||
|
||||
let numOfFailed = futs.countIt(it.failed)
|
||||
|
||||
if numOfFailed > 0:
|
||||
return failure "Some futures failed (" & $numOfFailed & "))"
|
||||
|
||||
# here, we know there are no failed futures in "futs"
|
||||
# and we are only interested in those that completed successfully
|
||||
let values = collect:
|
||||
for b in futs:
|
||||
if b.finished:
|
||||
b.value
|
||||
return success values
|
||||
|
||||
@ -18,6 +18,8 @@ export periods
|
||||
type
|
||||
Market* = ref object of RootObj
|
||||
MarketError* = object of CodexError
|
||||
SlotStateMismatchError* = object of MarketError
|
||||
SlotReservationNotAllowedError* = object of MarketError
|
||||
Subscription* = ref object of RootObj
|
||||
OnRequest* =
|
||||
proc(id: RequestId, ask: StorageAsk, expiry: uint64) {.gcsafe, upraises: [].}
|
||||
@ -72,7 +74,9 @@ method getZkeyHash*(
|
||||
): Future[?string] {.base, async: (raises: [CancelledError, MarketError]).} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method getSigner*(market: Market): Future[Address] {.base, async.} =
|
||||
method getSigner*(
|
||||
market: Market
|
||||
): Future[Address] {.base, async: (raises: [CancelledError, MarketError]).} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method periodicity*(
|
||||
@ -106,7 +110,9 @@ proc inDowntime*(market: Market, slotId: SlotId): Future[bool] {.async.} =
|
||||
let pntr = await market.getPointer(slotId)
|
||||
return pntr < downtime
|
||||
|
||||
method requestStorage*(market: Market, request: StorageRequest) {.base, async.} =
|
||||
method requestStorage*(
|
||||
market: Market, request: StorageRequest
|
||||
) {.base, async: (raises: [CancelledError, MarketError]).} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method myRequests*(market: Market): Future[seq[RequestId]] {.base, async.} =
|
||||
@ -142,12 +148,12 @@ method requestExpiresAt*(
|
||||
|
||||
method getHost*(
|
||||
market: Market, requestId: RequestId, slotIndex: uint64
|
||||
): Future[?Address] {.base, async.} =
|
||||
): Future[?Address] {.base, async: (raises: [CancelledError, MarketError]).} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method currentCollateral*(
|
||||
market: Market, slotId: SlotId
|
||||
): Future[UInt256] {.base, async.} =
|
||||
): Future[UInt256] {.base, async: (raises: [MarketError, CancelledError]).} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method getActiveSlot*(market: Market, slotId: SlotId): Future[?Slot] {.base, async.} =
|
||||
@ -159,13 +165,17 @@ method fillSlot*(
|
||||
slotIndex: uint64,
|
||||
proof: Groth16Proof,
|
||||
collateral: UInt256,
|
||||
) {.base, async.} =
|
||||
) {.base, async: (raises: [CancelledError, MarketError]).} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method freeSlot*(market: Market, slotId: SlotId) {.base, async.} =
|
||||
method freeSlot*(
|
||||
market: Market, slotId: SlotId
|
||||
) {.base, async: (raises: [CancelledError, MarketError]).} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method withdrawFunds*(market: Market, requestId: RequestId) {.base, async.} =
|
||||
method withdrawFunds*(
|
||||
market: Market, requestId: RequestId
|
||||
) {.base, async: (raises: [CancelledError, MarketError]).} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method subscribeRequests*(
|
||||
@ -184,10 +194,14 @@ method getChallenge*(
|
||||
): Future[ProofChallenge] {.base, async.} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method submitProof*(market: Market, id: SlotId, proof: Groth16Proof) {.base, async.} =
|
||||
method submitProof*(
|
||||
market: Market, id: SlotId, proof: Groth16Proof
|
||||
) {.base, async: (raises: [CancelledError, MarketError]).} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method markProofAsMissing*(market: Market, id: SlotId, period: Period) {.base, async.} =
|
||||
method markProofAsMissing*(
|
||||
market: Market, id: SlotId, period: Period
|
||||
) {.base, async: (raises: [CancelledError, MarketError]).} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method canProofBeMarkedAsMissing*(
|
||||
@ -197,7 +211,7 @@ method canProofBeMarkedAsMissing*(
|
||||
|
||||
method reserveSlot*(
|
||||
market: Market, requestId: RequestId, slotIndex: uint64
|
||||
) {.base, async.} =
|
||||
) {.base, async: (raises: [CancelledError, MarketError]).} =
|
||||
raiseAssert("not implemented")
|
||||
|
||||
method canReserveSlot*(
|
||||
|
||||
@ -27,11 +27,11 @@ const MaxMerkleTreeSize = 100.MiBs.uint
|
||||
const MaxMerkleProofSize = 1.MiBs.uint
|
||||
|
||||
proc encode*(self: CodexTree): seq[byte] =
|
||||
var pb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
|
||||
var pb = initProtoBuffer()
|
||||
pb.write(1, self.mcodec.uint64)
|
||||
pb.write(2, self.leavesCount.uint64)
|
||||
for node in self.nodes:
|
||||
var nodesPb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
|
||||
var nodesPb = initProtoBuffer()
|
||||
nodesPb.write(1, node)
|
||||
nodesPb.finish()
|
||||
pb.write(3, nodesPb)
|
||||
@ -40,7 +40,7 @@ proc encode*(self: CodexTree): seq[byte] =
|
||||
pb.buffer
|
||||
|
||||
proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree =
|
||||
var pb = initProtoBuffer(data, maxSize = MaxMerkleTreeSize)
|
||||
var pb = initProtoBuffer(data)
|
||||
var mcodecCode: uint64
|
||||
var leavesCount: uint64
|
||||
discard ?pb.getField(1, mcodecCode).mapFailure
|
||||
@ -63,13 +63,13 @@ proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree =
|
||||
CodexTree.fromNodes(mcodec, nodes, leavesCount.int)
|
||||
|
||||
proc encode*(self: CodexProof): seq[byte] =
|
||||
var pb = initProtoBuffer(maxSize = MaxMerkleProofSize)
|
||||
var pb = initProtoBuffer()
|
||||
pb.write(1, self.mcodec.uint64)
|
||||
pb.write(2, self.index.uint64)
|
||||
pb.write(3, self.nleaves.uint64)
|
||||
|
||||
for node in self.path:
|
||||
var nodesPb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
|
||||
var nodesPb = initProtoBuffer()
|
||||
nodesPb.write(1, node)
|
||||
nodesPb.finish()
|
||||
pb.write(4, nodesPb)
|
||||
@ -78,7 +78,7 @@ proc encode*(self: CodexProof): seq[byte] =
|
||||
pb.buffer
|
||||
|
||||
proc decode*(_: type CodexProof, data: seq[byte]): ?!CodexProof =
|
||||
var pb = initProtoBuffer(data, maxSize = MaxMerkleProofSize)
|
||||
var pb = initProtoBuffer(data)
|
||||
var mcodecCode: uint64
|
||||
var index: uint64
|
||||
var nleaves: uint64
|
||||
|
||||
@ -15,7 +15,7 @@ import std/sequtils
|
||||
import pkg/questionable
|
||||
import pkg/questionable/results
|
||||
import pkg/libp2p/[cid, multicodec, multihash]
|
||||
|
||||
import pkg/constantine/hashes
|
||||
import ../../utils
|
||||
import ../../rng
|
||||
import ../../errors
|
||||
@ -132,9 +132,13 @@ func compress*(x, y: openArray[byte], key: ByteTreeKey, mhash: MHash): ?!ByteHas
|
||||
## Compress two hashes
|
||||
##
|
||||
|
||||
var digest = newSeq[byte](mhash.size)
|
||||
mhash.coder(@x & @y & @[key.byte], digest)
|
||||
success digest
|
||||
# Using Constantine's SHA256 instead of mhash for optimal performance on 32-byte merkle node hashing
|
||||
# See: https://github.com/codex-storage/nim-codex/issues/1162
|
||||
|
||||
let input = @x & @y & @[key.byte]
|
||||
var digest = hashes.sha256.hash(input)
|
||||
|
||||
success @digest
|
||||
|
||||
func init*(
|
||||
_: type CodexTree, mcodec: MultiCodec = Sha256HashCodec, leaves: openArray[ByteHash]
|
||||
|
||||
@ -183,23 +183,29 @@ proc fetchBatched*(
|
||||
# )
|
||||
|
||||
while not iter.finished:
|
||||
let blocks = collect:
|
||||
let blockFutures = collect:
|
||||
for i in 0 ..< batchSize:
|
||||
if not iter.finished:
|
||||
let address = BlockAddress.init(cid, iter.next())
|
||||
if not (await address in self.networkStore) or fetchLocal:
|
||||
self.networkStore.getBlock(address)
|
||||
|
||||
let res = await allFinishedFailed(blocks)
|
||||
if res.failure.len > 0:
|
||||
trace "Some blocks failed to fetch", len = res.failure.len
|
||||
return failure("Some blocks failed to fetch (" & $res.failure.len & " )")
|
||||
without blockResults =? await allFinishedValues(blockFutures), err:
|
||||
trace "Some blocks failed to fetch", err = err.msg
|
||||
return failure(err)
|
||||
|
||||
if not onBatch.isNil and
|
||||
batchErr =? (await onBatch(blocks.mapIt(it.read.get))).errorOption:
|
||||
let blocks = blockResults.filterIt(it.isSuccess()).mapIt(it.value)
|
||||
|
||||
let numOfFailedBlocks = blockResults.len - blocks.len
|
||||
if numOfFailedBlocks > 0:
|
||||
return
|
||||
failure("Some blocks failed (Result) to fetch (" & $numOfFailedBlocks & ")")
|
||||
|
||||
if not onBatch.isNil and batchErr =? (await onBatch(blocks)).errorOption:
|
||||
return failure(batchErr)
|
||||
|
||||
await sleepAsync(1.millis)
|
||||
if not iter.finished:
|
||||
await sleepAsync(1.millis)
|
||||
|
||||
success()
|
||||
|
||||
@ -271,6 +277,8 @@ proc streamEntireDataset(
|
||||
##
|
||||
trace "Retrieving blocks from manifest", manifestCid
|
||||
|
||||
var jobs: seq[Future[void]]
|
||||
let stream = LPStream(StoreStream.new(self.networkStore, manifest, pad = false))
|
||||
if manifest.protected:
|
||||
# Retrieve, decode and save to the local store all EС groups
|
||||
proc erasureJob(): Future[void] {.async: (raises: []).} =
|
||||
@ -284,14 +292,25 @@ proc streamEntireDataset(
|
||||
except CatchableError as exc:
|
||||
trace "Error erasure decoding manifest", manifestCid, exc = exc.msg
|
||||
|
||||
self.trackedFutures.track(erasureJob())
|
||||
jobs.add(erasureJob())
|
||||
|
||||
self.trackedFutures.track(self.fetchDatasetAsync(manifest, fetchLocal = false))
|
||||
# prefetch task should not fetch from local store
|
||||
jobs.add(self.fetchDatasetAsync(manifest))
|
||||
|
||||
# Monitor stream completion and cancel background jobs when done
|
||||
proc monitorStream() {.async: (raises: []).} =
|
||||
try:
|
||||
await stream.join()
|
||||
except CatchableError as exc:
|
||||
warn "Stream failed", exc = exc.msg
|
||||
finally:
|
||||
await noCancel allFutures(jobs.mapIt(it.cancelAndWait))
|
||||
|
||||
self.trackedFutures.track(monitorStream())
|
||||
|
||||
# Retrieve all blocks of the dataset sequentially from the local store or network
|
||||
trace "Creating store stream for manifest", manifestCid
|
||||
LPStream(StoreStream.new(self.networkStore, manifest, pad = false)).success
|
||||
|
||||
stream.success
|
||||
|
||||
proc retrieve*(
|
||||
self: CodexNodeRef, cid: Cid, local: bool = true
|
||||
|
||||
@ -114,9 +114,14 @@ proc retrieveCid(
|
||||
else:
|
||||
resp.setHeader("Content-Disposition", "attachment")
|
||||
|
||||
resp.setHeader("Content-Length", $manifest.datasetSize.int)
|
||||
# For erasure-coded datasets, we need to return the _original_ length; i.e.,
|
||||
# the length of the non-erasure-coded dataset, as that's what we will be
|
||||
# returning to the client.
|
||||
let contentLength =
|
||||
if manifest.protected: manifest.originalDatasetSize else: manifest.datasetSize
|
||||
resp.setHeader("Content-Length", $(contentLength.int))
|
||||
|
||||
await resp.prepareChunked()
|
||||
await resp.prepare(HttpResponseStreamType.Plain)
|
||||
|
||||
while not stream.atEof:
|
||||
var
|
||||
@ -129,7 +134,7 @@ proc retrieveCid(
|
||||
|
||||
bytes += buff.len
|
||||
|
||||
await resp.sendChunk(addr buff[0], buff.len)
|
||||
await resp.send(addr buff[0], buff.len)
|
||||
await resp.finish()
|
||||
codex_api_downloads.inc()
|
||||
except CancelledError as exc:
|
||||
@ -283,7 +288,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
|
||||
cid: Cid, resp: HttpResponseRef
|
||||
) -> RestApiResponse:
|
||||
## Deletes either a single block or an entire dataset
|
||||
## from the local node. Does nothing and returns 200
|
||||
## from the local node. Does nothing and returns 204
|
||||
## if the dataset is not locally available.
|
||||
##
|
||||
var headers = buildCorsHeaders("DELETE", allowedOrigin)
|
||||
@ -470,7 +475,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
|
||||
|
||||
if restAv.totalSize == 0:
|
||||
return RestApiResponse.error(
|
||||
Http400, "Total size must be larger then zero", headers = headers
|
||||
Http422, "Total size must be larger then zero", headers = headers
|
||||
)
|
||||
|
||||
if not reservations.hasAvailable(restAv.totalSize):
|
||||
@ -479,10 +484,19 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
|
||||
|
||||
without availability =? (
|
||||
await reservations.createAvailability(
|
||||
restAv.totalSize, restAv.duration, restAv.minPricePerBytePerSecond,
|
||||
restAv.totalSize,
|
||||
restAv.duration,
|
||||
restAv.minPricePerBytePerSecond,
|
||||
restAv.totalCollateral,
|
||||
enabled = restAv.enabled |? true,
|
||||
until = restAv.until |? 0,
|
||||
)
|
||||
), error:
|
||||
if error of CancelledError:
|
||||
raise error
|
||||
if error of UntilOutOfBoundsError:
|
||||
return RestApiResponse.error(Http422, error.msg)
|
||||
|
||||
return RestApiResponse.error(Http500, error.msg, headers = headers)
|
||||
|
||||
return RestApiResponse.response(
|
||||
@ -519,6 +533,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
|
||||
## tokens) to be matched against the request's pricePerBytePerSecond
|
||||
## totalCollateral - total collateral (in amount of
|
||||
## tokens) that can be distributed among matching requests
|
||||
|
||||
try:
|
||||
without contracts =? node.contracts.host:
|
||||
return RestApiResponse.error(Http503, "Persistence is not enabled")
|
||||
@ -543,17 +558,23 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
|
||||
return RestApiResponse.error(Http500, error.msg)
|
||||
|
||||
if isSome restAv.freeSize:
|
||||
return RestApiResponse.error(Http400, "Updating freeSize is not allowed")
|
||||
return RestApiResponse.error(Http422, "Updating freeSize is not allowed")
|
||||
|
||||
if size =? restAv.totalSize:
|
||||
if size == 0:
|
||||
return RestApiResponse.error(Http422, "Total size must be larger then zero")
|
||||
|
||||
# we don't allow lowering the totalSize bellow currently utilized size
|
||||
if size < (availability.totalSize - availability.freeSize):
|
||||
return RestApiResponse.error(
|
||||
Http400,
|
||||
Http422,
|
||||
"New totalSize must be larger then current totalSize - freeSize, which is currently: " &
|
||||
$(availability.totalSize - availability.freeSize),
|
||||
)
|
||||
|
||||
if not reservations.hasAvailable(size):
|
||||
return RestApiResponse.error(Http422, "Not enough storage quota")
|
||||
|
||||
availability.freeSize += size - availability.totalSize
|
||||
availability.totalSize = size
|
||||
|
||||
@ -566,10 +587,21 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
|
||||
if totalCollateral =? restAv.totalCollateral:
|
||||
availability.totalCollateral = totalCollateral
|
||||
|
||||
if err =? (await reservations.update(availability)).errorOption:
|
||||
return RestApiResponse.error(Http500, err.msg)
|
||||
if until =? restAv.until:
|
||||
availability.until = until
|
||||
|
||||
return RestApiResponse.response(Http200)
|
||||
if enabled =? restAv.enabled:
|
||||
availability.enabled = enabled
|
||||
|
||||
if err =? (await reservations.update(availability)).errorOption:
|
||||
if err of CancelledError:
|
||||
raise err
|
||||
if err of UntilOutOfBoundsError:
|
||||
return RestApiResponse.error(Http422, err.msg)
|
||||
else:
|
||||
return RestApiResponse.error(Http500, err.msg)
|
||||
|
||||
return RestApiResponse.response(Http204)
|
||||
except CatchableError as exc:
|
||||
trace "Excepting processing request", exc = exc.msg
|
||||
return RestApiResponse.error(Http500)
|
||||
@ -647,10 +679,36 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
|
||||
without params =? StorageRequestParams.fromJson(body), error:
|
||||
return RestApiResponse.error(Http400, error.msg, headers = headers)
|
||||
|
||||
let expiry = params.expiry
|
||||
|
||||
if expiry <= 0 or expiry >= params.duration:
|
||||
return RestApiResponse.error(
|
||||
Http422,
|
||||
"Expiry must be greater than zero and less than the request's duration",
|
||||
headers = headers,
|
||||
)
|
||||
|
||||
if params.proofProbability <= 0:
|
||||
return RestApiResponse.error(
|
||||
Http422, "Proof probability must be greater than zero", headers = headers
|
||||
)
|
||||
|
||||
if params.collateralPerByte <= 0:
|
||||
return RestApiResponse.error(
|
||||
Http422, "Collateral per byte must be greater than zero", headers = headers
|
||||
)
|
||||
|
||||
if params.pricePerBytePerSecond <= 0:
|
||||
return RestApiResponse.error(
|
||||
Http422,
|
||||
"Price per byte per second must be greater than zero",
|
||||
headers = headers,
|
||||
)
|
||||
|
||||
let requestDurationLimit = await contracts.purchasing.market.requestDurationLimit
|
||||
if params.duration > requestDurationLimit:
|
||||
return RestApiResponse.error(
|
||||
Http400,
|
||||
Http422,
|
||||
"Duration exceeds limit of " & $requestDurationLimit & " seconds",
|
||||
headers = headers,
|
||||
)
|
||||
@ -660,13 +718,13 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
|
||||
|
||||
if tolerance == 0:
|
||||
return RestApiResponse.error(
|
||||
Http400, "Tolerance needs to be bigger then zero", headers = headers
|
||||
Http422, "Tolerance needs to be bigger then zero", headers = headers
|
||||
)
|
||||
|
||||
# prevent underflow
|
||||
if tolerance > nodes:
|
||||
return RestApiResponse.error(
|
||||
Http400,
|
||||
Http422,
|
||||
"Invalid parameters: `tolerance` cannot be greater than `nodes`",
|
||||
headers = headers,
|
||||
)
|
||||
@ -677,21 +735,11 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
|
||||
# ensure leopard constrainst of 1 < K ≥ M
|
||||
if ecK <= 1 or ecK < ecM:
|
||||
return RestApiResponse.error(
|
||||
Http400,
|
||||
Http422,
|
||||
"Invalid parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`",
|
||||
headers = headers,
|
||||
)
|
||||
|
||||
without expiry =? params.expiry:
|
||||
return RestApiResponse.error(Http400, "Expiry required", headers = headers)
|
||||
|
||||
if expiry <= 0 or expiry >= params.duration:
|
||||
return RestApiResponse.error(
|
||||
Http400,
|
||||
"Expiry needs value bigger then zero and smaller then the request's duration",
|
||||
headers = headers,
|
||||
)
|
||||
|
||||
without purchaseId =?
|
||||
await node.requestStorage(
|
||||
cid, params.duration, params.proofProbability, nodes, tolerance,
|
||||
@ -699,7 +747,7 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
|
||||
), error:
|
||||
if error of InsufficientBlocksError:
|
||||
return RestApiResponse.error(
|
||||
Http400,
|
||||
Http422,
|
||||
"Dataset too small for erasure parameters, need at least " &
|
||||
$(ref InsufficientBlocksError)(error).minSize.int & " bytes",
|
||||
headers = headers,
|
||||
@ -866,7 +914,11 @@ proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
|
||||
"",
|
||||
"announceAddresses": node.discovery.announceAddrs,
|
||||
"table": table,
|
||||
"codex": {"version": $codexVersion, "revision": $codexRevision},
|
||||
"codex": {
|
||||
"version": $codexVersion,
|
||||
"revision": $codexRevision,
|
||||
"contracts": $codexContractsRevision,
|
||||
},
|
||||
}
|
||||
|
||||
# return pretty json for human readability
|
||||
|
||||
@ -17,7 +17,7 @@ type
|
||||
proofProbability* {.serialize.}: UInt256
|
||||
pricePerBytePerSecond* {.serialize.}: UInt256
|
||||
collateralPerByte* {.serialize.}: UInt256
|
||||
expiry* {.serialize.}: ?uint64
|
||||
expiry* {.serialize.}: uint64
|
||||
nodes* {.serialize.}: ?uint
|
||||
tolerance* {.serialize.}: ?uint
|
||||
|
||||
@ -33,6 +33,8 @@ type
|
||||
minPricePerBytePerSecond* {.serialize.}: UInt256
|
||||
totalCollateral* {.serialize.}: UInt256
|
||||
freeSize* {.serialize.}: ?uint64
|
||||
enabled* {.serialize.}: ?bool
|
||||
until* {.serialize.}: ?SecondsSince1970
|
||||
|
||||
RestSalesAgent* = object
|
||||
state* {.serialize.}: string
|
||||
|
||||
@ -113,7 +113,6 @@ proc remove(sales: Sales, agent: SalesAgent) {.async.} =
|
||||
proc cleanUp(
|
||||
sales: Sales,
|
||||
agent: SalesAgent,
|
||||
returnBytes: bool,
|
||||
reprocessSlot: bool,
|
||||
returnedCollateral: ?UInt256,
|
||||
processing: Future[void],
|
||||
@ -132,7 +131,7 @@ proc cleanUp(
|
||||
# if reservation for the SalesAgent was not created, then it means
|
||||
# that the cleanUp was called before the sales process really started, so
|
||||
# there are not really any bytes to be returned
|
||||
if returnBytes and request =? data.request and reservation =? data.reservation:
|
||||
if request =? data.request and reservation =? data.reservation:
|
||||
if returnErr =? (
|
||||
await sales.context.reservations.returnBytesToAvailability(
|
||||
reservation.availabilityId, reservation.id, request.ask.slotSize
|
||||
@ -203,9 +202,9 @@ proc processSlot(sales: Sales, item: SlotQueueItem, done: Future[void]) =
|
||||
newSalesAgent(sales.context, item.requestId, item.slotIndex, none StorageRequest)
|
||||
|
||||
agent.onCleanUp = proc(
|
||||
returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none
|
||||
reprocessSlot = false, returnedCollateral = UInt256.none
|
||||
) {.async.} =
|
||||
await sales.cleanUp(agent, returnBytes, reprocessSlot, returnedCollateral, done)
|
||||
await sales.cleanUp(agent, reprocessSlot, returnedCollateral, done)
|
||||
|
||||
agent.onFilled = some proc(request: StorageRequest, slotIndex: uint64) =
|
||||
sales.filled(request, slotIndex, done)
|
||||
@ -271,12 +270,12 @@ proc load*(sales: Sales) {.async.} =
|
||||
newSalesAgent(sales.context, slot.request.id, slot.slotIndex, some slot.request)
|
||||
|
||||
agent.onCleanUp = proc(
|
||||
returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none
|
||||
reprocessSlot = false, returnedCollateral = UInt256.none
|
||||
) {.async.} =
|
||||
# since workers are not being dispatched, this future has not been created
|
||||
# by a worker. Create a dummy one here so we can call sales.cleanUp
|
||||
let done: Future[void] = nil
|
||||
await sales.cleanUp(agent, returnBytes, reprocessSlot, returnedCollateral, done)
|
||||
await sales.cleanUp(agent, reprocessSlot, returnedCollateral, done)
|
||||
|
||||
# There is no need to assign agent.onFilled as slots loaded from `mySlots`
|
||||
# are inherently already filled and so assigning agent.onFilled would be
|
||||
@ -285,7 +284,9 @@ proc load*(sales: Sales) {.async.} =
|
||||
agent.start(SaleUnknown())
|
||||
sales.agents.add agent
|
||||
|
||||
proc OnAvailabilitySaved(sales: Sales, availability: Availability) {.async.} =
|
||||
proc OnAvailabilitySaved(
|
||||
sales: Sales, availability: Availability
|
||||
) {.async: (raises: []).} =
|
||||
## When availabilities are modified or added, the queue should be unpaused if
|
||||
## it was paused and any slots in the queue should have their `seen` flag
|
||||
## cleared.
|
||||
@ -374,13 +375,13 @@ proc onSlotFreed(sales: Sales, requestId: RequestId, slotIndex: uint64) =
|
||||
|
||||
if err =? queue.push(slotQueueItem).errorOption:
|
||||
if err of SlotQueueItemExistsError:
|
||||
error "Failed to push item to queue becaue it already exists",
|
||||
error "Failed to push item to queue because it already exists",
|
||||
error = err.msgDetail
|
||||
elif err of QueueNotRunningError:
|
||||
warn "Failed to push item to queue becaue queue is not running",
|
||||
warn "Failed to push item to queue because queue is not running",
|
||||
error = err.msgDetail
|
||||
except CatchableError as e:
|
||||
warn "Failed to add slot to queue", error = e.msg
|
||||
except CancelledError as e:
|
||||
trace "sales.addSlotToQueue was cancelled"
|
||||
|
||||
# We could get rid of this by adding the storage ask in the SlotFreed event,
|
||||
# so we would not need to call getRequest to get the collateralPerSlot.
|
||||
@ -533,8 +534,9 @@ proc startSlotQueue(sales: Sales) =
|
||||
|
||||
slotQueue.start()
|
||||
|
||||
proc OnAvailabilitySaved(availability: Availability) {.async.} =
|
||||
await sales.OnAvailabilitySaved(availability)
|
||||
proc OnAvailabilitySaved(availability: Availability) {.async: (raises: []).} =
|
||||
if availability.enabled:
|
||||
await sales.OnAvailabilitySaved(availability)
|
||||
|
||||
reservations.OnAvailabilitySaved = OnAvailabilitySaved
|
||||
|
||||
|
||||
@ -35,6 +35,7 @@ import std/sequtils
|
||||
import std/sugar
|
||||
import std/typetraits
|
||||
import std/sequtils
|
||||
import std/times
|
||||
import pkg/chronos
|
||||
import pkg/datastore
|
||||
import pkg/nimcrypto
|
||||
@ -55,7 +56,7 @@ export requests
|
||||
export logutils
|
||||
|
||||
logScope:
|
||||
topics = "sales reservations"
|
||||
topics = "marketplace sales reservations"
|
||||
|
||||
type
|
||||
AvailabilityId* = distinct array[32, byte]
|
||||
@ -70,6 +71,12 @@ type
|
||||
minPricePerBytePerSecond* {.serialize.}: UInt256
|
||||
totalCollateral {.serialize.}: UInt256
|
||||
totalRemainingCollateral* {.serialize.}: UInt256
|
||||
# If set to false, the availability will not accept new slots.
|
||||
# If enabled, it will not impact any existing slots that are already being hosted.
|
||||
enabled* {.serialize.}: bool
|
||||
# Specifies the latest timestamp after which the availability will no longer host any slots.
|
||||
# If set to 0, there will be no restrictions.
|
||||
until* {.serialize.}: SecondsSince1970
|
||||
|
||||
Reservation* = ref object
|
||||
id* {.serialize.}: ReservationId
|
||||
@ -77,6 +84,7 @@ type
|
||||
size* {.serialize.}: uint64
|
||||
requestId* {.serialize.}: RequestId
|
||||
slotIndex* {.serialize.}: uint64
|
||||
validUntil* {.serialize.}: SecondsSince1970
|
||||
|
||||
Reservations* = ref object of RootObj
|
||||
availabilityLock: AsyncLock
|
||||
@ -84,10 +92,14 @@ type
|
||||
repo: RepoStore
|
||||
OnAvailabilitySaved: ?OnAvailabilitySaved
|
||||
|
||||
GetNext* = proc(): Future[?seq[byte]] {.upraises: [], gcsafe, closure.}
|
||||
IterDispose* = proc(): Future[?!void] {.gcsafe, closure.}
|
||||
OnAvailabilitySaved* =
|
||||
proc(availability: Availability): Future[void] {.upraises: [], gcsafe.}
|
||||
GetNext* = proc(): Future[?seq[byte]] {.
|
||||
upraises: [], gcsafe, async: (raises: [CancelledError]), closure
|
||||
.}
|
||||
IterDispose* =
|
||||
proc(): Future[?!void] {.gcsafe, async: (raises: [CancelledError]), closure.}
|
||||
OnAvailabilitySaved* = proc(availability: Availability): Future[void] {.
|
||||
upraises: [], gcsafe, async: (raises: [])
|
||||
.}
|
||||
StorableIter* = ref object
|
||||
finished*: bool
|
||||
next*: GetNext
|
||||
@ -102,13 +114,20 @@ type
|
||||
SerializationError* = object of ReservationsError
|
||||
UpdateFailedError* = object of ReservationsError
|
||||
BytesOutOfBoundsError* = object of ReservationsError
|
||||
UntilOutOfBoundsError* = object of ReservationsError
|
||||
|
||||
const
|
||||
SalesKey = (CodexMetaKey / "sales").tryGet # TODO: move to sales module
|
||||
ReservationsKey = (SalesKey / "reservations").tryGet
|
||||
|
||||
proc hash*(x: AvailabilityId): Hash {.borrow.}
|
||||
proc all*(self: Reservations, T: type SomeStorableObject): Future[?!seq[T]] {.async.}
|
||||
proc all*(
|
||||
self: Reservations, T: type SomeStorableObject
|
||||
): Future[?!seq[T]] {.async: (raises: [CancelledError]).}
|
||||
|
||||
proc all*(
|
||||
self: Reservations, T: type SomeStorableObject, availabilityId: AvailabilityId
|
||||
): Future[?!seq[T]] {.async: (raises: [CancelledError]).}
|
||||
|
||||
template withLock(lock, body) =
|
||||
try:
|
||||
@ -128,6 +147,8 @@ proc init*(
|
||||
duration: uint64,
|
||||
minPricePerBytePerSecond: UInt256,
|
||||
totalCollateral: UInt256,
|
||||
enabled: bool,
|
||||
until: SecondsSince1970,
|
||||
): Availability =
|
||||
var id: array[32, byte]
|
||||
doAssert randomBytes(id) == 32
|
||||
@ -139,6 +160,8 @@ proc init*(
|
||||
minPricePerBytePerSecond: minPricePerBytePerSecond,
|
||||
totalCollateral: totalCollateral,
|
||||
totalRemainingCollateral: totalCollateral,
|
||||
enabled: enabled,
|
||||
until: until,
|
||||
)
|
||||
|
||||
func totalCollateral*(self: Availability): UInt256 {.inline.} =
|
||||
@ -154,6 +177,7 @@ proc init*(
|
||||
size: uint64,
|
||||
requestId: RequestId,
|
||||
slotIndex: uint64,
|
||||
validUntil: SecondsSince1970,
|
||||
): Reservation =
|
||||
var id: array[32, byte]
|
||||
doAssert randomBytes(id) == 32
|
||||
@ -163,6 +187,7 @@ proc init*(
|
||||
size: size,
|
||||
requestId: requestId,
|
||||
slotIndex: slotIndex,
|
||||
validUntil: validUntil,
|
||||
)
|
||||
|
||||
func toArray(id: SomeStorableId): array[32, byte] =
|
||||
@ -217,11 +242,19 @@ func available*(self: Reservations): uint =
|
||||
func hasAvailable*(self: Reservations, bytes: uint): bool =
|
||||
self.repo.available(bytes.NBytes)
|
||||
|
||||
proc exists*(self: Reservations, key: Key): Future[bool] {.async.} =
|
||||
proc exists*(
|
||||
self: Reservations, key: Key
|
||||
): Future[bool] {.async: (raises: [CancelledError]).} =
|
||||
let exists = await self.repo.metaDs.ds.contains(key)
|
||||
return exists
|
||||
|
||||
proc getImpl(self: Reservations, key: Key): Future[?!seq[byte]] {.async.} =
|
||||
iterator items(self: StorableIter): Future[?seq[byte]] =
|
||||
while not self.finished:
|
||||
yield self.next()
|
||||
|
||||
proc getImpl(
|
||||
self: Reservations, key: Key
|
||||
): Future[?!seq[byte]] {.async: (raises: [CancelledError]).} =
|
||||
if not await self.exists(key):
|
||||
let err =
|
||||
newException(NotExistsError, "object with key " & $key & " does not exist")
|
||||
@ -234,7 +267,7 @@ proc getImpl(self: Reservations, key: Key): Future[?!seq[byte]] {.async.} =
|
||||
|
||||
proc get*(
|
||||
self: Reservations, key: Key, T: type SomeStorableObject
|
||||
): Future[?!T] {.async.} =
|
||||
): Future[?!T] {.async: (raises: [CancelledError]).} =
|
||||
without serialized =? await self.getImpl(key), error:
|
||||
return failure(error)
|
||||
|
||||
@ -243,7 +276,9 @@ proc get*(
|
||||
|
||||
return success obj
|
||||
|
||||
proc updateImpl(self: Reservations, obj: SomeStorableObject): Future[?!void] {.async.} =
|
||||
proc updateImpl(
|
||||
self: Reservations, obj: SomeStorableObject
|
||||
): Future[?!void] {.async: (raises: [CancelledError]).} =
|
||||
trace "updating " & $(obj.type), id = obj.id
|
||||
|
||||
without key =? obj.key, error:
|
||||
@ -256,10 +291,15 @@ proc updateImpl(self: Reservations, obj: SomeStorableObject): Future[?!void] {.a
|
||||
|
||||
proc updateAvailability(
self: Reservations, obj: Availability
): Future[?!void] {.async.} =
): Future[?!void] {.async: (raises: [CancelledError]).} =
logScope:
availabilityId = obj.id

if obj.until < 0:
let error =
newException(UntilOutOfBoundsError, "Cannot set until to a negative value")
return failure(error)

without key =? obj.key, error:
return failure(error)

@ -269,21 +309,25 @@ proc updateAvailability(
let res = await self.updateImpl(obj)
# inform subscribers that Availability has been added
if OnAvailabilitySaved =? self.OnAvailabilitySaved:
# when chronos v4 is implemented, and OnAvailabilitySaved is annotated
# with async:(raises:[]), we can remove this try/catch as we know, with
# certainty, that nothing will be raised
try:
await OnAvailabilitySaved(obj)
except CancelledError as e:
raise e
except CatchableError as e:
# we don't have any insight into types of exceptions that
# `OnAvailabilitySaved` can raise because it is caller-defined
warn "Unknown error during 'OnAvailabilitySaved' callback", error = e.msg
await OnAvailabilitySaved(obj)
return res
else:
return failure(err)

if obj.until > 0:
without allReservations =? await self.all(Reservation, obj.id), error:
error.msg = "Error updating reservation: " & error.msg
return failure(error)

let requestEnds = allReservations.mapIt(it.validUntil)

if requestEnds.len > 0 and requestEnds.max > obj.until:
let error = newException(
UntilOutOfBoundsError,
"Until parameter must be greater or equal to the longest currently hosted slot",
)
return failure(error)

# Sizing of the availability changed, we need to adjust the repo reservation accordingly
if oldAvailability.totalSize != obj.totalSize:
trace "totalSize changed, updating repo reservation"
@ -306,26 +350,23 @@ proc updateAvailability(
# inform subscribers that Availability has been modified (with increased
# size)
if OnAvailabilitySaved =? self.OnAvailabilitySaved:
# when chronos v4 is implemented, and OnAvailabilitySaved is annotated
# with async:(raises:[]), we can remove this try/catch as we know, with
# certainty, that nothing will be raised
try:
await OnAvailabilitySaved(obj)
except CancelledError as e:
raise e
except CatchableError as e:
# we don't have any insight into types of exceptions that
# `OnAvailabilitySaved` can raise because it is caller-defined
warn "Unknown error during 'OnAvailabilitySaved' callback", error = e.msg

await OnAvailabilitySaved(obj)
return res

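As a rough illustration of the until rule enforced above (timestamps invented), an availability can only shrink its hosting window down to the latest validUntil among its current reservations:

let until = 1_700_000_000'i64
let reservationEnds = @[1_699_000_000'i64, 1_700_000_500'i64]

# mirrors the check in updateAvailability: the longest hosted slot wins
if reservationEnds.len > 0 and reservationEnds.max > until:
  echo "rejected: until must cover the longest currently hosted slot"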
proc update*(self: Reservations, obj: Reservation): Future[?!void] {.async.} =
proc update*(
self: Reservations, obj: Reservation
): Future[?!void] {.async: (raises: [CancelledError]).} =
return await self.updateImpl(obj)

proc update*(self: Reservations, obj: Availability): Future[?!void] {.async.} =
withLock(self.availabilityLock):
return await self.updateAvailability(obj)
proc update*(
self: Reservations, obj: Availability
): Future[?!void] {.async: (raises: [CancelledError]).} =
try:
withLock(self.availabilityLock):
return await self.updateAvailability(obj)
except AsyncLockError as e:
error "Lock error when trying to update the availability", err = e.msg
return failure(e)

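Callers typically consume these ?!void results through errorOption, the same pattern this diff uses in createAvailability and createReservation; a hedged usage sketch, with a wrapping proc and imports added for completeness:

import pkg/chronos
import pkg/chronicles
import pkg/questionable
import pkg/questionable/results
import pkg/codex/sales/reservations # path as used by the tests in this diff

proc persist(reservations: Reservations, availability: Availability) {.async.} =
  # failure travels as a value, not an exception
  if err =? (await reservations.update(availability)).errorOption:
    error "failed to update availability", error = err.msg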
proc delete(self: Reservations, key: Key): Future[?!void] {.async.} =
|
||||
trace "deleting object", key
|
||||
@ -391,12 +432,20 @@ proc createAvailability*(
duration: uint64,
minPricePerBytePerSecond: UInt256,
totalCollateral: UInt256,
enabled: bool,
until: SecondsSince1970,
): Future[?!Availability] {.async.} =
trace "creating availability",
size, duration, minPricePerBytePerSecond, totalCollateral
size, duration, minPricePerBytePerSecond, totalCollateral, enabled, until

let availability =
Availability.init(size, size, duration, minPricePerBytePerSecond, totalCollateral)
if until < 0:
let error =
newException(UntilOutOfBoundsError, "Cannot set until to a negative value")
return failure(error)

let availability = Availability.init(
size, size, duration, minPricePerBytePerSecond, totalCollateral, enabled, until
)
let bytes = availability.freeSize

if reserveErr =? (await self.repo.reserve(bytes.NBytes)).errorOption:
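A hedged sketch of calling the extended constructor, mirroring the positional argument order used by the tests further down; the literal values are invented. Passing enabled = false creates a paused availability, and until = 0 keeps it open-ended:

import pkg/chronos
import pkg/chronicles
import pkg/questionable
import pkg/questionable/results
import pkg/stint
import pkg/codex/clock # SecondsSince1970, as imported elsewhere in this codebase
import pkg/codex/sales/reservations # path as used by the tests in this diff

proc openPausedAvailability(reservations: Reservations) {.async.} =
  without availability =? await reservations.createAvailability(
    2_000_000'u64, # total size in bytes
    3600'u64, # duration in seconds
    1.u256, # minPricePerBytePerSecond
    100.u256, # totalCollateral
    false, # enabled: accept no new slots yet
    until = 0.SecondsSince1970, # 0 means no hosting deadline
  ), err:
    error "could not create availability", error = err.msg
    return
  trace "created availability", freeSize = availability.freeSize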
@ -420,6 +469,7 @@ method createReservation*(
|
||||
requestId: RequestId,
|
||||
slotIndex: uint64,
|
||||
collateralPerByte: UInt256,
|
||||
validUntil: SecondsSince1970,
|
||||
): Future[?!Reservation] {.async, base.} =
|
||||
withLock(self.availabilityLock):
|
||||
without availabilityKey =? availabilityId.key, error:
|
||||
@ -436,9 +486,11 @@ method createReservation*(
|
||||
)
|
||||
return failure(error)
|
||||
|
||||
trace "Creating reservation", availabilityId, slotSize, requestId, slotIndex
|
||||
trace "Creating reservation",
|
||||
availabilityId, slotSize, requestId, slotIndex, validUntil = validUntil
|
||||
|
||||
let reservation = Reservation.init(availabilityId, slotSize, requestId, slotIndex)
|
||||
let reservation =
|
||||
Reservation.init(availabilityId, slotSize, requestId, slotIndex, validUntil)
|
||||
|
||||
if createResErr =? (await self.update(reservation)).errorOption:
|
||||
return failure(createResErr)
|
||||
@ -448,7 +500,7 @@ method createReservation*(
|
||||
availability.freeSize -= slotSize
|
||||
|
||||
# adjust the remaining totalRemainingCollateral
|
||||
availability.totalRemainingCollateral -= slotSize.stuint(256) * collateralPerByte
|
||||
availability.totalRemainingCollateral -= slotSize.u256 * collateralPerByte
|
||||
|
||||
# update availability with reduced size
|
||||
trace "Updating availability with reduced size"
|
||||
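The collateral bookkeeping above is plain fixed-point arithmetic; a small worked example with invented numbers:

import pkg/stint

let slotSize = 1_073_741_824'u64 # 1 GiB slot, illustrative
let collateralPerByte = 2.u256
var totalRemainingCollateral = 10_000_000_000.u256

# same expression as in createReservation above
totalRemainingCollateral -= slotSize.u256 * collateralPerByte
echo totalRemainingCollateral # 10_000_000_000 - 2_147_483_648 = 7_852_516_352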
@ -527,7 +579,7 @@ proc release*(
|
||||
reservationId: ReservationId,
|
||||
availabilityId: AvailabilityId,
|
||||
bytes: uint,
|
||||
): Future[?!void] {.async.} =
|
||||
): Future[?!void] {.async: (raises: [CancelledError]).} =
|
||||
logScope:
|
||||
topics = "release"
|
||||
bytes
|
||||
@ -565,13 +617,9 @@ proc release*(
|
||||
|
||||
return success()
|
||||
|
||||
iterator items(self: StorableIter): Future[?seq[byte]] =
|
||||
while not self.finished:
|
||||
yield self.next()
|
||||
|
||||
proc storables(
|
||||
self: Reservations, T: type SomeStorableObject, queryKey: Key = ReservationsKey
|
||||
): Future[?!StorableIter] {.async.} =
|
||||
): Future[?!StorableIter] {.async: (raises: [CancelledError]).} =
|
||||
var iter = StorableIter()
|
||||
let query = Query.init(queryKey)
|
||||
when T is Availability:
|
||||
@ -589,7 +637,7 @@ proc storables(
|
||||
return failure(error)
|
||||
|
||||
# /sales/reservations
|
||||
proc next(): Future[?seq[byte]] {.async.} =
|
||||
proc next(): Future[?seq[byte]] {.async: (raises: [CancelledError]).} =
|
||||
await idleAsync()
|
||||
iter.finished = results.finished
|
||||
if not results.finished and res =? (await results.next()) and res.data.len > 0 and
|
||||
@ -598,7 +646,7 @@ proc storables(
|
||||
|
||||
return none seq[byte]
|
||||
|
||||
proc dispose(): Future[?!void] {.async.} =
|
||||
proc dispose(): Future[?!void] {.async: (raises: [CancelledError]).} =
|
||||
return await results.dispose()
|
||||
|
||||
iter.next = next
|
||||
@ -607,32 +655,40 @@ proc storables(
|
||||
|
||||
proc allImpl(
|
||||
self: Reservations, T: type SomeStorableObject, queryKey: Key = ReservationsKey
|
||||
): Future[?!seq[T]] {.async.} =
|
||||
): Future[?!seq[T]] {.async: (raises: [CancelledError]).} =
|
||||
var ret: seq[T] = @[]
|
||||
|
||||
without storables =? (await self.storables(T, queryKey)), error:
|
||||
return failure(error)
|
||||
|
||||
for storable in storables.items:
|
||||
without bytes =? (await storable):
|
||||
continue
|
||||
try:
|
||||
without bytes =? (await storable):
|
||||
continue
|
||||
|
||||
without obj =? T.fromJson(bytes), error:
|
||||
error "json deserialization error",
|
||||
json = string.fromBytes(bytes), error = error.msg
|
||||
continue
|
||||
without obj =? T.fromJson(bytes), error:
|
||||
error "json deserialization error",
|
||||
json = string.fromBytes(bytes), error = error.msg
|
||||
continue
|
||||
|
||||
ret.add obj
|
||||
ret.add obj
|
||||
except CancelledError as err:
|
||||
raise err
|
||||
except CatchableError as err:
|
||||
error "Error when retrieving storable", error = err.msg
|
||||
continue
|
||||
|
||||
return success(ret)
|
||||
|
||||
proc all*(self: Reservations, T: type SomeStorableObject): Future[?!seq[T]] {.async.} =
|
||||
proc all*(
|
||||
self: Reservations, T: type SomeStorableObject
|
||||
): Future[?!seq[T]] {.async: (raises: [CancelledError]).} =
|
||||
return await self.allImpl(T)
|
||||
|
||||
proc all*(
|
||||
self: Reservations, T: type SomeStorableObject, availabilityId: AvailabilityId
|
||||
): Future[?!seq[T]] {.async.} =
|
||||
without key =? (ReservationsKey / $availabilityId):
|
||||
): Future[?!seq[T]] {.async: (raises: [CancelledError]).} =
|
||||
without key =? key(availabilityId):
|
||||
return failure("no key")
|
||||
|
||||
return await self.allImpl(T, key)
|
||||
@ -641,6 +697,7 @@ proc findAvailability*(
|
||||
self: Reservations,
|
||||
size, duration: uint64,
|
||||
pricePerBytePerSecond, collateralPerByte: UInt256,
|
||||
validUntil: SecondsSince1970,
|
||||
): Future[?Availability] {.async.} =
|
||||
without storables =? (await self.storables(Availability)), e:
|
||||
error "failed to get all storables", error = e.msg
|
||||
@ -648,11 +705,14 @@ proc findAvailability*(
|
||||
|
||||
for item in storables.items:
if bytes =? (await item) and availability =? Availability.fromJson(bytes):
if size <= availability.freeSize and duration <= availability.duration and
if availability.enabled and size <= availability.freeSize and
duration <= availability.duration and
collateralPerByte <= availability.maxCollateralPerByte and
pricePerBytePerSecond >= availability.minPricePerBytePerSecond:
pricePerBytePerSecond >= availability.minPricePerBytePerSecond and
(availability.until == 0 or availability.until >= validUntil):
trace "availability matched",
id = availability.id,
enabled = availability.enabled,
size,
availFreeSize = availability.freeSize,
duration,
@ -660,7 +720,8 @@ proc findAvailability*(
pricePerBytePerSecond,
availMinPricePerBytePerSecond = availability.minPricePerBytePerSecond,
collateralPerByte,
availMaxCollateralPerByte = availability.maxCollateralPerByte
availMaxCollateralPerByte = availability.maxCollateralPerByte,
until = availability.until

# TODO: As soon as we're on ARC-ORC, we can use destructors
# to automatically dispose our iterators when they fall out of scope.
@ -672,6 +733,7 @@ proc findAvailability*(

trace "availability did not match",
id = availability.id,
enabled = availability.enabled,
size,
availFreeSize = availability.freeSize,
duration,
@ -679,4 +741,5 @@ proc findAvailability*(
pricePerBytePerSecond,
availMinPricePerBytePerSecond = availability.minPricePerBytePerSecond,
collateralPerByte,
availMaxCollateralPerByte = availability.maxCollateralPerByte
availMaxCollateralPerByte = availability.maxCollateralPerByte,
until = availability.until

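The until clause of the matcher reads more easily in isolation. A self-contained restatement with plain integers (the helper name is not from the codebase): until == 0 disables the time check, otherwise the availability must outlive the candidate request.

proc untilAccepts(until, requestValidUntil: int64): bool =
  until == 0 or until >= requestValidUntil

assert untilAccepts(0, 1_800_000_000) # no restriction configured
assert untilAccepts(1_900_000_000, 1_800_000_000) # availability outlives the request
assert not untilAccepts(1_700_000_000, 1_800_000_000) # would expire too early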
@ -27,7 +27,7 @@ type
onFilled*: ?OnFilled

OnCleanUp* = proc(
returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none
reprocessSlot = false, returnedCollateral = UInt256.none
): Future[void] {.gcsafe, upraises: [].}
OnFilled* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, upraises: [].}

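With returnBytes gone, a cleanup callback now takes only reprocessSlot and returnedCollateral. A minimal sketch of a conforming handler follows; the body and log messages are illustrative, and the gcsafe/upraises pragmas required by the full OnCleanUp type are omitted for brevity.

import pkg/chronos
import pkg/chronicles
import pkg/questionable
import pkg/stint

let onCleanUp = proc(
    reprocessSlot = false, returnedCollateral = UInt256.none
) {.async.} =
  if collateral =? returnedCollateral:
    trace "collateral returned to availability", collateral = $collateral
  trace "cleanup finished", reprocessSlot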
@ -12,6 +12,14 @@ type SaleCancelled* = ref object of SaleState
method `$`*(state: SaleCancelled): string =
"SaleCancelled"

proc slotIsFilledByMe(
market: Market, requestId: RequestId, slotIndex: uint64
): Future[bool] {.async: (raises: [CancelledError, MarketError]).} =
let host = await market.getHost(requestId, slotIndex)
let me = await market.getSigner()

return host == me.some

method run*(
state: SaleCancelled, machine: Machine
): Future[?State] {.async: (raises: []).} =
@ -23,21 +31,27 @@ method run*(
raiseAssert "no sale request"

try:
let slot = Slot(request: request, slotIndex: data.slotIndex)
debug "Collecting collateral and partial payout",
requestId = data.requestId, slotIndex = data.slotIndex
let currentCollateral = await market.currentCollateral(slot.id)
await market.freeSlot(slot.id)
var returnedCollateral = UInt256.none

if await slotIsFilledByMe(market, data.requestId, data.slotIndex):
debug "Collecting collateral and partial payout",
requestId = data.requestId, slotIndex = data.slotIndex

let slot = Slot(request: request, slotIndex: data.slotIndex)
let currentCollateral = await market.currentCollateral(slot.id)

try:
await market.freeSlot(slot.id)
except SlotStateMismatchError as e:
warn "Failed to free slot because slot is already free", error = e.msg

returnedCollateral = currentCollateral.some

if onClear =? agent.context.onClear and request =? data.request:
onClear(request, data.slotIndex)

if onCleanUp =? agent.onCleanUp:
await onCleanUp(
returnBytes = true,
reprocessSlot = false,
returnedCollateral = some currentCollateral,
)
await onCleanUp(reprocessSlot = false, returnedCollateral = returnedCollateral)

warn "Sale cancelled due to timeout",
requestId = data.requestId, slotIndex = data.slotIndex

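The behavioural change above boils down to one decision, which the mock-market tests later in this diff exercise: collateral is only reported back to cleanup when this node filled the slot, and a failed freeSlot no longer aborts the state. A simplified restatement, with types reduced so it stands alone:

import pkg/stint
import pkg/questionable

proc collateralToReturn(filledByMe: bool, currentCollateral: UInt256): ?UInt256 =
  # illustrative only; mirrors the branch in SaleCancelled.run
  if filledByMe:
    some currentCollateral
  else:
    UInt256.none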
@ -34,7 +34,7 @@ method run*(
|
||||
onClear(request, data.slotIndex)
|
||||
|
||||
if onCleanUp =? agent.onCleanUp:
|
||||
await onCleanUp(returnBytes = true, reprocessSlot = state.reprocessSlot)
|
||||
await onCleanUp(reprocessSlot = state.reprocessSlot)
|
||||
except CancelledError as e:
|
||||
trace "SaleErrored.run was cancelled", error = e.msgDetail
|
||||
except CatchableError as e:
|
||||
|
||||
@ -28,6 +28,7 @@ method run*(
|
||||
let slot = Slot(request: request, slotIndex: data.slotIndex)
|
||||
debug "Removing slot from mySlots",
|
||||
requestId = data.requestId, slotIndex = data.slotIndex
|
||||
|
||||
await market.freeSlot(slot.id)
|
||||
|
||||
let error = newException(SaleFailedError, "Sale failed")
|
||||
|
||||
@ -30,6 +30,7 @@ method run*(
): Future[?State] {.async: (raises: []).} =
let data = SalesAgent(machine).data
let market = SalesAgent(machine).context.market

without (request =? data.request):
raiseAssert "Request not set"

@ -42,17 +43,16 @@ method run*(
err:
error "Failure attempting to fill slot: unable to calculate collateral",
error = err.msg
return
return some State(SaleErrored(error: err))

debug "Filling slot"
try:
await market.fillSlot(data.requestId, data.slotIndex, state.proof, collateral)
except SlotStateMismatchError as e:
debug "Slot is already filled, ignoring slot"
return some State(SaleIgnored(reprocessSlot: false))
except MarketError as e:
if e.msg.contains "Slot is not free":
debug "Slot is already filled, ignoring slot"
return some State(SaleIgnored(reprocessSlot: false, returnBytes: true))
else:
return some State(SaleErrored(error: e))
return some State(SaleErrored(error: e))
# other CatchableErrors are handled "automatically" by the SaleState

return some State(SaleFilled())

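The ordering of the except branches matters here, since the specific contract error has to be caught before the broader MarketError. A small runnable toy showing the pattern; the Demo types merely stand in for the real hierarchy and assume SlotStateMismatchError derives from MarketError:

type
  DemoMarketError = object of CatchableError
  DemoSlotStateMismatchError = object of DemoMarketError

try:
  raise newException(DemoSlotStateMismatchError, "slot is not free")
except DemoSlotStateMismatchError:
  echo "ignored: somebody else already filled the slot" # specific branch first
except DemoMarketError:
  echo "errored: unexpected marketplace failure"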
@ -36,6 +36,9 @@ method run*(
|
||||
requestId = data.requestId, slotIndex = data.slotIndex
|
||||
|
||||
try:
|
||||
if onClear =? agent.context.onClear:
|
||||
onClear(request, data.slotIndex)
|
||||
|
||||
if onCleanUp =? agent.onCleanUp:
|
||||
await onCleanUp(returnedCollateral = state.returnedCollateral)
|
||||
except CancelledError as e:
|
||||
|
||||
@ -14,7 +14,6 @@ logScope:
|
||||
|
||||
type SaleIgnored* = ref object of SaleState
|
||||
reprocessSlot*: bool # readd slot to queue with `seen` flag
|
||||
returnBytes*: bool # return unreleased bytes from Reservation to Availability
|
||||
|
||||
method `$`*(state: SaleIgnored): string =
|
||||
"SaleIgnored"
|
||||
@ -26,9 +25,7 @@ method run*(
|
||||
|
||||
try:
|
||||
if onCleanUp =? agent.onCleanUp:
|
||||
await onCleanUp(
|
||||
reprocessSlot = state.reprocessSlot, returnBytes = state.returnBytes
|
||||
)
|
||||
await onCleanUp(reprocessSlot = state.reprocessSlot)
|
||||
except CancelledError as e:
|
||||
trace "SaleIgnored.run was cancelled", error = e.msgDetail
|
||||
except CatchableError as e:
|
||||
|
||||
@ -56,7 +56,7 @@ method run*(
|
||||
let slotId = slotId(data.requestId, data.slotIndex)
|
||||
let state = await market.slotState(slotId)
|
||||
if state != SlotState.Free and state != SlotState.Repair:
|
||||
return some State(SaleIgnored(reprocessSlot: false, returnBytes: false))
|
||||
return some State(SaleIgnored(reprocessSlot: false))
|
||||
|
||||
# TODO: Once implemented, check to ensure the host is allowed to fill the slot,
|
||||
# due to the [sliding window mechanism](https://github.com/codex-storage/codex-research/blob/master/design/marketplace.md#dispersal)
|
||||
@ -68,10 +68,12 @@ method run*(
|
||||
pricePerBytePerSecond = request.ask.pricePerBytePerSecond
|
||||
collateralPerByte = request.ask.collateralPerByte
|
||||
|
||||
let requestEnd = await market.getRequestEnd(data.requestId)
|
||||
|
||||
without availability =?
|
||||
await reservations.findAvailability(
|
||||
request.ask.slotSize, request.ask.duration, request.ask.pricePerBytePerSecond,
|
||||
request.ask.collateralPerByte,
|
||||
request.ask.collateralPerByte, requestEnd,
|
||||
):
|
||||
debug "No availability found for request, ignoring"
|
||||
|
||||
@ -82,7 +84,7 @@ method run*(
|
||||
without reservation =?
|
||||
await reservations.createReservation(
|
||||
availability.id, request.ask.slotSize, request.id, data.slotIndex,
|
||||
request.ask.collateralPerByte,
|
||||
request.ask.collateralPerByte, requestEnd,
|
||||
), error:
|
||||
trace "Creation of reservation failed"
|
||||
# Race condition:
|
||||
|
||||
@ -44,12 +44,11 @@ method run*(
|
||||
try:
|
||||
trace "Reserving slot"
|
||||
await market.reserveSlot(data.requestId, data.slotIndex)
|
||||
except SlotReservationNotAllowedError as e:
|
||||
debug "Slot cannot be reserved, ignoring", error = e.msg
|
||||
return some State(SaleIgnored(reprocessSlot: false))
|
||||
except MarketError as e:
|
||||
if e.msg.contains "SlotReservations_ReservationNotAllowed":
|
||||
debug "Slot cannot be reserved, ignoring", error = e.msg
|
||||
return some State(SaleIgnored(reprocessSlot: false, returnBytes: true))
|
||||
else:
|
||||
return some State(SaleErrored(error: e))
|
||||
return some State(SaleErrored(error: e))
|
||||
# other CatchableErrors are handled "automatically" by the SaleState
|
||||
|
||||
trace "Slot successfully reserved"
|
||||
@ -58,7 +57,7 @@ method run*(
|
||||
# do not re-add this slot to the queue, and return bytes from Reservation to
|
||||
# the Availability
|
||||
debug "Slot cannot be reserved, ignoring"
|
||||
return some State(SaleIgnored(reprocessSlot: false, returnBytes: true))
|
||||
return some State(SaleIgnored(reprocessSlot: false))
|
||||
except CancelledError as e:
|
||||
trace "SaleSlotReserving.run was cancelled", error = e.msgDetail
|
||||
except CatchableError as e:
|
||||
|
||||
@ -105,7 +105,7 @@ proc updateQuotaUsage*(
|
||||
minusUsed: NBytes = 0.NBytes,
|
||||
plusReserved: NBytes = 0.NBytes,
|
||||
minusReserved: NBytes = 0.NBytes,
|
||||
): Future[?!void] {.async.} =
|
||||
): Future[?!void] {.async: (raises: [CancelledError]).} =
|
||||
await self.metaDs.modify(
|
||||
QuotaUsedKey,
|
||||
proc(maybeCurrUsage: ?QuotaUsage): Future[?QuotaUsage] {.async.} =
|
||||
|
||||
@ -380,7 +380,9 @@ method close*(self: RepoStore): Future[void] {.async.} =
|
||||
# RepoStore procs
|
||||
###########################################################
|
||||
|
||||
proc reserve*(self: RepoStore, bytes: NBytes): Future[?!void] {.async.} =
|
||||
proc reserve*(
|
||||
self: RepoStore, bytes: NBytes
|
||||
): Future[?!void] {.async: (raises: [CancelledError]).} =
|
||||
## Reserve bytes
|
||||
##
|
||||
|
||||
@ -388,7 +390,9 @@ proc reserve*(self: RepoStore, bytes: NBytes): Future[?!void] {.async.} =
|
||||
|
||||
await self.updateQuotaUsage(plusReserved = bytes)
|
||||
|
||||
proc release*(self: RepoStore, bytes: NBytes): Future[?!void] {.async.} =
|
||||
proc release*(
|
||||
self: RepoStore, bytes: NBytes
|
||||
): Future[?!void] {.async: (raises: [CancelledError]).} =
|
||||
## Release bytes
|
||||
##
|
||||
|
||||
|
||||
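A hedged usage sketch for the two quota helpers whose signatures change above; repo and the byte count are placeholders, and the units import is an assumption about where NBytes lives:

import pkg/chronos
import pkg/chronicles
import pkg/questionable
import pkg/questionable/results
import pkg/codex/stores/repostore # path as imported by the tests in this diff
import pkg/codex/units # assumed home of NBytes

proc holdQuota(repo: RepoStore, bytes: NBytes) {.async.} =
  if err =? (await repo.reserve(bytes)).errorOption:
    error "could not reserve quota", error = err.msg
    return
  # ... host the slot, then give the quota back ...
  if err =? (await repo.release(bytes)).errorOption:
    error "could not release quota", error = err.msg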
215
openapi.yaml
215
openapi.yaml
@ -27,10 +27,6 @@ components:
|
||||
maxLength: 66
|
||||
example: 0x...
|
||||
|
||||
BigInt:
|
||||
type: string
|
||||
description: Integer represented as decimal string
|
||||
|
||||
Cid:
|
||||
type: string
|
||||
description: Content Identifier as specified at https://github.com/multiformats/cid
|
||||
@ -54,18 +50,23 @@ components:
|
||||
type: string
|
||||
description: The amount of tokens paid per byte per second per slot to hosts the client is willing to pay
|
||||
|
||||
Duration:
|
||||
CollateralPerByte:
|
||||
type: string
|
||||
description: The duration of the request in seconds as decimal string
|
||||
description: Number as decimal string that represents how much collateral per byte is asked from hosts that wants to fill a slots
|
||||
|
||||
Duration:
|
||||
type: integer
|
||||
format: int64
|
||||
description: The duration of the request in seconds
|
||||
|
||||
ProofProbability:
|
||||
type: string
|
||||
description: How often storage proofs are required as decimal string
|
||||
|
||||
Expiry:
|
||||
type: string
|
||||
type: integer
|
||||
format: int64
|
||||
description: A timestamp as seconds since unix epoch at which this request expires if the Request does not find requested amount of nodes to host the data.
|
||||
default: 10 minutes
|
||||
|
||||
SPR:
|
||||
type: string
|
||||
@ -73,6 +74,8 @@ components:
|
||||
|
||||
SPRRead:
|
||||
type: object
|
||||
required:
|
||||
- spr
|
||||
properties:
|
||||
spr:
|
||||
$ref: "#/components/schemas/SPR"
|
||||
@ -85,6 +88,8 @@ components:
|
||||
|
||||
Content:
|
||||
type: object
|
||||
required:
|
||||
- cid
|
||||
description: Parameters specifying the content
|
||||
properties:
|
||||
cid:
|
||||
@ -92,6 +97,12 @@ components:
|
||||
|
||||
Node:
|
||||
type: object
|
||||
required:
|
||||
- nodeId
|
||||
- peerId
|
||||
- record
|
||||
- address
|
||||
- seen
|
||||
properties:
|
||||
nodeId:
|
||||
type: string
|
||||
@ -113,9 +124,15 @@ components:
|
||||
revision:
|
||||
type: string
|
||||
example: 0c647d8
|
||||
contracts:
|
||||
type: string
|
||||
example: 0b537c7
|
||||
|
||||
PeersTable:
|
||||
type: object
|
||||
required:
|
||||
- localNode
|
||||
- nodes
|
||||
properties:
|
||||
localNode:
|
||||
$ref: "#/components/schemas/Node"
|
||||
@ -126,6 +143,14 @@ components:
|
||||
|
||||
DebugInfo:
|
||||
type: object
|
||||
required:
|
||||
- id
|
||||
- addrs
|
||||
- repo
|
||||
- spr
|
||||
- announceAddresses
|
||||
- table
|
||||
- codex
|
||||
properties:
|
||||
id:
|
||||
$ref: "#/components/schemas/PeerId"
|
||||
@ -149,12 +174,16 @@ components:
|
||||
|
||||
SalesAvailability:
type: object
required:
- totalSize
- duration
- minPricePerBytePerSecond
- totalCollateral
properties:
id:
$ref: "#/components/schemas/Id"
totalSize:
type: string
description: Total size of availability's storage in bytes as decimal string
type: integer
format: int64
description: Total size of availability's storage in bytes
duration:
$ref: "#/components/schemas/Duration"
minPricePerBytePerSecond:
@ -163,44 +192,63 @@
totalCollateral:
type: string
description: Total collateral (in amount of tokens) that can be used for matching requests
enabled:
type: boolean
description: Enable the ability to receive sales on this availability.
default: true
until:
type: integer
description: Specifies the latest timestamp, after which the availability will no longer host any slots. If set to 0, there will be no restrictions.
default: 0

SalesAvailabilityREAD:
|
||||
required:
|
||||
- id
|
||||
- totalRemainingCollateral
|
||||
allOf:
|
||||
- $ref: "#/components/schemas/SalesAvailability"
|
||||
- type: object
|
||||
properties:
|
||||
id:
|
||||
$ref: "#/components/schemas/Id"
|
||||
readonly: true
|
||||
freeSize:
|
||||
type: string
|
||||
type: integer
|
||||
format: int64
|
||||
description: Unused size of availability's storage in bytes as decimal string
|
||||
|
||||
SalesAvailabilityCREATE:
|
||||
allOf:
|
||||
- $ref: "#/components/schemas/SalesAvailability"
|
||||
- required:
|
||||
- totalSize
|
||||
- minPricePerBytePerSecond
|
||||
- totalCollateral
|
||||
- duration
|
||||
readOnly: true
|
||||
totalRemainingCollateral:
|
||||
type: string
|
||||
description: Total collateral effective (in amount of tokens) that can be used for matching requests
|
||||
readOnly: true
|
||||
|
||||
Slot:
|
||||
type: object
|
||||
required:
|
||||
- id
|
||||
- request
|
||||
- slotIndex
|
||||
properties:
|
||||
id:
|
||||
$ref: "#/components/schemas/SlotId"
|
||||
request:
|
||||
$ref: "#/components/schemas/StorageRequest"
|
||||
slotIndex:
|
||||
type: string
|
||||
description: Slot Index as decimal string
|
||||
type: integer
|
||||
format: int64
|
||||
description: Slot Index number
|
||||
|
||||
SlotAgent:
|
||||
type: object
|
||||
required:
|
||||
- state
|
||||
- requestId
|
||||
- slotIndex
|
||||
properties:
|
||||
id:
|
||||
$ref: "#/components/schemas/SlotId"
|
||||
slotIndex:
|
||||
type: string
|
||||
description: Slot Index as decimal string
|
||||
type: integer
|
||||
format: int64
|
||||
description: Slot Index number
|
||||
requestId:
|
||||
$ref: "#/components/schemas/Id"
|
||||
request:
|
||||
@ -227,18 +275,31 @@ components:
|
||||
|
||||
Reservation:
|
||||
type: object
|
||||
required:
|
||||
- id
|
||||
- availabilityId
|
||||
- size
|
||||
- requestId
|
||||
- slotIndex
|
||||
- validUntil
|
||||
properties:
|
||||
id:
|
||||
$ref: "#/components/schemas/Id"
|
||||
availabilityId:
|
||||
$ref: "#/components/schemas/Id"
|
||||
size:
|
||||
$ref: "#/components/schemas/BigInt"
|
||||
type: integer
|
||||
format: int64
|
||||
description: Size of the slot in bytes
|
||||
requestId:
|
||||
$ref: "#/components/schemas/Id"
|
||||
slotIndex:
|
||||
type: string
|
||||
description: Slot Index as decimal string
|
||||
type: integer
|
||||
format: int64
|
||||
description: Slot Index number
|
||||
validUntil:
|
||||
type: integer
|
||||
description: Timestamp after which the reservation will no longer be valid.
|
||||
|
||||
StorageRequestCreation:
|
||||
type: object
|
||||
@ -258,40 +319,60 @@ components:
|
||||
nodes:
|
||||
description: Minimal number of nodes the content should be stored on
|
||||
type: integer
|
||||
default: 1
|
||||
default: 3
|
||||
minimum: 3
|
||||
tolerance:
|
||||
description: Additional number of nodes on top of the `nodes` property that can be lost before pronouncing the content lost
|
||||
type: integer
|
||||
default: 0
|
||||
default: 1
|
||||
minimum: 1
|
||||
collateralPerByte:
|
||||
type: string
|
||||
description: Number as decimal string that represents how much collateral per byte is asked from hosts that wants to fill a slots
|
||||
$ref: "#/components/schemas/CollateralPerByte"
|
||||
expiry:
|
||||
type: string
|
||||
description: Number as decimal string that represents expiry threshold in seconds from when the Request is submitted. When the threshold is reached and the Request does not find requested amount of nodes to host the data, the Request is voided. The number of seconds can not be higher then the Request's duration itself.
|
||||
type: integer
|
||||
format: int64
|
||||
description: Number that represents expiry threshold in seconds from when the Request is submitted. When the threshold is reached and the Request does not find requested amount of nodes to host the data, the Request is voided. The number of seconds can not be higher then the Request's duration itself.
|
||||
StorageAsk:
|
||||
type: object
|
||||
required:
|
||||
- slots
|
||||
- slotSize
|
||||
- duration
|
||||
- proofProbability
|
||||
- pricePerBytePerSecond
|
||||
- collateralPerByte
|
||||
- maxSlotLoss
|
||||
properties:
|
||||
slots:
|
||||
description: Number of slots (eq. hosts) that the Request want to have the content spread over
|
||||
type: integer
|
||||
format: int64
|
||||
slotSize:
|
||||
type: string
|
||||
description: Amount of storage per slot (in bytes) as decimal string
|
||||
type: integer
|
||||
format: int64
|
||||
description: Amount of storage per slot in bytes
|
||||
duration:
|
||||
$ref: "#/components/schemas/Duration"
|
||||
proofProbability:
|
||||
$ref: "#/components/schemas/ProofProbability"
|
||||
pricePerBytePerSecond:
|
||||
$ref: "#/components/schemas/PricePerBytePerSecond"
|
||||
collateralPerByte:
|
||||
$ref: "#/components/schemas/CollateralPerByte"
|
||||
maxSlotLoss:
|
||||
type: integer
|
||||
format: int64
|
||||
description: Max slots that can be lost without data considered to be lost
|
||||
|
||||
StorageRequest:
|
||||
type: object
|
||||
required:
|
||||
- id
|
||||
- client
|
||||
- ask
|
||||
- content
|
||||
- expiry
|
||||
- nonce
|
||||
properties:
|
||||
id:
|
||||
type: string
|
||||
@ -310,13 +391,16 @@ components:
|
||||
|
||||
Purchase:
|
||||
type: object
|
||||
required:
|
||||
- state
|
||||
- requestId
|
||||
properties:
|
||||
state:
|
||||
type: string
|
||||
description: Description of the Request's state
|
||||
enum:
|
||||
- cancelled
|
||||
- error
|
||||
- errored
|
||||
- failed
|
||||
- finished
|
||||
- pending
|
||||
@ -329,9 +413,13 @@ components:
|
||||
description: If Request failed, then here is presented the error message
|
||||
request:
|
||||
$ref: "#/components/schemas/StorageRequest"
|
||||
requestId:
|
||||
$ref: "#/components/schemas/Id"
|
||||
|
||||
DataList:
|
||||
type: object
|
||||
required:
|
||||
- content
|
||||
properties:
|
||||
content:
|
||||
type: array
|
||||
@ -340,6 +428,9 @@ components:
|
||||
|
||||
DataItem:
|
||||
type: object
|
||||
required:
|
||||
- cid
|
||||
- manifest
|
||||
properties:
|
||||
cid:
|
||||
$ref: "#/components/schemas/Cid"
|
||||
@ -348,6 +439,11 @@ components:
|
||||
|
||||
ManifestItem:
|
||||
type: object
|
||||
required:
|
||||
- treeCid
|
||||
- datasetSize
|
||||
- blockSize
|
||||
- protected
|
||||
properties:
|
||||
treeCid:
|
||||
$ref: "#/components/schemas/Cid"
|
||||
@ -375,6 +471,11 @@ components:
|
||||
|
||||
Space:
|
||||
type: object
|
||||
required:
|
||||
- totalBlocks
|
||||
- quotaMaxBytes
|
||||
- quotaUsedBytes
|
||||
- quotaReservedBytes
|
||||
properties:
|
||||
totalBlocks:
|
||||
description: "Number of blocks stored by the node"
|
||||
@ -493,6 +594,8 @@ paths:
|
||||
text/plain:
|
||||
schema:
|
||||
type: string
|
||||
"422":
|
||||
description: The mimetype of the filename is invalid
|
||||
"500":
|
||||
description: Well it was bad-bad and the upload did not work out
|
||||
|
||||
@ -524,6 +627,26 @@ paths:
|
||||
"500":
|
||||
description: Well it was bad-bad
|
||||
|
||||
delete:
|
||||
summary: "Deletes either a single block or an entire dataset from the local node."
|
||||
tags: [Data]
|
||||
operationId: deleteLocal
|
||||
parameters:
|
||||
- in: path
|
||||
name: cid
|
||||
required: true
|
||||
schema:
|
||||
$ref: "#/components/schemas/Cid"
|
||||
description: Block or dataset to be deleted.
|
||||
|
||||
responses:
|
||||
"204":
|
||||
description: Data was successfully deleted.
|
||||
"400":
|
||||
description: Invalid CID is specified
|
||||
"500":
|
||||
description: There was an error during deletion
|
||||
|
||||
"/data/{cid}/network":
|
||||
post:
|
||||
summary: "Download a file from the network to the local node if it's not available locally. Note: Download is performed async. Call can return before download is completed."
|
||||
@ -693,7 +816,7 @@ paths:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/SalesAvailabilityCREATE"
|
||||
$ref: "#/components/schemas/SalesAvailability"
|
||||
responses:
|
||||
"201":
|
||||
description: Created storage availability
|
||||
@ -704,7 +827,7 @@ paths:
|
||||
"400":
|
||||
description: Invalid data input
|
||||
"422":
|
||||
description: Not enough node's storage quota available
|
||||
description: Not enough node's storage quota available or the provided parameters did not pass validation
|
||||
"500":
|
||||
description: Error reserving availability
|
||||
"503":
|
||||
@ -737,7 +860,7 @@ paths:
|
||||
"404":
|
||||
description: Availability not found
|
||||
"422":
|
||||
description: Not enough node's storage quota available
|
||||
description: The provided parameters did not pass validation
|
||||
"500":
|
||||
description: Error reserving availability
|
||||
"503":
|
||||
@ -800,6 +923,8 @@ paths:
|
||||
type: string
|
||||
"400":
|
||||
description: Invalid or missing Request ID
|
||||
"422":
|
||||
description: The storage request parameters are not valid
|
||||
"404":
|
||||
description: Request ID not found
|
||||
"503":
|
||||
@ -857,7 +982,7 @@ paths:
|
||||
"200":
|
||||
description: Node's SPR
|
||||
content:
|
||||
plain/text:
|
||||
text/plain:
|
||||
schema:
|
||||
$ref: "#/components/schemas/SPR"
|
||||
application/json:
|
||||
@ -875,7 +1000,7 @@ paths:
|
||||
"200":
|
||||
description: Node's Peer ID
|
||||
content:
|
||||
plain/text:
|
||||
text/plain:
|
||||
schema:
|
||||
$ref: "#/components/schemas/PeerId"
|
||||
application/json:
|
||||
|
||||
7
redocly.yaml
Normal file
7
redocly.yaml
Normal file
@ -0,0 +1,7 @@
|
||||
extends:
|
||||
- recommended
|
||||
|
||||
rules:
|
||||
info-license: off
|
||||
no-required-schema-properties-undefined: error
|
||||
no-server-example.com: off
|
||||
@ -75,6 +75,8 @@ proc example*(
|
||||
duration = uint16.example.uint64,
|
||||
minPricePerBytePerSecond = uint8.example.u256,
|
||||
totalCollateral = totalSize.u256 * collateralPerByte,
|
||||
enabled = true,
|
||||
until = 0.SecondsSince1970,
|
||||
)
|
||||
|
||||
proc example*(_: type Reservation): Reservation =
|
||||
|
||||
@ -8,6 +8,7 @@ import pkg/codex/market
|
||||
import pkg/codex/contracts/requests
|
||||
import pkg/codex/contracts/proofs
|
||||
import pkg/codex/contracts/config
|
||||
import pkg/questionable/results
|
||||
|
||||
from pkg/ethers import BlockTag
|
||||
import codex/clock
|
||||
@ -46,7 +47,10 @@ type
|
||||
subscriptions: Subscriptions
|
||||
config*: MarketplaceConfig
|
||||
canReserveSlot*: bool
|
||||
reserveSlotThrowError*: ?(ref MarketError)
|
||||
errorOnReserveSlot*: ?(ref MarketError)
|
||||
errorOnFillSlot*: ?(ref MarketError)
|
||||
errorOnFreeSlot*: ?(ref MarketError)
|
||||
errorOnGetHost*: ?(ref MarketError)
|
||||
clock: ?Clock
|
||||
|
||||
Fulfillment* = object
|
||||
@ -143,7 +147,9 @@ method loadConfig*(
|
||||
): Future[?!void] {.async: (raises: [CancelledError]).} =
|
||||
discard
|
||||
|
||||
method getSigner*(market: MockMarket): Future[Address] {.async.} =
|
||||
method getSigner*(
|
||||
market: MockMarket
|
||||
): Future[Address] {.async: (raises: [CancelledError, MarketError]).} =
|
||||
return market.signer
|
||||
|
||||
method periodicity*(
|
||||
@ -172,7 +178,9 @@ method repairRewardPercentage*(
|
||||
method getPointer*(market: MockMarket, slotId: SlotId): Future[uint8] {.async.} =
|
||||
return market.proofPointer
|
||||
|
||||
method requestStorage*(market: MockMarket, request: StorageRequest) {.async.} =
|
||||
method requestStorage*(
|
||||
market: MockMarket, request: StorageRequest
|
||||
) {.async: (raises: [CancelledError, MarketError]).} =
|
||||
market.requested.add(request)
|
||||
var subscriptions = market.subscriptions.onRequest
|
||||
for subscription in subscriptions:
|
||||
@ -227,7 +235,10 @@ method requestExpiresAt*(
|
||||
|
||||
method getHost*(
|
||||
market: MockMarket, requestId: RequestId, slotIndex: uint64
|
||||
): Future[?Address] {.async.} =
|
||||
): Future[?Address] {.async: (raises: [CancelledError, MarketError]).} =
|
||||
if error =? market.errorOnGetHost:
|
||||
raise error
|
||||
|
||||
for slot in market.filled:
|
||||
if slot.requestId == requestId and slot.slotIndex == slotIndex:
|
||||
return some slot.host
|
||||
@ -235,7 +246,7 @@ method getHost*(
|
||||
|
||||
method currentCollateral*(
|
||||
market: MockMarket, slotId: SlotId
|
||||
): Future[UInt256] {.async.} =
|
||||
): Future[UInt256] {.async: (raises: [MarketError, CancelledError]).} =
|
||||
for slot in market.filled:
|
||||
if slotId == slotId(slot.requestId, slot.slotIndex):
|
||||
return slot.collateral
|
||||
@ -289,6 +300,9 @@ proc fillSlot*(
|
||||
host: Address,
|
||||
collateral = 0.u256,
|
||||
) =
|
||||
if error =? market.errorOnFillSlot:
|
||||
raise error
|
||||
|
||||
let slot = MockSlot(
|
||||
requestId: requestId,
|
||||
slotIndex: slotIndex,
|
||||
@ -307,10 +321,15 @@ method fillSlot*(
|
||||
slotIndex: uint64,
|
||||
proof: Groth16Proof,
|
||||
collateral: UInt256,
|
||||
) {.async.} =
|
||||
) {.async: (raises: [CancelledError, MarketError]).} =
|
||||
market.fillSlot(requestId, slotIndex, proof, market.signer, collateral)
|
||||
|
||||
method freeSlot*(market: MockMarket, slotId: SlotId) {.async.} =
|
||||
method freeSlot*(
|
||||
market: MockMarket, slotId: SlotId
|
||||
) {.async: (raises: [CancelledError, MarketError]).} =
|
||||
if error =? market.errorOnFreeSlot:
|
||||
raise error
|
||||
|
||||
market.freed.add(slotId)
|
||||
for s in market.filled:
|
||||
if slotId(s.requestId, s.slotIndex) == slotId:
|
||||
@ -318,7 +337,9 @@ method freeSlot*(market: MockMarket, slotId: SlotId) {.async.} =
|
||||
break
|
||||
market.slotState[slotId] = SlotState.Free
|
||||
|
||||
method withdrawFunds*(market: MockMarket, requestId: RequestId) {.async.} =
|
||||
method withdrawFunds*(
|
||||
market: MockMarket, requestId: RequestId
|
||||
) {.async: (raises: [CancelledError, MarketError]).} =
|
||||
market.withdrawn.add(requestId)
|
||||
|
||||
if state =? market.requestState .? [requestId] and state == RequestState.Cancelled:
|
||||
@ -348,12 +369,16 @@ method getChallenge*(mock: MockMarket, id: SlotId): Future[ProofChallenge] {.asy
|
||||
proc setProofEnd*(mock: MockMarket, id: SlotId, proofEnd: UInt256) =
|
||||
mock.proofEnds[id] = proofEnd
|
||||
|
||||
method submitProof*(mock: MockMarket, id: SlotId, proof: Groth16Proof) {.async.} =
|
||||
method submitProof*(
|
||||
mock: MockMarket, id: SlotId, proof: Groth16Proof
|
||||
) {.async: (raises: [CancelledError, MarketError]).} =
|
||||
mock.submitted.add(proof)
|
||||
for subscription in mock.subscriptions.onProofSubmitted:
|
||||
subscription.callback(id)
|
||||
|
||||
method markProofAsMissing*(market: MockMarket, id: SlotId, period: Period) {.async.} =
|
||||
method markProofAsMissing*(
|
||||
market: MockMarket, id: SlotId, period: Period
|
||||
) {.async: (raises: [CancelledError, MarketError]).} =
|
||||
market.markedAsMissingProofs.add(id)
|
||||
|
||||
proc setCanProofBeMarkedAsMissing*(mock: MockMarket, id: SlotId, required: bool) =
|
||||
@ -369,8 +394,8 @@ method canProofBeMarkedAsMissing*(
|
||||
|
||||
method reserveSlot*(
market: MockMarket, requestId: RequestId, slotIndex: uint64
) {.async.} =
if error =? market.reserveSlotThrowError:
) {.async: (raises: [CancelledError, MarketError]).} =
if error =? market.errorOnReserveSlot:
raise error

method canReserveSlot*(
@ -381,8 +406,33 @@ method canReserveSlot*(
func setCanReserveSlot*(market: MockMarket, canReserveSlot: bool) =
market.canReserveSlot = canReserveSlot

func setReserveSlotThrowError*(market: MockMarket, error: ?(ref MarketError)) =
market.reserveSlotThrowError = error
func setErrorOnReserveSlot*(market: MockMarket, error: ref MarketError) =
market.errorOnReserveSlot =
if error.isNil:
none (ref MarketError)
else:
some error

func setErrorOnFillSlot*(market: MockMarket, error: ref MarketError) =
market.errorOnFillSlot =
if error.isNil:
none (ref MarketError)
else:
some error

func setErrorOnFreeSlot*(market: MockMarket, error: ref MarketError) =
market.errorOnFreeSlot =
if error.isNil:
none (ref MarketError)
else:
some error

func setErrorOnGetHost*(market: MockMarket, error: ref MarketError) =
market.errorOnGetHost =
if error.isNil:
none (ref MarketError)
else:
some error

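These setters fold a possibly-nil error ref into the Option fields declared earlier, so a test can both arm and disarm a failure. Arming mirrors the cancelled-state tests later in this diff; disarming with nil follows from the isNil branch:

# inside a test body, with market: MockMarket in scope
let error = newException(SlotStateMismatchError, "slot is already free")
market.setErrorOnFreeSlot(error)

# later, map back to none to restore normal behaviour
market.setErrorOnFreeSlot(nil)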
method subscribeRequests*(
|
||||
market: MockMarket, callback: OnRequest
|
||||
|
||||
@ -2,6 +2,7 @@ import pkg/chronos
|
||||
import pkg/codex/sales
|
||||
import pkg/codex/stores
|
||||
import pkg/questionable/results
|
||||
import pkg/codex/clock
|
||||
|
||||
type MockReservations* = ref object of Reservations
|
||||
createReservationThrowBytesOutOfBoundsError: bool
|
||||
@ -28,6 +29,7 @@ method createReservation*(
|
||||
requestId: RequestId,
|
||||
slotIndex: uint64,
|
||||
collateralPerByte: UInt256,
|
||||
validUntil: SecondsSince1970,
|
||||
): Future[?!Reservation] {.async.} =
|
||||
if self.createReservationThrowBytesOutOfBoundsError:
|
||||
let error = newException(
|
||||
@ -45,4 +47,5 @@ method createReservation*(
|
||||
requestId,
|
||||
slotIndex,
|
||||
collateralPerByte,
|
||||
validUntil,
|
||||
)
|
||||
|
||||
@ -33,10 +33,10 @@ proc new*(
|
||||
return 0
|
||||
|
||||
var read = 0
|
||||
while read < len:
|
||||
while read < len and (pad or read < size - consumed):
|
||||
rng.shuffle(alpha)
|
||||
for a in alpha:
|
||||
if read >= len:
|
||||
if read >= len or (not pad and read >= size - consumed):
|
||||
break
|
||||
|
||||
data[read] = a
|
||||
|
||||
@ -30,6 +30,7 @@ import pkg/codex/discovery
|
||||
import pkg/codex/erasure
|
||||
import pkg/codex/merkletree
|
||||
import pkg/codex/blocktype as bt
|
||||
import pkg/codex/rng
|
||||
|
||||
import pkg/codex/node {.all.}
|
||||
|
||||
@ -78,6 +79,31 @@ asyncchecksuite "Test Node - Basic":
|
||||
)
|
||||
).tryGet()
|
||||
|
||||
test "Block Batching with corrupted blocks":
|
||||
let blocks = await makeRandomBlocks(datasetSize = 64.KiBs.int, blockSize = 64.KiBs)
|
||||
assert blocks.len == 1
|
||||
|
||||
let blk = blocks[0]
|
||||
|
||||
# corrupt block
|
||||
let pos = rng.Rng.instance.rand(blk.data.len - 1)
|
||||
blk.data[pos] = byte 0
|
||||
|
||||
let manifest = await storeDataGetManifest(localStore, blocks)
|
||||
|
||||
let batchSize = manifest.blocksCount
|
||||
let res = (
|
||||
await node.fetchBatched(
|
||||
manifest,
|
||||
batchSize = batchSize,
|
||||
proc(blocks: seq[bt.Block]): Future[?!void] {.gcsafe, async.} =
|
||||
return failure("Should not be called"),
|
||||
)
|
||||
)
|
||||
check res.isFailure
|
||||
check res.error of CatchableError
|
||||
check res.error.msg == "Some blocks failed (Result) to fetch (1)"
|
||||
|
||||
test "Should store Data Stream":
|
||||
let
|
||||
stream = BufferStream.new()
|
||||
|
||||
@ -2,9 +2,11 @@ import pkg/questionable
|
||||
import pkg/chronos
|
||||
import pkg/codex/contracts/requests
|
||||
import pkg/codex/sales/states/cancelled
|
||||
import pkg/codex/sales/states/errored
|
||||
import pkg/codex/sales/salesagent
|
||||
import pkg/codex/sales/salescontext
|
||||
import pkg/codex/market
|
||||
from pkg/codex/utils/asyncstatemachine import State
|
||||
|
||||
import ../../../asynctest
|
||||
import ../../examples
|
||||
@ -22,16 +24,14 @@ asyncchecksuite "sales state 'cancelled'":
|
||||
var market: MockMarket
|
||||
var state: SaleCancelled
|
||||
var agent: SalesAgent
|
||||
var returnBytesWas = bool.none
|
||||
var reprocessSlotWas = bool.none
|
||||
var returnedCollateralValue = UInt256.none
|
||||
var reprocessSlotWas: ?bool
|
||||
var returnedCollateralValue: ?UInt256
|
||||
|
||||
setup:
|
||||
market = MockMarket.new()
|
||||
let onCleanUp = proc(
|
||||
returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none
|
||||
reprocessSlot = false, returnedCollateral = UInt256.none
|
||||
) {.async.} =
|
||||
returnBytesWas = some returnBytes
|
||||
reprocessSlotWas = some reprocessSlot
|
||||
returnedCollateralValue = returnedCollateral
|
||||
|
||||
@ -39,8 +39,43 @@ asyncchecksuite "sales state 'cancelled'":
|
||||
agent = newSalesAgent(context, request.id, slotIndex, request.some)
|
||||
agent.onCleanUp = onCleanUp
|
||||
state = SaleCancelled.new()
|
||||
reprocessSlotWas = bool.none
|
||||
returnedCollateralValue = UInt256.none
|
||||
teardown:
|
||||
reprocessSlotWas = bool.none
|
||||
returnedCollateralValue = UInt256.none
|
||||
|
||||
test "calls onCleanUp with returnBytes = false, reprocessSlot = true, and returnedCollateral = currentCollateral":
|
||||
test "calls onCleanUp with reprocessSlot = true, and returnedCollateral = currentCollateral":
|
||||
market.fillSlot(
|
||||
requestId = request.id,
|
||||
slotIndex = slotIndex,
|
||||
proof = Groth16Proof.default,
|
||||
host = await market.getSigner(),
|
||||
collateral = currentCollateral,
|
||||
)
|
||||
discard await state.run(agent)
|
||||
check eventually reprocessSlotWas == some false
|
||||
check eventually returnedCollateralValue == some currentCollateral
|
||||
|
||||
test "completes the cancelled state when free slot error is raised and the collateral is returned when a host is hosting a slot":
|
||||
market.fillSlot(
|
||||
requestId = request.id,
|
||||
slotIndex = slotIndex,
|
||||
proof = Groth16Proof.default,
|
||||
host = await market.getSigner(),
|
||||
collateral = currentCollateral,
|
||||
)
|
||||
|
||||
let error =
|
||||
newException(SlotStateMismatchError, "Failed to free slot, slot is already free")
|
||||
market.setErrorOnFreeSlot(error)
|
||||
|
||||
let next = await state.run(agent)
|
||||
check next == none State
|
||||
check eventually reprocessSlotWas == some false
|
||||
check eventually returnedCollateralValue == some currentCollateral
|
||||
|
||||
test "completes the cancelled state when free slot error is raised and the collateral is not returned when a host is not hosting a slot":
|
||||
market.fillSlot(
|
||||
requestId = request.id,
|
||||
slotIndex = slotIndex,
|
||||
@ -48,7 +83,30 @@ asyncchecksuite "sales state 'cancelled'":
|
||||
host = Address.example,
|
||||
collateral = currentCollateral,
|
||||
)
|
||||
discard await state.run(agent)
|
||||
check eventually returnBytesWas == some true
|
||||
|
||||
let error =
|
||||
newException(SlotStateMismatchError, "Failed to free slot, slot is already free")
|
||||
market.setErrorOnFreeSlot(error)
|
||||
|
||||
let next = await state.run(agent)
|
||||
check next == none State
|
||||
check eventually reprocessSlotWas == some false
|
||||
check eventually returnedCollateralValue == some currentCollateral
|
||||
check eventually returnedCollateralValue == UInt256.none
|
||||
|
||||
test "calls onCleanUp and returns the collateral when an error is raised":
|
||||
market.fillSlot(
|
||||
requestId = request.id,
|
||||
slotIndex = slotIndex,
|
||||
proof = Groth16Proof.default,
|
||||
host = Address.example,
|
||||
collateral = currentCollateral,
|
||||
)
|
||||
|
||||
let error = newException(MarketError, "")
|
||||
market.setErrorOnGetHost(error)
|
||||
|
||||
let next = !(await state.run(agent))
|
||||
|
||||
check next of SaleErrored
|
||||
let errored = SaleErrored(next)
|
||||
check errored.error == error
|
||||
|
||||
@ -20,14 +20,12 @@ asyncchecksuite "sales state 'errored'":
|
||||
|
||||
var state: SaleErrored
|
||||
var agent: SalesAgent
|
||||
var returnBytesWas = false
|
||||
var reprocessSlotWas = false
|
||||
|
||||
setup:
|
||||
let onCleanUp = proc(
|
||||
returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none
|
||||
reprocessSlot = false, returnedCollateral = UInt256.none
|
||||
) {.async.} =
|
||||
returnBytesWas = returnBytes
|
||||
reprocessSlotWas = reprocessSlot
|
||||
|
||||
let context = SalesContext(market: market, clock: clock)
|
||||
@ -35,8 +33,7 @@ asyncchecksuite "sales state 'errored'":
|
||||
agent.onCleanUp = onCleanUp
|
||||
state = SaleErrored(error: newException(ValueError, "oh no!"))
|
||||
|
||||
test "calls onCleanUp with returnBytes = false and reprocessSlot = true":
|
||||
test "calls onCleanUp with reprocessSlot = true":
|
||||
state = SaleErrored(error: newException(ValueError, "oh no!"), reprocessSlot: true)
|
||||
discard await state.run(agent)
|
||||
check eventually returnBytesWas == true
|
||||
check eventually reprocessSlotWas == true
|
||||
|
||||
@ -1,18 +1,31 @@
|
||||
import pkg/unittest2
|
||||
import pkg/questionable
|
||||
import pkg/codex/contracts/requests
|
||||
import pkg/codex/sales/states/filling
|
||||
import pkg/codex/sales/states/cancelled
|
||||
import pkg/codex/sales/states/failed
|
||||
import pkg/codex/sales/states/ignored
|
||||
import pkg/codex/sales/states/errored
|
||||
import pkg/codex/sales/salesagent
|
||||
import pkg/codex/sales/salescontext
|
||||
import ../../../asynctest
|
||||
import ../../examples
|
||||
import ../../helpers
|
||||
import ../../helpers/mockmarket
|
||||
import ../../helpers/mockclock
|
||||
|
||||
suite "sales state 'filling'":
|
||||
let request = StorageRequest.example
|
||||
let slotIndex = request.ask.slots div 2
|
||||
var state: SaleFilling
|
||||
var market: MockMarket
|
||||
var clock: MockClock
|
||||
var agent: SalesAgent
|
||||
|
||||
setup:
|
||||
clock = MockClock.new()
|
||||
market = MockMarket.new()
|
||||
let context = SalesContext(market: market, clock: clock)
|
||||
agent = newSalesAgent(context, request.id, slotIndex, request.some)
|
||||
state = SaleFilling.new()
|
||||
|
||||
test "switches to cancelled state when request expires":
|
||||
@ -22,3 +35,27 @@ suite "sales state 'filling'":
|
||||
test "switches to failed state when request fails":
|
||||
let next = state.onFailed(request)
|
||||
check !next of SaleFailed
|
||||
|
||||
test "run switches to ignored when slot is not free":
|
||||
let error = newException(
|
||||
SlotStateMismatchError, "Failed to fill slot because the slot is not free"
|
||||
)
|
||||
market.setErrorOnFillSlot(error)
|
||||
market.requested.add(request)
|
||||
market.slotState[request.slotId(slotIndex)] = SlotState.Filled
|
||||
|
||||
let next = !(await state.run(agent))
|
||||
check next of SaleIgnored
|
||||
check SaleIgnored(next).reprocessSlot == false
|
||||
|
||||
test "run switches to errored with other error ":
|
||||
let error = newException(MarketError, "some error")
|
||||
market.setErrorOnFillSlot(error)
|
||||
market.requested.add(request)
|
||||
market.slotState[request.slotId(slotIndex)] = SlotState.Filled
|
||||
|
||||
let next = !(await state.run(agent))
|
||||
check next of SaleErrored
|
||||
|
||||
let errored = SaleErrored(next)
|
||||
check errored.error == error
|
||||
|
||||
@ -23,22 +23,23 @@ asyncchecksuite "sales state 'finished'":
|
||||
var market: MockMarket
|
||||
var state: SaleFinished
|
||||
var agent: SalesAgent
|
||||
var returnBytesWas = bool.none
|
||||
var reprocessSlotWas = bool.none
|
||||
var returnedCollateralValue = UInt256.none
|
||||
var saleCleared = bool.none
|
||||
|
||||
setup:
|
||||
market = MockMarket.new()
|
||||
let onCleanUp = proc(
|
||||
returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none
|
||||
reprocessSlot = false, returnedCollateral = UInt256.none
|
||||
) {.async.} =
|
||||
returnBytesWas = some returnBytes
|
||||
reprocessSlotWas = some reprocessSlot
|
||||
returnedCollateralValue = returnedCollateral
|
||||
|
||||
let context = SalesContext(market: market, clock: clock)
|
||||
agent = newSalesAgent(context, request.id, slotIndex, request.some)
|
||||
agent.onCleanUp = onCleanUp
|
||||
agent.context.onClear = some proc(request: StorageRequest, idx: uint64) =
|
||||
saleCleared = some true
|
||||
state = SaleFinished(returnedCollateral: some currentCollateral)
|
||||
|
||||
test "switches to cancelled state when request expires":
|
||||
@ -49,8 +50,8 @@ asyncchecksuite "sales state 'finished'":
|
||||
let next = state.onFailed(request)
|
||||
check !next of SaleFailed
|
||||
|
||||
test "calls onCleanUp with returnBytes = false, reprocessSlot = true, and returnedCollateral = currentCollateral":
|
||||
test "calls onCleanUp with reprocessSlot = true, and returnedCollateral = currentCollateral":
|
||||
discard await state.run(agent)
|
||||
check eventually returnBytesWas == some false
|
||||
check eventually reprocessSlotWas == some false
|
||||
check eventually returnedCollateralValue == some currentCollateral
|
||||
check eventually saleCleared == some true
|
||||
|
||||
@ -20,14 +20,12 @@ asyncchecksuite "sales state 'ignored'":
|
||||
|
||||
var state: SaleIgnored
|
||||
var agent: SalesAgent
|
||||
var returnBytesWas = false
|
||||
var reprocessSlotWas = false
|
||||
|
||||
setup:
|
||||
let onCleanUp = proc(
|
||||
returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none
|
||||
reprocessSlot = false, returnedCollateral = UInt256.none
|
||||
) {.async.} =
|
||||
returnBytesWas = returnBytes
|
||||
reprocessSlotWas = reprocessSlot
|
||||
|
||||
let context = SalesContext(market: market, clock: clock)
|
||||
@ -36,7 +34,6 @@ asyncchecksuite "sales state 'ignored'":
|
||||
state = SaleIgnored.new()
|
||||
|
||||
test "calls onCleanUp with values assigned to SaleIgnored":
|
||||
state = SaleIgnored(reprocessSlot: true, returnBytes: true)
|
||||
state = SaleIgnored(reprocessSlot: true)
|
||||
discard await state.run(agent)
|
||||
check eventually returnBytesWas == true
|
||||
check eventually reprocessSlotWas == true
|
||||
|
||||
@ -13,6 +13,7 @@ import pkg/codex/sales/salesagent
|
||||
import pkg/codex/sales/salescontext
|
||||
import pkg/codex/sales/reservations
|
||||
import pkg/codex/stores/repostore
|
||||
import times
|
||||
import ../../../asynctest
|
||||
import ../../helpers
|
||||
import ../../examples
|
||||
@ -39,6 +40,8 @@ asyncchecksuite "sales state 'preparing'":
|
||||
duration = request.ask.duration + 60.uint64,
|
||||
minPricePerBytePerSecond = request.ask.pricePerBytePerSecond,
|
||||
totalCollateral = request.ask.collateralPerSlot * request.ask.slots.u256,
|
||||
enabled = true,
|
||||
until = 0.SecondsSince1970,
|
||||
)
|
||||
let repoDs = SQLiteDatastore.new(Memory).tryGet()
|
||||
let metaDs = SQLiteDatastore.new(Memory).tryGet()
|
||||
@ -52,6 +55,8 @@ asyncchecksuite "sales state 'preparing'":
|
||||
context.reservations = reservations
|
||||
agent = newSalesAgent(context, request.id, slotIndex, request.some)
|
||||
|
||||
market.requestEnds[request.id] = clock.now() + cast[int64](request.ask.duration)
|
||||
|
||||
teardown:
|
||||
await repo.stop()
|
||||
|
||||
@ -67,10 +72,14 @@ asyncchecksuite "sales state 'preparing'":
|
||||
let next = state.onSlotFilled(request.id, slotIndex)
|
||||
check !next of SaleFilled
|
||||
|
||||
proc createAvailability() {.async.} =
|
||||
proc createAvailability(enabled = true) {.async.} =
|
||||
let a = await reservations.createAvailability(
|
||||
availability.totalSize, availability.duration,
|
||||
availability.minPricePerBytePerSecond, availability.totalCollateral,
|
||||
availability.totalSize,
|
||||
availability.duration,
|
||||
availability.minPricePerBytePerSecond,
|
||||
availability.totalCollateral,
|
||||
enabled,
|
||||
until = 0.SecondsSince1970,
|
||||
)
|
||||
availability = a.get
|
||||
|
||||
@ -79,7 +88,11 @@ asyncchecksuite "sales state 'preparing'":
|
||||
check next of SaleIgnored
|
||||
let ignored = SaleIgnored(next)
|
||||
check ignored.reprocessSlot
|
||||
check ignored.returnBytes == false
|
||||
|
||||
test "run switches to ignored when availability is not enabled":
|
||||
await createAvailability(enabled = false)
|
||||
let next = !(await state.run(agent))
|
||||
check next of SaleIgnored
|
||||
|
||||
test "run switches to slot reserving state after reservation created":
|
||||
await createAvailability()
|
||||
@ -94,7 +107,6 @@ asyncchecksuite "sales state 'preparing'":
|
||||
check next of SaleIgnored
|
||||
let ignored = SaleIgnored(next)
|
||||
check ignored.reprocessSlot
|
||||
check ignored.returnBytes == false
|
||||
|
||||
test "run switches to errored when reserve fails with other error":
|
||||
await createAvailability()
|
||||
|
||||
@ -54,16 +54,16 @@ asyncchecksuite "sales state 'SlotReserving'":
|
||||
|
||||
test "run switches to errored when slot reservation errors":
|
||||
let error = newException(MarketError, "some error")
|
||||
market.setReserveSlotThrowError(some error)
|
||||
market.setErrorOnReserveSlot(error)
|
||||
let next = !(await state.run(agent))
|
||||
check next of SaleErrored
|
||||
let errored = SaleErrored(next)
|
||||
check errored.error == error
|
||||
|
||||
test "catches reservation not allowed error":
|
||||
let error = newException(MarketError, "SlotReservations_ReservationNotAllowed")
|
||||
market.setReserveSlotThrowError(some error)
|
||||
test "run switches to ignored when reservation is not allowed":
|
||||
let error =
|
||||
newException(SlotReservationNotAllowedError, "Reservation is not allowed")
|
||||
market.setErrorOnReserveSlot(error)
|
||||
let next = !(await state.run(agent))
|
||||
check next of SaleIgnored
|
||||
check SaleIgnored(next).reprocessSlot == false
|
||||
check SaleIgnored(next).returnBytes
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
import std/random
|
||||
|
||||
import std/times
|
||||
import pkg/questionable
|
||||
import pkg/questionable/results
|
||||
import pkg/chronos
|
||||
@ -8,6 +8,7 @@ import pkg/datastore
|
||||
import pkg/codex/stores
|
||||
import pkg/codex/errors
|
||||
import pkg/codex/sales
|
||||
import pkg/codex/clock
|
||||
import pkg/codex/utils/json
|
||||
|
||||
import ../../asynctest
|
||||
@ -39,19 +40,22 @@ asyncchecksuite "Reservations module":
|
||||
await repoTmp.destroyDb()
|
||||
await metaTmp.destroyDb()
|
||||
|
||||
proc createAvailability(): Availability =
|
||||
proc createAvailability(enabled = true, until = 0.SecondsSince1970): Availability =
|
||||
let example = Availability.example(collateralPerByte)
|
||||
let totalSize = rand(100000 .. 200000).uint64
|
||||
let totalCollateral = totalSize.u256 * collateralPerByte
|
||||
let availability = waitFor reservations.createAvailability(
|
||||
totalSize, example.duration, example.minPricePerBytePerSecond, totalCollateral
|
||||
totalSize, example.duration, example.minPricePerBytePerSecond, totalCollateral,
|
||||
enabled, until,
|
||||
)
|
||||
return availability.get
|
||||
|
||||
proc createReservation(availability: Availability): Reservation =
|
||||
let size = rand(1 ..< availability.freeSize.int)
|
||||
let validUntil = getTime().toUnix() + 30.SecondsSince1970
|
||||
let reservation = waitFor reservations.createReservation(
|
||||
availability.id, size.uint64, RequestId.example, uint64.example, 1.u256
|
||||
availability.id, size.uint64, RequestId.example, uint64.example, 1.u256,
|
||||
validUntil,
|
||||
)
|
||||
return reservation.get
|
||||
|
||||
@ -64,8 +68,12 @@ asyncchecksuite "Reservations module":
|
||||
check (await reservations.all(Availability)).get.len == 0
|
||||
|
||||
test "generates unique ids for storage availability":
|
||||
let availability1 = Availability.init(1.uint64, 2.uint64, 3.uint64, 4.u256, 5.u256)
|
||||
let availability2 = Availability.init(1.uint64, 2.uint64, 3.uint64, 4.u256, 5.u256)
|
||||
let availability1 = Availability.init(
|
||||
1.uint64, 2.uint64, 3.uint64, 4.u256, 5.u256, true, 0.SecondsSince1970
|
||||
)
|
||||
let availability2 = Availability.init(
|
||||
1.uint64, 2.uint64, 3.uint64, 4.u256, 5.u256, true, 0.SecondsSince1970
|
||||
)
|
||||
check availability1.id != availability2.id
|
||||
|
||||
test "can reserve available storage":
|
||||
@ -128,20 +136,24 @@ asyncchecksuite "Reservations module":
|
||||
|
||||
test "cannot create reservation with non-existant availability":
|
||||
let availability = Availability.example
|
||||
let validUntil = getTime().toUnix() + 30.SecondsSince1970
|
||||
let created = await reservations.createReservation(
|
||||
availability.id, uint64.example, RequestId.example, uint64.example, 1.u256
|
||||
availability.id, uint64.example, RequestId.example, uint64.example, 1.u256,
|
||||
validUntil,
|
||||
)
|
||||
check created.isErr
|
||||
check created.error of NotExistsError
|
||||
|
||||
test "cannot create reservation larger than availability size":
|
||||
let availability = createAvailability()
|
||||
let validUntil = getTime().toUnix() + 30.SecondsSince1970
|
||||
let created = await reservations.createReservation(
|
||||
availability.id,
|
||||
availability.totalSize + 1,
|
||||
RequestId.example,
|
||||
uint64.example,
|
||||
UInt256.example,
|
||||
validUntil,
|
||||
)
|
||||
check created.isErr
|
||||
check created.error of BytesOutOfBoundsError
|
||||
@ -149,23 +161,26 @@ asyncchecksuite "Reservations module":
|
||||
test "cannot create reservation larger than availability size - concurrency test":
|
||||
proc concurrencyTest(): Future[void] {.async.} =
|
||||
let availability = createAvailability()
|
||||
let validUntil = getTime().toUnix() + 30.SecondsSince1970
|
||||
let one = reservations.createReservation(
|
||||
availability.id,
|
||||
availability.totalSize - 1,
|
||||
RequestId.example,
|
||||
uint64.example,
|
||||
UInt256.example,
|
||||
validUntil,
|
||||
)
|
||||
|
||||
let two = reservations.createReservation(
|
||||
availability.id, availability.totalSize, RequestId.example, uint64.example,
|
||||
UInt256.example,
|
||||
UInt256.example, validUntil,
|
||||
)
|
||||
|
||||
let oneResult = await one
|
||||
let twoResult = await two
|
||||
|
||||
check oneResult.isErr or twoResult.isErr
|
||||
|
||||
if oneResult.isErr:
|
||||
check oneResult.error of BytesOutOfBoundsError
|
||||
if twoResult.isErr:
|
||||
@ -259,6 +274,48 @@ asyncchecksuite "Reservations module":
|
||||
check isOk await reservations.update(availability)
|
||||
check (repo.quotaReservedBytes - origQuota) == 100.NBytes
|
||||
|
||||
test "create availability set enabled to true by default":
|
||||
let availability = createAvailability()
|
||||
check availability.enabled == true
|
||||
|
||||
test "create availability set until to 0 by default":
|
||||
let availability = createAvailability()
|
||||
check availability.until == 0.SecondsSince1970
|
||||
|
||||
test "create availability whith correct values":
|
||||
var until = getTime().toUnix()
|
||||
|
||||
let availability = createAvailability(enabled = false, until = until)
|
||||
check availability.enabled == false
|
||||
check availability.until == until
|
||||
|
||||
test "create an availability fails when trying set until with a negative value":
|
||||
let totalSize = rand(100000 .. 200000).uint64
|
||||
let example = Availability.example(collateralPerByte)
|
||||
let totalCollateral = totalSize.u256 * collateralPerByte
|
||||
|
||||
let result = await reservations.createAvailability(
|
||||
totalSize,
|
||||
example.duration,
|
||||
example.minPricePerBytePerSecond,
|
||||
totalCollateral,
|
||||
enabled = true,
|
||||
until = -1.SecondsSince1970,
|
||||
)
|
||||
|
||||
check result.isErr
|
||||
check result.error of UntilOutOfBoundsError
|
||||
|
||||
test "update an availability fails when trying set until with a negative value":
|
||||
let until = getTime().toUnix()
|
||||
let availability = createAvailability(until = until)
|
||||
|
||||
availability.until = -1
|
||||
|
||||
let result = await reservations.update(availability)
|
||||
check result.isErr
|
||||
check result.error of UntilOutOfBoundsError
|
||||
|
||||
test "reservation can be partially released":
|
||||
let availability = createAvailability()
|
||||
let reservation = createReservation(availability)
|
||||
@ -285,7 +342,9 @@ asyncchecksuite "Reservations module":
|
||||
|
||||
test "OnAvailabilitySaved called when availability is created":
|
||||
var added: Availability
|
||||
reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} =
|
||||
reservations.OnAvailabilitySaved = proc(
|
||||
a: Availability
|
||||
) {.gcsafe, async: (raises: []).} =
|
||||
added = a
|
||||
|
||||
let availability = createAvailability()
|
||||
@ -295,7 +354,9 @@ asyncchecksuite "Reservations module":
|
||||
test "OnAvailabilitySaved called when availability size is increased":
|
||||
var availability = createAvailability()
|
||||
var added: Availability
|
||||
reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} =
|
||||
reservations.OnAvailabilitySaved = proc(
|
||||
a: Availability
|
||||
) {.gcsafe, async: (raises: []).} =
|
||||
added = a
|
||||
availability.freeSize += 1
|
||||
discard await reservations.update(availability)
|
||||
@ -305,7 +366,21 @@ asyncchecksuite "Reservations module":
|
||||
test "OnAvailabilitySaved is not called when availability size is decreased":
|
||||
var availability = createAvailability()
|
||||
var called = false
|
||||
reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} =
|
||||
reservations.OnAvailabilitySaved = proc(
|
||||
a: Availability
|
||||
) {.gcsafe, async: (raises: []).} =
|
||||
called = true
|
||||
availability.freeSize -= 1.uint64
|
||||
discard await reservations.update(availability)
|
||||
|
||||
check not called
|
||||
|
||||
test "OnAvailabilitySaved is not called when availability is disabled":
|
||||
var availability = createAvailability(enabled = false)
|
||||
var called = false
|
||||
reservations.OnAvailabilitySaved = proc(
|
||||
a: Availability
|
||||
) {.gcsafe, async: (raises: []).} =
|
||||
called = true
|
||||
availability.freeSize -= 1
|
||||
discard await reservations.update(availability)
|
||||
@ -315,7 +390,7 @@ asyncchecksuite "Reservations module":
|
||||
test "OnAvailabilitySaved called when availability duration is increased":
|
||||
var availability = createAvailability()
|
||||
var added: Availability
|
||||
reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} =
|
||||
reservations.OnAvailabilitySaved = proc(a: Availability) {.async: (raises: []).} =
|
||||
added = a
|
||||
availability.duration += 1
|
||||
discard await reservations.update(availability)
|
||||
@ -325,7 +400,7 @@ asyncchecksuite "Reservations module":
|
||||
test "OnAvailabilitySaved is not called when availability duration is decreased":
|
||||
var availability = createAvailability()
|
||||
var called = false
|
||||
reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} =
|
||||
reservations.OnAvailabilitySaved = proc(a: Availability) {.async: (raises: []).} =
|
||||
called = true
|
||||
availability.duration -= 1
|
||||
discard await reservations.update(availability)
|
||||
@ -335,7 +410,7 @@ asyncchecksuite "Reservations module":
|
||||
test "OnAvailabilitySaved called when availability minPricePerBytePerSecond is increased":
|
||||
var availability = createAvailability()
|
||||
var added: Availability
|
||||
reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} =
|
||||
reservations.OnAvailabilitySaved = proc(a: Availability) {.async: (raises: []).} =
|
||||
added = a
|
||||
availability.minPricePerBytePerSecond += 1.u256
|
||||
discard await reservations.update(availability)
|
||||
@ -345,7 +420,7 @@ asyncchecksuite "Reservations module":
|
||||
test "OnAvailabilitySaved is not called when availability minPricePerBytePerSecond is decreased":
|
||||
var availability = createAvailability()
|
||||
var called = false
|
||||
reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} =
|
||||
reservations.OnAvailabilitySaved = proc(a: Availability) {.async: (raises: []).} =
|
||||
called = true
|
||||
availability.minPricePerBytePerSecond -= 1.u256
|
||||
discard await reservations.update(availability)
|
||||
@ -355,7 +430,7 @@ asyncchecksuite "Reservations module":
|
||||
test "OnAvailabilitySaved called when availability totalCollateral is increased":
|
||||
var availability = createAvailability()
|
||||
var added: Availability
|
||||
reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} =
|
||||
reservations.OnAvailabilitySaved = proc(a: Availability) {.async: (raises: []).} =
|
||||
added = a
|
||||
availability.totalCollateral = availability.totalCollateral + 1.u256
|
||||
discard await reservations.update(availability)
|
||||
@ -365,7 +440,7 @@ asyncchecksuite "Reservations module":
|
||||
test "OnAvailabilitySaved is not called when availability totalCollateral is decreased":
|
||||
var availability = createAvailability()
|
||||
var called = false
|
||||
reservations.OnAvailabilitySaved = proc(a: Availability) {.async.} =
|
||||
reservations.OnAvailabilitySaved = proc(a: Availability) {.async: (raises: []).} =
|
||||
called = true
|
||||
availability.totalCollateral = availability.totalCollateral - 1.u256
|
||||
discard await reservations.update(availability)
|
||||
@ -374,32 +449,69 @@ asyncchecksuite "Reservations module":
|
||||
|
||||
test "availabilities can be found":
|
||||
let availability = createAvailability()
|
||||
|
||||
let validUntil = getTime().toUnix() + 30.SecondsSince1970
|
||||
let found = await reservations.findAvailability(
|
||||
availability.freeSize, availability.duration,
|
||||
availability.minPricePerBytePerSecond, collateralPerByte,
|
||||
availability.minPricePerBytePerSecond, collateralPerByte, validUntil,
|
||||
)
|
||||
|
||||
check found.isSome
|
||||
check found.get == availability
|
||||
|
||||
test "does not find an availability when is it disabled":
|
||||
let availability = createAvailability(enabled = false)
|
||||
let validUntil = getTime().toUnix() + 30.SecondsSince1970
|
||||
let found = await reservations.findAvailability(
|
||||
availability.freeSize, availability.duration,
|
||||
availability.minPricePerBytePerSecond, collateralPerByte, validUntil,
|
||||
)
|
||||
|
||||
check found.isNone
|
||||
|
||||
test "finds an availability when the until date is after the duration":
|
||||
let example = Availability.example(collateralPerByte)
|
||||
let until = getTime().toUnix() + example.duration.SecondsSince1970
|
||||
let availability = createAvailability(until = until)
|
||||
let validUntil = getTime().toUnix() + 30.SecondsSince1970
|
||||
let found = await reservations.findAvailability(
|
||||
availability.freeSize, availability.duration,
|
||||
availability.minPricePerBytePerSecond, collateralPerByte, validUntil,
|
||||
)
|
||||
|
||||
check found.isSome
|
||||
check found.get == availability
|
||||
|
||||
test "does not find an availability when the until date is before the duration":
|
||||
let example = Availability.example(collateralPerByte)
|
||||
let until = getTime().toUnix() + 1.SecondsSince1970
|
||||
let availability = createAvailability(until = until)
|
||||
let validUntil = getTime().toUnix() + 30.SecondsSince1970
|
||||
let found = await reservations.findAvailability(
|
||||
availability.freeSize, availability.duration,
|
||||
availability.minPricePerBytePerSecond, collateralPerByte, validUntil,
|
||||
)
|
||||
|
||||
check found.isNone
|
||||
|
||||
test "non-matching availabilities are not found":
|
||||
let availability = createAvailability()
|
||||
|
||||
let validUntil = getTime().toUnix() + 30.SecondsSince1970
|
||||
let found = await reservations.findAvailability(
|
||||
availability.freeSize + 1,
|
||||
availability.duration,
|
||||
availability.minPricePerBytePerSecond,
|
||||
collateralPerByte,
|
||||
validUntil,
|
||||
)
|
||||
|
||||
check found.isNone
|
||||
|
||||
test "non-existent availability cannot be found":
|
||||
let availability = Availability.example
|
||||
let validUntil = getTime().toUnix() + 30.SecondsSince1970
|
||||
let found = await reservations.findAvailability(
|
||||
availability.freeSize, availability.duration,
|
||||
availability.minPricePerBytePerSecond, collateralPerByte,
|
||||
availability.minPricePerBytePerSecond, collateralPerByte, validUntil,
|
||||
)
|
||||
|
||||
check found.isNone
|
||||
@ -420,7 +532,12 @@ asyncchecksuite "Reservations module":
|
||||
|
||||
test "fails to create availability with size that is larger than available quota":
|
||||
let created = await reservations.createAvailability(
|
||||
DefaultQuotaBytes.uint64 + 1, uint64.example, UInt256.example, UInt256.example
|
||||
DefaultQuotaBytes.uint64 + 1,
|
||||
uint64.example,
|
||||
UInt256.example,
|
||||
UInt256.example,
|
||||
enabled = true,
|
||||
until = 0.SecondsSince1970,
|
||||
)
|
||||
check created.isErr
|
||||
check created.error of ReserveFailedError
|
||||
|
||||
@ -14,6 +14,7 @@ import pkg/codex/stores/repostore
|
||||
import pkg/codex/blocktype as bt
|
||||
import pkg/codex/node
|
||||
import pkg/codex/utils/asyncstatemachine
|
||||
import times
|
||||
import ../../asynctest
|
||||
import ../helpers
|
||||
import ../helpers/mockmarket
|
||||
@ -152,6 +153,8 @@ asyncchecksuite "Sales":
|
||||
duration = 60.uint64,
|
||||
minPricePerBytePerSecond = minPricePerBytePerSecond,
|
||||
totalCollateral = totalCollateral,
|
||||
enabled = true,
|
||||
until = 0.SecondsSince1970,
|
||||
)
|
||||
request = StorageRequest(
|
||||
ask: StorageAsk(
|
||||
@ -221,10 +224,11 @@ asyncchecksuite "Sales":
|
||||
let key = availability.id.key.get
|
||||
(waitFor reservations.get(key, Availability)).get
|
||||
|
||||
proc createAvailability() =
|
||||
proc createAvailability(enabled = true, until = 0.SecondsSince1970) =
|
||||
let a = waitFor reservations.createAvailability(
|
||||
availability.totalSize, availability.duration,
|
||||
availability.minPricePerBytePerSecond, availability.totalCollateral,
|
||||
availability.minPricePerBytePerSecond, availability.totalCollateral, enabled,
|
||||
until,
|
||||
)
|
||||
availability = a.get # update id
|
||||
|
||||
@ -380,14 +384,14 @@ asyncchecksuite "Sales":
|
||||
check eventually getAvailability().freeSize ==
|
||||
availability.freeSize - request.ask.slotSize
|
||||
|
||||
test "non-downloaded bytes are returned to availability once finished":
|
||||
test "bytes are returned to availability once finished":
|
||||
var slotIndex = 0.uint64
|
||||
sales.onStore = proc(
|
||||
request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false
|
||||
): Future[?!void] {.async.} =
|
||||
slotIndex = slot
|
||||
let blk = bt.Block.new(@[1.byte]).get
|
||||
await onBatch(@[blk])
|
||||
await onBatch(blk.repeat(request.ask.slotSize))
|
||||
|
||||
let sold = newFuture[void]()
|
||||
sales.onSale = proc(request: StorageRequest, slotIndex: uint64) =
|
||||
@ -403,7 +407,7 @@ asyncchecksuite "Sales":
|
||||
market.slotState[request.slotId(slotIndex)] = SlotState.Finished
|
||||
clock.advance(request.ask.duration.int64)
|
||||
|
||||
check eventually getAvailability().freeSize == origSize - 1
|
||||
check eventually getAvailability().freeSize == origSize
|
||||
|
||||
test "ignores download when duration not long enough":
|
||||
availability.duration = request.ask.duration - 1
|
||||
@ -439,6 +443,34 @@ asyncchecksuite "Sales":
|
||||
market.slotState[request.slotId(3.uint64)] = SlotState.Filled
|
||||
check wasIgnored()
|
||||
|
||||
test "ignores request when availability is not enabled":
|
||||
createAvailability(enabled = false)
|
||||
await market.requestStorage(request)
|
||||
check wasIgnored()
|
||||
|
||||
test "ignores request when availability until terminates before the duration":
|
||||
let until = getTime().toUnix()
|
||||
createAvailability(until = until)
|
||||
await market.requestStorage(request)
|
||||
|
||||
check wasIgnored()
|
||||
|
||||
test "retrieves request when availability until terminates after the duration":
|
||||
let requestEnd = getTime().toUnix() + cast[int64](request.ask.duration)
|
||||
let until = requestEnd + 1
|
||||
createAvailability(until = until)
|
||||
|
||||
var storingRequest: StorageRequest
|
||||
sales.onStore = proc(
|
||||
request: StorageRequest, slot: uint64, onBatch: BatchProc, isRepairing = false
|
||||
): Future[?!void] {.async.} =
|
||||
storingRequest = request
|
||||
return success()
|
||||
|
||||
market.requestEnds[request.id] = requestEnd
|
||||
await market.requestStorage(request)
|
||||
check eventually storingRequest == request
|
||||
|
||||
test "retrieves and stores data locally":
|
||||
var storingRequest: StorageRequest
|
||||
var storingSlot: uint64
|
||||
@ -563,6 +595,8 @@ asyncchecksuite "Sales":
|
||||
# by other slots
|
||||
request.ask.slots = 1
|
||||
market.requestExpiry[request.id] = expiry
|
||||
market.requestEnds[request.id] =
|
||||
getTime().toUnix() + cast[int64](request.ask.duration)
|
||||
|
||||
let origSize = availability.freeSize
|
||||
sales.onStore = proc(
|
||||
@ -621,10 +655,28 @@ asyncchecksuite "Sales":
|
||||
|
||||
test "deletes inactive reservations on load":
|
||||
createAvailability()
|
||||
let validUntil = getTime().toUnix() + 30.SecondsSince1970
|
||||
discard await reservations.createReservation(
|
||||
availability.id, 100.uint64, RequestId.example, 0.uint64, UInt256.example
|
||||
availability.id, 100.uint64, RequestId.example, 0.uint64, UInt256.example,
|
||||
validUntil,
|
||||
)
|
||||
check (await reservations.all(Reservation)).get.len == 1
|
||||
await sales.load()
|
||||
check (await reservations.all(Reservation)).get.len == 0
|
||||
check getAvailability().freeSize == availability.freeSize # was restored
|
||||
|
||||
test "update an availability fails when trying change the until date before an existing reservation":
|
||||
let until = getTime().toUnix() + 300.SecondsSince1970
|
||||
createAvailability(until = until)
|
||||
|
||||
market.requestEnds[request.id] =
|
||||
getTime().toUnix() + cast[int64](request.ask.duration)
|
||||
|
||||
await market.requestStorage(request)
|
||||
await allowRequestToStart()
|
||||
|
||||
availability.until = getTime().toUnix()
|
||||
|
||||
let result = await reservations.update(availability)
|
||||
check result.isErr
|
||||
check result.error of UntilOutOfBoundsError
|
||||
|
||||
@ -52,21 +52,21 @@ asyncchecksuite "Timer":
|
||||
|
||||
test "Start timer1 should execute callback":
|
||||
startNumbersTimer()
|
||||
check eventually output == "0"
|
||||
check eventually(output == "0", pollInterval = 10)
|
||||
|
||||
test "Start timer1 should execute callback multiple times":
|
||||
startNumbersTimer()
|
||||
check eventually output == "012"
|
||||
check eventually(output == "012", pollInterval = 10)
|
||||
|
||||
test "Starting timer1 multiple times has no impact":
|
||||
startNumbersTimer()
|
||||
startNumbersTimer()
|
||||
startNumbersTimer()
|
||||
check eventually output == "01234"
|
||||
check eventually(output == "01234", pollInterval = 10)
|
||||
|
||||
test "Stop timer1 should stop execution of the callback":
|
||||
startNumbersTimer()
|
||||
check eventually output == "012"
|
||||
check eventually(output == "012", pollInterval = 10)
|
||||
await timer1.stop()
|
||||
await sleepAsync(30.milliseconds)
|
||||
let stoppedOutput = output
|
||||
@ -81,4 +81,4 @@ asyncchecksuite "Timer":
|
||||
test "Starting both timers should execute callbacks sequentially":
|
||||
startNumbersTimer()
|
||||
startLettersTimer()
|
||||
check eventually output == "0a1b2c3d4e"
|
||||
check eventually(output == "0a1b2c3d4e", pollInterval = 10)
|
||||
|
||||
@ -548,7 +548,7 @@ ethersuite "On-Chain Market":
|
||||
switchAccount(host)
|
||||
await market.reserveSlot(request.id, 0.uint64)
|
||||
await market.fillSlot(request.id, 0.uint64, proof, request.ask.collateralPerSlot)
|
||||
let filledAt = (await ethProvider.currentTime()) - 1.u256
|
||||
let filledAt = (await ethProvider.currentTime())
|
||||
|
||||
for slotIndex in 1 ..< request.ask.slots:
|
||||
await market.reserveSlot(request.id, slotIndex.uint64)
|
||||
@ -575,7 +575,7 @@ ethersuite "On-Chain Market":
|
||||
switchAccount(host)
|
||||
await market.reserveSlot(request.id, 0.uint64)
|
||||
await market.fillSlot(request.id, 0.uint64, proof, request.ask.collateralPerSlot)
|
||||
let filledAt = (await ethProvider.currentTime()) - 1.u256
|
||||
let filledAt = (await ethProvider.currentTime())
|
||||
|
||||
for slotIndex in 1 ..< request.ask.slots:
|
||||
await market.reserveSlot(request.id, slotIndex.uint64)
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
import helpers/multisetup
|
||||
import helpers/trackers
|
||||
import helpers/templeveldb
|
||||
|
||||
import std/times
|
||||
import std/sequtils, chronos
|
||||
|
||||
export multisetup, trackers, templeveldb
|
||||
|
||||
@ -4,119 +4,216 @@ import std/strutils
|
||||
from pkg/libp2p import Cid, `$`, init
|
||||
import pkg/stint
|
||||
import pkg/questionable/results
|
||||
import pkg/chronos/apps/http/[httpserver, shttpserver, httpclient]
|
||||
import pkg/chronos/apps/http/[httpserver, shttpserver, httpclient, httptable]
|
||||
import pkg/codex/logutils
|
||||
import pkg/codex/rest/json
|
||||
import pkg/codex/purchasing
|
||||
import pkg/codex/errors
|
||||
import pkg/codex/sales/reservations
|
||||
|
||||
export purchasing
|
||||
export purchasing, httptable, httpclient
|
||||
|
||||
type CodexClient* = ref object
|
||||
baseurl: string
|
||||
httpClients: seq[HttpClient]
|
||||
|
||||
type CodexClientError* = object of CatchableError
|
||||
|
||||
const HttpClientTimeoutMs = 60 * 1000
|
||||
session: HttpSessionRef
|
||||
|
||||
proc new*(_: type CodexClient, baseurl: string): CodexClient =
|
||||
CodexClient(baseurl: baseurl, httpClients: newSeq[HttpClient]())
|
||||
CodexClient(session: HttpSessionRef.new(), baseurl: baseurl)
|
||||
|
||||
proc http*(client: CodexClient): HttpClient =
|
||||
let httpClient = newHttpClient(timeout = HttpClientTimeoutMs)
|
||||
client.httpClients.insert(httpClient)
|
||||
return httpClient
|
||||
proc close*(self: CodexClient): Future[void] {.async: (raises: []).} =
|
||||
await self.session.closeWait()
|
||||
|
||||
proc close*(client: CodexClient): void =
|
||||
for httpClient in client.httpClients:
|
||||
httpClient.close()
|
||||
proc request(
|
||||
self: CodexClient,
|
||||
httpMethod: httputils.HttpMethod,
|
||||
url: string,
|
||||
body: openArray[char] = [],
|
||||
headers: openArray[HttpHeaderTuple] = [],
|
||||
): Future[HttpClientResponseRef] {.
|
||||
async: (raw: true, raises: [CancelledError, HttpError])
|
||||
.} =
|
||||
HttpClientRequestRef
|
||||
.new(
|
||||
self.session,
|
||||
url,
|
||||
httpMethod,
|
||||
version = HttpVersion11,
|
||||
flags = {},
|
||||
maxResponseHeadersSize = HttpMaxHeadersSize,
|
||||
headers = headers,
|
||||
body = body.toOpenArrayByte(0, len(body) - 1),
|
||||
).get
|
||||
.send()
|
||||
|
||||
proc info*(client: CodexClient): ?!JsonNode =
|
||||
let url = client.baseurl & "/debug/info"
|
||||
JsonNode.parse(client.http().getContent(url))
|
||||
proc post*(
|
||||
self: CodexClient,
|
||||
url: string,
|
||||
body: string = "",
|
||||
headers: seq[HttpHeaderTuple] = @[],
|
||||
): Future[HttpClientResponseRef] {.
|
||||
async: (raw: true, raises: [CancelledError, HttpError])
|
||||
.} =
|
||||
return self.request(MethodPost, url, headers = headers, body = body)
|
||||
|
||||
proc setLogLevel*(client: CodexClient, level: string) =
|
||||
let url = client.baseurl & "/debug/chronicles/loglevel?level=" & level
|
||||
let headers = newHttpHeaders({"Content-Type": "text/plain"})
|
||||
let response = client.http().request(url, httpMethod = HttpPost, headers = headers)
|
||||
assert response.status == "200 OK"
|
||||
proc get(
|
||||
self: CodexClient, url: string, headers: seq[HttpHeaderTuple] = @[]
|
||||
): Future[HttpClientResponseRef] {.
|
||||
async: (raw: true, raises: [CancelledError, HttpError])
|
||||
.} =
|
||||
return self.request(MethodGet, url, headers = headers)
|
||||
|
||||
proc upload*(client: CodexClient, contents: string): ?!Cid =
|
||||
let response = client.http().post(client.baseurl & "/data", contents)
|
||||
assert response.status == "200 OK"
|
||||
Cid.init(response.body).mapFailure
|
||||
proc delete(
|
||||
self: CodexClient, url: string, headers: seq[HttpHeaderTuple] = @[]
|
||||
): Future[HttpClientResponseRef] {.
|
||||
async: (raw: true, raises: [CancelledError, HttpError])
|
||||
.} =
|
||||
return self.request(MethodDelete, url, headers = headers)
|
||||
|
||||
proc upload*(client: CodexClient, bytes: seq[byte]): ?!Cid =
|
||||
client.upload(string.fromBytes(bytes))
|
||||
proc patch*(
|
||||
self: CodexClient,
|
||||
url: string,
|
||||
body: string = "",
|
||||
headers: seq[HttpHeaderTuple] = @[],
|
||||
): Future[HttpClientResponseRef] {.
|
||||
async: (raw: true, raises: [CancelledError, HttpError])
|
||||
.} =
|
||||
return self.request(MethodPatch, url, headers = headers, body = body)
|
||||
|
||||
proc download*(client: CodexClient, cid: Cid, local = false): ?!string =
|
||||
let response = client.http().get(
|
||||
client.baseurl & "/data/" & $cid & (if local: "" else: "/network/stream")
|
||||
)
|
||||
proc body*(
|
||||
response: HttpClientResponseRef
|
||||
): Future[string] {.async: (raises: [CancelledError, HttpError]).} =
|
||||
return bytesToString (await response.getBodyBytes())
|
||||
|
||||
if response.status != "200 OK":
|
||||
return failure(response.status)
|
||||
proc getContent(
|
||||
client: CodexClient, url: string, headers: seq[HttpHeaderTuple] = @[]
|
||||
): Future[string] {.async: (raises: [CancelledError, HttpError]).} =
|
||||
let response = await client.get(url, headers)
|
||||
return await response.body
|
||||
|
||||
success response.body
|
||||
proc info*(
|
||||
client: CodexClient
|
||||
): Future[?!JsonNode] {.async: (raises: [CancelledError, HttpError]).} =
|
||||
let response = await client.get(client.baseurl & "/debug/info")
|
||||
return JsonNode.parse(await response.body)
|
||||
|
||||
proc downloadManifestOnly*(client: CodexClient, cid: Cid): ?!string =
|
||||
let response =
|
||||
client.http().get(client.baseurl & "/data/" & $cid & "/network/manifest")
|
||||
proc setLogLevel*(
|
||||
client: CodexClient, level: string
|
||||
): Future[void] {.async: (raises: [CancelledError, HttpError]).} =
|
||||
let
|
||||
url = client.baseurl & "/debug/chronicles/loglevel?level=" & level
|
||||
headers = @[("Content-Type", "text/plain")]
|
||||
response = await client.post(url, headers = headers, body = "")
|
||||
assert response.status == 200
|
||||
|
||||
if response.status != "200 OK":
|
||||
return failure(response.status)
|
||||
proc uploadRaw*(
|
||||
client: CodexClient, contents: string, headers: seq[HttpHeaderTuple] = @[]
|
||||
): Future[HttpClientResponseRef] {.
|
||||
async: (raw: true, raises: [CancelledError, HttpError])
|
||||
.} =
|
||||
return client.post(client.baseurl & "/data", body = contents, headers = headers)
|
||||
|
||||
success response.body
|
||||
proc upload*(
|
||||
client: CodexClient, contents: string
|
||||
): Future[?!Cid] {.async: (raises: [CancelledError, HttpError]).} =
|
||||
let response = await client.uploadRaw(contents)
|
||||
assert response.status == 200
|
||||
Cid.init(await response.body).mapFailure
|
||||
|
||||
proc downloadNoStream*(client: CodexClient, cid: Cid): ?!string =
|
||||
let response = client.http().post(client.baseurl & "/data/" & $cid & "/network")
|
||||
proc upload*(
|
||||
client: CodexClient, bytes: seq[byte]
|
||||
): Future[?!Cid] {.async: (raw: true).} =
|
||||
return client.upload(string.fromBytes(bytes))
|
||||
|
||||
if response.status != "200 OK":
|
||||
return failure(response.status)
|
||||
|
||||
success response.body
|
||||
proc downloadRaw*(
|
||||
client: CodexClient, cid: string, local = false
|
||||
): Future[HttpClientResponseRef] {.
|
||||
async: (raw: true, raises: [CancelledError, HttpError])
|
||||
.} =
|
||||
return
|
||||
client.get(client.baseurl & "/data/" & cid & (if local: "" else: "/network/stream"))
|
||||
|
||||
proc downloadBytes*(
|
||||
client: CodexClient, cid: Cid, local = false
|
||||
): Future[?!seq[byte]] {.async.} =
|
||||
let uri = client.baseurl & "/data/" & $cid & (if local: "" else: "/network/stream")
|
||||
): Future[?!seq[byte]] {.async: (raises: [CancelledError, HttpError]).} =
|
||||
let response = await client.downloadRaw($cid, local = local)
|
||||
|
||||
let response = client.http().get(uri)
|
||||
if response.status != 200:
|
||||
return failure($response.status)
|
||||
|
||||
if response.status != "200 OK":
|
||||
return failure("fetch failed with status " & $response.status)
|
||||
success await response.getBodyBytes()
|
||||
|
||||
success response.body.toBytes
|
||||
proc download*(
|
||||
client: CodexClient, cid: Cid, local = false
|
||||
): Future[?!string] {.async: (raises: [CancelledError, HttpError]).} =
|
||||
without response =? await client.downloadBytes(cid, local = local), err:
|
||||
return failure(err)
|
||||
return success bytesToString(response)
|
||||
|
||||
proc delete*(client: CodexClient, cid: Cid): ?!void =
|
||||
let
|
||||
url = client.baseurl & "/data/" & $cid
|
||||
response = client.http().delete(url)
|
||||
proc downloadNoStream*(
|
||||
client: CodexClient, cid: Cid
|
||||
): Future[?!string] {.async: (raises: [CancelledError, HttpError]).} =
|
||||
let response = await client.post(client.baseurl & "/data/" & $cid & "/network")
|
||||
|
||||
if response.status != "204 No Content":
|
||||
return failure(response.status)
|
||||
if response.status != 200:
|
||||
return failure($response.status)
|
||||
|
||||
success await response.body
|
||||
|
||||
proc downloadManifestOnly*(
|
||||
client: CodexClient, cid: Cid
|
||||
): Future[?!string] {.async: (raises: [CancelledError, HttpError]).} =
|
||||
let response =
|
||||
await client.get(client.baseurl & "/data/" & $cid & "/network/manifest")
|
||||
|
||||
if response.status != 200:
|
||||
return failure($response.status)
|
||||
|
||||
success await response.body
|
||||
|
||||
proc deleteRaw*(
|
||||
client: CodexClient, cid: string
|
||||
): Future[HttpClientResponseRef] {.
|
||||
async: (raw: true, raises: [CancelledError, HttpError])
|
||||
.} =
|
||||
return client.delete(client.baseurl & "/data/" & cid)
|
||||
|
||||
proc delete*(
|
||||
client: CodexClient, cid: Cid
|
||||
): Future[?!void] {.async: (raises: [CancelledError, HttpError]).} =
|
||||
let response = await client.deleteRaw($cid)
|
||||
|
||||
if response.status != 204:
|
||||
return failure($response.status)
|
||||
|
||||
success()
|
||||
|
||||
proc list*(client: CodexClient): ?!RestContentList =
|
||||
let url = client.baseurl & "/data"
|
||||
let response = client.http().get(url)
|
||||
proc listRaw*(
|
||||
client: CodexClient
|
||||
): Future[HttpClientResponseRef] {.
|
||||
async: (raw: true, raises: [CancelledError, HttpError])
|
||||
.} =
|
||||
return client.get(client.baseurl & "/data")
|
||||
|
||||
if response.status != "200 OK":
|
||||
return failure(response.status)
|
||||
proc list*(
|
||||
client: CodexClient
|
||||
): Future[?!RestContentList] {.async: (raises: [CancelledError, HttpError]).} =
|
||||
let response = await client.listRaw()
|
||||
|
||||
RestContentList.fromJson(response.body)
|
||||
if response.status != 200:
|
||||
return failure($response.status)
|
||||
|
||||
proc space*(client: CodexClient): ?!RestRepoStore =
|
||||
RestContentList.fromJson(await response.body)
|
||||
|
||||
proc space*(
|
||||
client: CodexClient
|
||||
): Future[?!RestRepoStore] {.async: (raises: [CancelledError, HttpError]).} =
|
||||
let url = client.baseurl & "/space"
|
||||
let response = client.http().get(url)
|
||||
let response = await client.get(url)
|
||||
|
||||
if response.status != "200 OK":
|
||||
return failure(response.status)
|
||||
if response.status != 200:
|
||||
return failure($response.status)
|
||||
|
||||
RestRepoStore.fromJson(response.body)
|
||||
RestRepoStore.fromJson(await response.body)
|
||||
|
||||
proc requestStorageRaw*(
|
||||
client: CodexClient,
|
||||
@ -128,7 +225,9 @@ proc requestStorageRaw*(
|
||||
expiry: uint64 = 0,
|
||||
nodes: uint = 3,
|
||||
tolerance: uint = 1,
|
||||
): Response =
|
||||
): Future[HttpClientResponseRef] {.
|
||||
async: (raw: true, raises: [CancelledError, HttpError])
|
||||
.} =
|
||||
## Call request storage REST endpoint
|
||||
##
|
||||
let url = client.baseurl & "/storage/request/" & $cid
|
||||
@ -145,7 +244,7 @@ proc requestStorageRaw*(
|
||||
if expiry != 0:
|
||||
json["expiry"] = %($expiry)
|
||||
|
||||
return client.http().post(url, $json)
|
||||
return client.post(url, $json)
|
||||
|
||||
proc requestStorage*(
|
||||
client: CodexClient,
|
||||
@ -157,43 +256,47 @@ proc requestStorage*(
|
||||
collateralPerByte: UInt256,
|
||||
nodes: uint = 3,
|
||||
tolerance: uint = 1,
|
||||
): ?!PurchaseId =
|
||||
): Future[?!PurchaseId] {.async: (raises: [CancelledError, HttpError]).} =
|
||||
## Call request storage REST endpoint
|
||||
##
|
||||
let response = client.requestStorageRaw(
|
||||
cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry,
|
||||
nodes, tolerance,
|
||||
)
|
||||
if response.status != "200 OK":
|
||||
doAssert(false, response.body)
|
||||
PurchaseId.fromHex(response.body).catch
|
||||
let
|
||||
response = await client.requestStorageRaw(
|
||||
cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry,
|
||||
nodes, tolerance,
|
||||
)
|
||||
body = await response.body
|
||||
|
||||
proc getPurchase*(client: CodexClient, purchaseId: PurchaseId): ?!RestPurchase =
|
||||
if response.status != 200:
|
||||
doAssert(false, body)
|
||||
PurchaseId.fromHex(body).catch
|
||||
|
||||
proc getPurchase*(
|
||||
client: CodexClient, purchaseId: PurchaseId
|
||||
): Future[?!RestPurchase] {.async: (raises: [CancelledError, HttpError]).} =
|
||||
let url = client.baseurl & "/storage/purchases/" & purchaseId.toHex
|
||||
try:
|
||||
let body = client.http().getContent(url)
|
||||
let body = await client.getContent(url)
|
||||
return RestPurchase.fromJson(body)
|
||||
except CatchableError as e:
|
||||
return failure e.msg
|
||||
|
||||
proc getSalesAgent*(client: CodexClient, slotId: SlotId): ?!RestSalesAgent =
|
||||
proc getSalesAgent*(
|
||||
client: CodexClient, slotId: SlotId
|
||||
): Future[?!RestSalesAgent] {.async: (raises: [CancelledError, HttpError]).} =
|
||||
let url = client.baseurl & "/sales/slots/" & slotId.toHex
|
||||
try:
|
||||
let body = client.http().getContent(url)
|
||||
let body = await client.getContent(url)
|
||||
return RestSalesAgent.fromJson(body)
|
||||
except CatchableError as e:
|
||||
return failure e.msg
|
||||
|
||||
proc getSlots*(client: CodexClient): ?!seq[Slot] =
|
||||
let url = client.baseurl & "/sales/slots"
|
||||
let body = client.http().getContent(url)
|
||||
seq[Slot].fromJson(body)
|
||||
|
||||
proc postAvailability*(
|
||||
proc postAvailabilityRaw*(
|
||||
client: CodexClient,
|
||||
totalSize, duration: uint64,
|
||||
minPricePerBytePerSecond, totalCollateral: UInt256,
|
||||
): ?!Availability =
|
||||
enabled: ?bool = bool.none,
|
||||
until: ?SecondsSince1970 = SecondsSince1970.none,
|
||||
): Future[HttpClientResponseRef] {.async: (raises: [CancelledError, HttpError]).} =
|
||||
## Post sales availability endpoint
|
||||
##
|
||||
let url = client.baseurl & "/sales/availability"
|
||||
@ -203,18 +306,43 @@ proc postAvailability*(
|
||||
"duration": duration,
|
||||
"minPricePerBytePerSecond": minPricePerBytePerSecond,
|
||||
"totalCollateral": totalCollateral,
|
||||
"enabled": enabled,
|
||||
"until": until,
|
||||
}
|
||||
let response = client.http().post(url, $json)
|
||||
doAssert response.status == "201 Created",
|
||||
"expected 201 Created, got " & response.status & ", body: " & response.body
|
||||
Availability.fromJson(response.body)
|
||||
return await client.post(url, $json)
|
||||
|
||||
proc postAvailability*(
|
||||
client: CodexClient,
|
||||
totalSize, duration: uint64,
|
||||
minPricePerBytePerSecond, totalCollateral: UInt256,
|
||||
enabled: ?bool = bool.none,
|
||||
until: ?SecondsSince1970 = SecondsSince1970.none,
|
||||
): Future[?!Availability] {.async: (raises: [CancelledError, HttpError]).} =
|
||||
let response = await client.postAvailabilityRaw(
|
||||
totalSize = totalSize,
|
||||
duration = duration,
|
||||
minPricePerBytePerSecond = minPricePerBytePerSecond,
|
||||
totalCollateral = totalCollateral,
|
||||
enabled = enabled,
|
||||
until = until,
|
||||
)
|
||||
|
||||
let body = await response.body
|
||||
|
||||
doAssert response.status == 201,
|
||||
"expected 201 Created, got " & $response.status & ", body: " & body
|
||||
Availability.fromJson(body)
|
||||
|
||||
proc patchAvailabilityRaw*(
|
||||
client: CodexClient,
|
||||
availabilityId: AvailabilityId,
|
||||
totalSize, freeSize, duration: ?uint64 = uint64.none,
|
||||
minPricePerBytePerSecond, totalCollateral: ?UInt256 = UInt256.none,
|
||||
): Response =
|
||||
enabled: ?bool = bool.none,
|
||||
until: ?SecondsSince1970 = SecondsSince1970.none,
|
||||
): Future[HttpClientResponseRef] {.
|
||||
async: (raw: true, raises: [CancelledError, HttpError])
|
||||
.} =
|
||||
## Updates availability
|
||||
##
|
||||
let url = client.baseurl & "/sales/availability/" & $availabilityId
|
||||
@ -237,66 +365,63 @@ proc patchAvailabilityRaw*(
|
||||
if totalCollateral =? totalCollateral:
|
||||
json["totalCollateral"] = %totalCollateral
|
||||
|
||||
client.http().patch(url, $json)
|
||||
if enabled =? enabled:
|
||||
json["enabled"] = %enabled
|
||||
|
||||
if until =? until:
|
||||
json["until"] = %until
|
||||
|
||||
client.patch(url, $json)
|
||||
|
||||
proc patchAvailability*(
|
||||
client: CodexClient,
|
||||
availabilityId: AvailabilityId,
|
||||
totalSize, duration: ?uint64 = uint64.none,
|
||||
minPricePerBytePerSecond, totalCollateral: ?UInt256 = UInt256.none,
|
||||
): void =
|
||||
let response = client.patchAvailabilityRaw(
|
||||
enabled: ?bool = bool.none,
|
||||
until: ?SecondsSince1970 = SecondsSince1970.none,
|
||||
): Future[void] {.async: (raises: [CancelledError, HttpError]).} =
|
||||
let response = await client.patchAvailabilityRaw(
|
||||
availabilityId,
|
||||
totalSize = totalSize,
|
||||
duration = duration,
|
||||
minPricePerBytePerSecond = minPricePerBytePerSecond,
|
||||
totalCollateral = totalCollateral,
|
||||
enabled = enabled,
|
||||
until = until,
|
||||
)
|
||||
doAssert response.status == "200 OK", "expected 200 OK, got " & response.status
|
||||
doAssert response.status == 204, "expected No Content, got " & $response.status
|
||||
|
||||
proc getAvailabilities*(client: CodexClient): ?!seq[Availability] =
|
||||
proc getAvailabilities*(
|
||||
client: CodexClient
|
||||
): Future[?!seq[Availability]] {.async: (raises: [CancelledError, HttpError]).} =
|
||||
## Call sales availability REST endpoint
|
||||
let url = client.baseurl & "/sales/availability"
|
||||
let body = client.http().getContent(url)
|
||||
let body = await client.getContent(url)
|
||||
seq[Availability].fromJson(body)
|
||||
|
||||
proc getAvailabilityReservations*(
|
||||
client: CodexClient, availabilityId: AvailabilityId
|
||||
): ?!seq[Reservation] =
|
||||
): Future[?!seq[Reservation]] {.async: (raises: [CancelledError, HttpError]).} =
|
||||
## Retrieves Availability's Reservations
|
||||
let url = client.baseurl & "/sales/availability/" & $availabilityId & "/reservations"
|
||||
let body = client.http().getContent(url)
|
||||
let body = await client.getContent(url)
|
||||
seq[Reservation].fromJson(body)
|
||||
|
||||
proc purchaseStateIs*(client: CodexClient, id: PurchaseId, state: string): bool =
|
||||
client.getPurchase(id).option .? state == some state
|
||||
proc purchaseStateIs*(
|
||||
client: CodexClient, id: PurchaseId, state: string
|
||||
): Future[bool] {.async: (raises: [CancelledError, HttpError]).} =
|
||||
(await client.getPurchase(id)).option .? state == some state
|
||||
|
||||
proc saleStateIs*(client: CodexClient, id: SlotId, state: string): bool =
|
||||
client.getSalesAgent(id).option .? state == some state
|
||||
proc saleStateIs*(
|
||||
client: CodexClient, id: SlotId, state: string
|
||||
): Future[bool] {.async: (raises: [CancelledError, HttpError]).} =
|
||||
(await client.getSalesAgent(id)).option .? state == some state
|
||||
|
||||
proc requestId*(client: CodexClient, id: PurchaseId): ?RequestId =
|
||||
return client.getPurchase(id).option .? requestId
|
||||
proc requestId*(
|
||||
client: CodexClient, id: PurchaseId
|
||||
): Future[?RequestId] {.async: (raises: [CancelledError, HttpError]).} =
|
||||
return (await client.getPurchase(id)).option .? requestId
|
||||
|
||||
proc uploadRaw*(
|
||||
client: CodexClient, contents: string, headers = newHttpHeaders()
|
||||
): Response =
|
||||
return client.http().request(
|
||||
client.baseurl & "/data",
|
||||
body = contents,
|
||||
httpMethod = HttpPost,
|
||||
headers = headers,
|
||||
)
|
||||
|
||||
proc listRaw*(client: CodexClient): Response =
|
||||
return client.http().request(client.baseurl & "/data", httpMethod = HttpGet)
|
||||
|
||||
proc downloadRaw*(
|
||||
client: CodexClient, cid: string, local = false, httpClient = client.http()
|
||||
): Response =
|
||||
return httpClient.request(
|
||||
client.baseurl & "/data/" & cid & (if local: "" else: "/network/stream"),
|
||||
httpMethod = HttpGet,
|
||||
)
|
||||
|
||||
proc deleteRaw*(client: CodexClient, cid: string): Response =
|
||||
return client.http().request(client.baseurl & "/data/" & cid, httpMethod = HttpDelete)
|
||||
proc buildUrl*(client: CodexClient, path: string): string =
|
||||
return client.baseurl & path
|
||||
|
||||
@ -68,7 +68,7 @@ method stop*(node: CodexProcess) {.async.} =
|
||||
|
||||
trace "stopping codex client"
|
||||
if client =? node.client:
|
||||
client.close()
|
||||
await client.close()
|
||||
node.client = none CodexClient
|
||||
|
||||
method removeDataDir*(node: CodexProcess) =
|
||||
|
||||
@ -60,13 +60,13 @@ template marketplacesuite*(name: string, body: untyped) =
|
||||
duration: uint64,
|
||||
collateralPerByte: UInt256,
|
||||
minPricePerBytePerSecond: UInt256,
|
||||
) =
|
||||
): Future[void] {.async: (raises: [CancelledError, HttpError, ConfigurationError]).} =
|
||||
let totalCollateral = datasetSize.u256 * collateralPerByte
|
||||
# post availability to each provider
|
||||
for i in 0 ..< providers().len:
|
||||
let provider = providers()[i].client
|
||||
|
||||
discard provider.postAvailability(
|
||||
discard await provider.postAvailability(
|
||||
totalSize = datasetSize,
|
||||
duration = duration.uint64,
|
||||
minPricePerBytePerSecond = minPricePerBytePerSecond,
|
||||
@ -83,16 +83,18 @@ template marketplacesuite*(name: string, body: untyped) =
|
||||
expiry: uint64 = 4.periods,
|
||||
nodes = providers().len,
|
||||
tolerance = 0,
|
||||
): Future[PurchaseId] {.async.} =
|
||||
let id = client.requestStorage(
|
||||
cid,
|
||||
expiry = expiry,
|
||||
duration = duration,
|
||||
proofProbability = proofProbability,
|
||||
collateralPerByte = collateralPerByte,
|
||||
pricePerBytePerSecond = pricePerBytePerSecond,
|
||||
nodes = nodes.uint,
|
||||
tolerance = tolerance.uint,
|
||||
): Future[PurchaseId] {.async: (raises: [CancelledError, HttpError]).} =
|
||||
let id = (
|
||||
await client.requestStorage(
|
||||
cid,
|
||||
expiry = expiry,
|
||||
duration = duration,
|
||||
proofProbability = proofProbability,
|
||||
collateralPerByte = collateralPerByte,
|
||||
pricePerBytePerSecond = pricePerBytePerSecond,
|
||||
nodes = nodes.uint,
|
||||
tolerance = tolerance.uint,
|
||||
)
|
||||
).get
|
||||
|
||||
return id
|
||||
|
||||
@ -37,10 +37,12 @@ type
|
||||
|
||||
MultiNodeSuiteError = object of CatchableError
|
||||
|
||||
const jsonRpcProviderUrl* = "http://127.0.0.1:8545"
|
||||
|
||||
proc raiseMultiNodeSuiteError(msg: string) =
|
||||
raise newException(MultiNodeSuiteError, msg)
|
||||
|
||||
proc nextFreePort(startPort: int): Future[int] {.async.} =
|
||||
proc nextFreePort*(startPort: int): Future[int] {.async.} =
|
||||
proc client(server: StreamServer, transp: StreamTransport) {.async.} =
|
||||
await transp.closeWait()
|
||||
|
||||
@ -60,6 +62,15 @@ proc nextFreePort(startPort: int): Future[int] {.async.} =
|
||||
trace "port is not free", port
|
||||
inc port
|
||||
|
||||
proc sanitize(pathSegment: string): string =
|
||||
var sanitized = pathSegment
|
||||
for invalid in invalidFilenameChars.items:
|
||||
sanitized = sanitized.replace(invalid, '_').replace(' ', '_')
|
||||
sanitized
|
||||
|
||||
proc getTempDirName*(starttime: string, role: Role, roleIdx: int): string =
|
||||
getTempDir() / "Codex" / sanitize($starttime) / sanitize($role & "_" & $roleIdx)
|
||||
|
||||
template multinodesuite*(name: string, body: untyped) =
|
||||
asyncchecksuite name:
|
||||
# Following the problem described here:
|
||||
@ -82,7 +93,6 @@ template multinodesuite*(name: string, body: untyped) =
|
||||
# .withEthProvider("ws://localhost:8545")
|
||||
# .some,
|
||||
# ...
|
||||
let jsonRpcProviderUrl = "http://127.0.0.1:8545"
|
||||
var running {.inject, used.}: seq[RunningNode]
|
||||
var bootstrapNodes: seq[string]
|
||||
let starttime = now().format("yyyy-MM-dd'_'HH:mm:ss")
|
||||
@ -148,8 +158,7 @@ template multinodesuite*(name: string, body: untyped) =
|
||||
raiseMultiNodeSuiteError "Cannot start node at nodeIdx " & $nodeIdx &
|
||||
", not enough eth accounts."
|
||||
|
||||
let datadir =
|
||||
getTempDir() / "Codex" / sanitize($starttime) / sanitize($role & "_" & $roleIdx)
|
||||
let datadir = getTempDirName(starttime, role, roleIdx)
|
||||
|
||||
try:
|
||||
if config.logFile.isSome:
|
||||
@ -275,8 +284,10 @@ template multinodesuite*(name: string, body: untyped) =
|
||||
fail()
|
||||
quit(1)
|
||||
|
||||
proc updateBootstrapNodes(node: CodexProcess) =
|
||||
without ninfo =? node.client.info():
|
||||
proc updateBootstrapNodes(
|
||||
node: CodexProcess
|
||||
): Future[void] {.async: (raises: [CatchableError]).} =
|
||||
without ninfo =? await node.client.info():
|
||||
# raise CatchableError instead of Defect (with .get or !) so we
|
||||
# can gracefully shutdown and prevent zombies
|
||||
raiseMultiNodeSuiteError "Failed to get node info"
|
||||
@ -315,14 +326,14 @@ template multinodesuite*(name: string, body: untyped) =
|
||||
for config in clients.configs:
|
||||
let node = await startClientNode(config)
|
||||
running.add RunningNode(role: Role.Client, node: node)
|
||||
CodexProcess(node).updateBootstrapNodes()
|
||||
await CodexProcess(node).updateBootstrapNodes()
|
||||
|
||||
if var providers =? nodeConfigs.providers:
|
||||
failAndTeardownOnError "failed to start provider nodes":
|
||||
for config in providers.configs.mitems:
|
||||
let node = await startProviderNode(config)
|
||||
running.add RunningNode(role: Role.Provider, node: node)
|
||||
CodexProcess(node).updateBootstrapNodes()
|
||||
await CodexProcess(node).updateBootstrapNodes()
|
||||
|
||||
if var validators =? nodeConfigs.validators:
|
||||
failAndTeardownOnError "failed to start validator nodes":
|
||||
|
||||
@ -18,11 +18,11 @@ multinodesuite "Node block expiration tests":
|
||||
let client = clients()[0]
|
||||
let clientApi = client.client
|
||||
|
||||
let contentId = clientApi.upload(content).get
|
||||
let contentId = (await clientApi.upload(content)).get
|
||||
|
||||
await sleepAsync(2.seconds)
|
||||
|
||||
let download = clientApi.download(contentId, local = true)
|
||||
let download = await clientApi.download(contentId, local = true)
|
||||
|
||||
check:
|
||||
download.isOk
|
||||
@ -39,12 +39,12 @@ multinodesuite "Node block expiration tests":
|
||||
let client = clients()[0]
|
||||
let clientApi = client.client
|
||||
|
||||
let contentId = clientApi.upload(content).get
|
||||
let contentId = (await clientApi.upload(content)).get
|
||||
|
||||
await sleepAsync(3.seconds)
|
||||
|
||||
let download = clientApi.download(contentId, local = true)
|
||||
let download = await clientApi.download(contentId, local = true)
|
||||
|
||||
check:
|
||||
download.isFailure
|
||||
download.error.msg == "404 Not Found"
|
||||
download.error.msg == "404"
|
||||
|
||||
@ -13,21 +13,18 @@ marketplacesuite "Bug #821 - node crashes during erasure coding":
|
||||
.withLogFile()
|
||||
# uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
|
||||
.withLogTopics("node", "erasure", "marketplace").some,
|
||||
providers: CodexConfigs.init(nodes = 0)
|
||||
# .debug() # uncomment to enable console log output
|
||||
# .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
|
||||
# .withLogTopics("node", "marketplace", "sales", "reservations", "node", "proving", "clock")
|
||||
.some,
|
||||
providers: CodexConfigs.init(nodes = 0).some,
|
||||
):
|
||||
let pricePerBytePerSecond = 1.u256
|
||||
let duration = 20.periods
|
||||
let collateralPerByte = 1.u256
|
||||
let expiry = 10.periods
|
||||
let data = await RandomChunker.example(blocks = 8)
|
||||
let client = clients()[0]
|
||||
let clientApi = client.client
|
||||
let
|
||||
pricePerBytePerSecond = 1.u256
|
||||
duration = 20.periods
|
||||
collateralPerByte = 1.u256
|
||||
expiry = 10.periods
|
||||
data = await RandomChunker.example(blocks = 8)
|
||||
client = clients()[0]
|
||||
clientApi = client.client
|
||||
|
||||
let cid = clientApi.upload(data).get
|
||||
let cid = (await clientApi.upload(data)).get
|
||||
|
||||
var requestId = none RequestId
|
||||
proc onStorageRequested(eventResult: ?!StorageRequested) =
|
||||
@ -49,9 +46,11 @@ marketplacesuite "Bug #821 - node crashes during erasure coding":
|
||||
|
||||
check eventually(requestId.isSome, timeout = expiry.int * 1000)
|
||||
|
||||
let request = await marketplace.getRequest(requestId.get)
|
||||
let cidFromRequest = request.content.cid
|
||||
let downloaded = await clientApi.downloadBytes(cidFromRequest, local = true)
|
||||
let
|
||||
request = await marketplace.getRequest(requestId.get)
|
||||
cidFromRequest = request.content.cid
|
||||
downloaded = await clientApi.downloadBytes(cidFromRequest, local = true)
|
||||
|
||||
check downloaded.isOk
|
||||
check downloaded.get.toHex == data.toHex
|
||||
|
||||
|
||||
@ -1,3 +1,5 @@
|
||||
import std/times
|
||||
import std/httpclient
|
||||
import ../examples
|
||||
import ../contracts/time
|
||||
import ../contracts/deployment
|
||||
@ -37,15 +39,17 @@ marketplacesuite "Marketplace":
|
||||
let size = 0xFFFFFF.uint64
|
||||
let data = await RandomChunker.example(blocks = blocks)
|
||||
# host makes storage available
|
||||
let availability = host.postAvailability(
|
||||
totalSize = size,
|
||||
duration = 20 * 60.uint64,
|
||||
minPricePerBytePerSecond = minPricePerBytePerSecond,
|
||||
totalCollateral = size.u256 * minPricePerBytePerSecond,
|
||||
let availability = (
|
||||
await host.postAvailability(
|
||||
totalSize = size,
|
||||
duration = 20 * 60.uint64,
|
||||
minPricePerBytePerSecond = minPricePerBytePerSecond,
|
||||
totalCollateral = size.u256 * minPricePerBytePerSecond,
|
||||
)
|
||||
).get
|
||||
|
||||
# client requests storage
|
||||
let cid = client.upload(data).get
|
||||
let cid = (await client.upload(data)).get
|
||||
let id = await client.requestStorage(
|
||||
cid,
|
||||
duration = 20 * 60.uint64,
|
||||
@ -57,15 +61,17 @@ marketplacesuite "Marketplace":
|
||||
tolerance = ecTolerance,
|
||||
)
|
||||
|
||||
check eventually(client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000)
|
||||
let purchase = client.getPurchase(id).get
|
||||
check eventually(
|
||||
await client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000
|
||||
)
|
||||
let purchase = (await client.getPurchase(id)).get
|
||||
check purchase.error == none string
|
||||
let availabilities = host.getAvailabilities().get
|
||||
let availabilities = (await host.getAvailabilities()).get
|
||||
check availabilities.len == 1
|
||||
let newSize = availabilities[0].freeSize
|
||||
check newSize > 0 and newSize < size
|
||||
|
||||
let reservations = host.getAvailabilityReservations(availability.id).get
|
||||
let reservations = (await host.getAvailabilityReservations(availability.id)).get
|
||||
check reservations.len == 3
|
||||
check reservations[0].requestId == purchase.requestId
|
||||
|
||||
@ -80,15 +86,17 @@ marketplacesuite "Marketplace":
|
||||
|
||||
# host makes storage available
|
||||
let startBalanceHost = await token.balanceOf(hostAccount)
|
||||
discard host.postAvailability(
|
||||
totalSize = size,
|
||||
duration = 20 * 60.uint64,
|
||||
minPricePerBytePerSecond = minPricePerBytePerSecond,
|
||||
totalCollateral = size.u256 * minPricePerBytePerSecond,
|
||||
discard (
|
||||
await host.postAvailability(
|
||||
totalSize = size,
|
||||
duration = 20 * 60.uint64,
|
||||
minPricePerBytePerSecond = minPricePerBytePerSecond,
|
||||
totalCollateral = size.u256 * minPricePerBytePerSecond,
|
||||
)
|
||||
).get
|
||||
|
||||
# client requests storage
|
||||
let cid = client.upload(data).get
|
||||
let cid = (await client.upload(data)).get
|
||||
let id = await client.requestStorage(
|
||||
cid,
|
||||
duration = duration,
|
||||
@ -100,8 +108,10 @@ marketplacesuite "Marketplace":
|
||||
tolerance = ecTolerance,
|
||||
)
|
||||
|
||||
check eventually(client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000)
|
||||
let purchase = client.getPurchase(id).get
|
||||
check eventually(
|
||||
await client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000
|
||||
)
|
||||
let purchase = (await client.getPurchase(id)).get
|
||||
check purchase.error == none string
|
||||
|
||||
let clientBalanceBeforeFinished = await token.balanceOf(clientAccount)
|
||||
@ -158,7 +168,7 @@ marketplacesuite "Marketplace payouts":
|
||||
# provider makes storage available
|
||||
let datasetSize = datasetSize(blocks, ecNodes, ecTolerance)
|
||||
let totalAvailabilitySize = (datasetSize div 2).truncate(uint64)
|
||||
discard providerApi.postAvailability(
|
||||
discard await providerApi.postAvailability(
|
||||
# make availability size small enough that we can't fill all the slots,
|
||||
# thus causing a cancellation
|
||||
totalSize = totalAvailabilitySize,
|
||||
@ -167,7 +177,7 @@ marketplacesuite "Marketplace payouts":
|
||||
totalCollateral = collateralPerByte * totalAvailabilitySize.u256,
|
||||
)
|
||||
|
||||
let cid = clientApi.upload(data).get
|
||||
let cid = (await clientApi.upload(data)).get
|
||||
|
||||
var slotIdxFilled = none uint64
|
||||
proc onSlotFilled(eventResult: ?!SlotFilled) =
|
||||
@ -189,11 +199,11 @@ marketplacesuite "Marketplace payouts":
|
||||
|
||||
# wait until one slot is filled
|
||||
check eventually(slotIdxFilled.isSome, timeout = expiry.int * 1000)
|
||||
let slotId = slotId(!clientApi.requestId(id), !slotIdxFilled)
|
||||
let slotId = slotId(!(await clientApi.requestId(id)), !slotIdxFilled)
|
||||
|
||||
# wait until sale is cancelled
|
||||
await ethProvider.advanceTime(expiry.u256)
|
||||
check eventually providerApi.saleStateIs(slotId, "SaleCancelled")
|
||||
check eventually await providerApi.saleStateIs(slotId, "SaleCancelled")
|
||||
|
||||
await advanceToNextPeriod()
|
||||
|
||||
|
||||
@ -42,14 +42,14 @@ marketplacesuite "Hosts submit regular proofs":
let data = await RandomChunker.example(blocks = blocks)
let datasetSize =
datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance)
createAvailabilities(
await createAvailabilities(
datasetSize.truncate(uint64),
duration,
collateralPerByte,
minPricePerBytePerSecond,
)

let cid = client0.upload(data).get
let cid = (await client0.upload(data)).get

let purchaseId = await client0.requestStorage(
cid,
@ -59,13 +59,13 @@ marketplacesuite "Hosts submit regular proofs":
|
||||
tolerance = ecTolerance,
|
||||
)
|
||||
|
||||
let purchase = client0.getPurchase(purchaseId).get
|
||||
let purchase = (await client0.getPurchase(purchaseId)).get
|
||||
check purchase.error == none string
|
||||
|
||||
let slotSize = slotSize(blocks, ecNodes, ecTolerance)
|
||||
|
||||
check eventually(
|
||||
client0.purchaseStateIs(purchaseId, "started"), timeout = expiry.int * 1000
|
||||
await client0.purchaseStateIs(purchaseId, "started"), timeout = expiry.int * 1000
|
||||
)
|
||||
|
||||
var proofWasSubmitted = false
|
||||
@ -119,27 +119,29 @@ marketplacesuite "Simulate invalid proofs":
|
||||
let data = await RandomChunker.example(blocks = blocks)
|
||||
let datasetSize =
|
||||
datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance)
|
||||
createAvailabilities(
|
||||
await createAvailabilities(
|
||||
datasetSize.truncate(uint64),
|
||||
duration,
|
||||
collateralPerByte,
|
||||
minPricePerBytePerSecond,
|
||||
)
|
||||
|
||||
let cid = client0.upload(data).get
|
||||
let cid = (await client0.upload(data)).get
|
||||
|
||||
let purchaseId = await client0.requestStorage(
|
||||
cid,
|
||||
expiry = expiry,
|
||||
duration = duration,
|
||||
nodes = ecNodes,
|
||||
tolerance = ecTolerance,
|
||||
proofProbability = 1.u256,
|
||||
let purchaseId = (
|
||||
await client0.requestStorage(
|
||||
cid,
|
||||
expiry = expiry,
|
||||
duration = duration,
|
||||
nodes = ecNodes,
|
||||
tolerance = ecTolerance,
|
||||
proofProbability = 1.u256,
|
||||
)
|
||||
)
|
||||
let requestId = client0.requestId(purchaseId).get
|
||||
let requestId = (await client0.requestId(purchaseId)).get
|
||||
|
||||
check eventually(
|
||||
client0.purchaseStateIs(purchaseId, "started"), timeout = expiry.int * 1000
|
||||
await client0.purchaseStateIs(purchaseId, "started"), timeout = expiry.int * 1000
|
||||
)
|
||||
|
||||
var slotWasFreed = false
|
||||
@ -182,14 +184,14 @@ marketplacesuite "Simulate invalid proofs":
|
||||
let data = await RandomChunker.example(blocks = blocks)
|
||||
let datasetSize =
|
||||
datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance)
|
||||
createAvailabilities(
|
||||
await createAvailabilities(
|
||||
datasetSize.truncate(uint64),
|
||||
duration,
|
||||
collateralPerByte,
|
||||
minPricePerBytePerSecond,
|
||||
)
|
||||
|
||||
let cid = client0.upload(data).get
|
||||
let cid = (await client0.upload(data)).get
|
||||
|
||||
let purchaseId = await client0.requestStorage(
|
||||
cid,
|
||||
@ -199,7 +201,7 @@ marketplacesuite "Simulate invalid proofs":
|
||||
tolerance = ecTolerance,
|
||||
proofProbability = 1.u256,
|
||||
)
|
||||
let requestId = client0.requestId(purchaseId).get
|
||||
let requestId = (await client0.requestId(purchaseId)).get
|
||||
|
||||
var slotWasFilled = false
|
||||
proc onSlotFilled(eventResult: ?!SlotFilled) =
|
||||
@ -273,7 +275,9 @@ marketplacesuite "Simulate invalid proofs":
|
||||
# totalSize=slotSize, # should match 1 slot only
|
||||
# duration=totalPeriods.periods.u256,
|
||||
# minPricePerBytePerSecond=minPricePerBytePerSecond,
|
||||
# totalCollateral=slotSize * minPricePerBytePerSecond
|
||||
# totalCollateral=slotSize * minPricePerBytePerSecond,
|
||||
# enabled = true.some,
|
||||
# until = 0.SecondsSince1970.some,
|
||||
# )
|
||||
|
||||
# let cid = client0.upload(data).get
|
||||
|
||||
@ -8,22 +8,26 @@ import ../examples
twonodessuite "Purchasing":
test "node handles storage request", twoNodesConfig:
let data = await RandomChunker.example(blocks = 2)
let cid = client1.upload(data).get
let id1 = client1.requestStorage(
cid,
duration = 100.uint64,
pricePerBytePerSecond = 1.u256,
proofProbability = 3.u256,
expiry = 10.uint64,
collateralPerByte = 1.u256,
let cid = (await client1.upload(data)).get
let id1 = (
await client1.requestStorage(
cid,
duration = 100.uint64,
pricePerBytePerSecond = 1.u256,
proofProbability = 3.u256,
expiry = 10.uint64,
collateralPerByte = 1.u256,
)
).get
let id2 = client1.requestStorage(
|
||||
cid,
|
||||
duration = 400.uint64,
|
||||
pricePerBytePerSecond = 2.u256,
|
||||
proofProbability = 6.u256,
|
||||
expiry = 10.uint64,
|
||||
collateralPerByte = 2.u256,
|
||||
let id2 = (
|
||||
await client1.requestStorage(
|
||||
cid,
|
||||
duration = 400.uint64,
|
||||
pricePerBytePerSecond = 2.u256,
|
||||
proofProbability = 6.u256,
|
||||
expiry = 10.uint64,
|
||||
collateralPerByte = 2.u256,
|
||||
)
|
||||
).get
|
||||
check id1 != id2
|
||||
|
||||
@ -34,19 +38,21 @@ twonodessuite "Purchasing":
|
||||
rng, size = DefaultBlockSize * 2, chunkSize = DefaultBlockSize * 2
|
||||
)
|
||||
let data = await chunker.getBytes()
|
||||
let cid = client1.upload(byteutils.toHex(data)).get
|
||||
let id = client1.requestStorage(
|
||||
cid,
|
||||
duration = 100.uint64,
|
||||
pricePerBytePerSecond = 1.u256,
|
||||
proofProbability = 3.u256,
|
||||
expiry = 30.uint64,
|
||||
collateralPerByte = 1.u256,
|
||||
nodes = 3,
|
||||
tolerance = 1,
|
||||
let cid = (await client1.upload(byteutils.toHex(data))).get
|
||||
let id = (
|
||||
await client1.requestStorage(
|
||||
cid,
|
||||
duration = 100.uint64,
|
||||
pricePerBytePerSecond = 1.u256,
|
||||
proofProbability = 3.u256,
|
||||
expiry = 30.uint64,
|
||||
collateralPerByte = 1.u256,
|
||||
nodes = 3,
|
||||
tolerance = 1,
|
||||
)
|
||||
).get
|
||||
|
||||
let request = client1.getPurchase(id).get.request.get
|
||||
let request = (await client1.getPurchase(id)).get.request.get
|
||||
|
||||
check request.content.cid.data.buffer.len > 0
|
||||
check request.ask.duration == 100.uint64
|
||||
@ -75,23 +81,29 @@ twonodessuite "Purchasing":
|
||||
|
||||
test "node remembers purchase status after restart", twoNodesConfig:
|
||||
let data = await RandomChunker.example(blocks = 2)
|
||||
let cid = client1.upload(data).get
|
||||
let id = client1.requestStorage(
|
||||
cid,
|
||||
duration = 10 * 60.uint64,
|
||||
pricePerBytePerSecond = 1.u256,
|
||||
proofProbability = 3.u256,
|
||||
expiry = 5 * 60.uint64,
|
||||
collateralPerByte = 1.u256,
|
||||
nodes = 3.uint,
|
||||
tolerance = 1.uint,
|
||||
let cid = (await client1.upload(data)).get
|
||||
let id = (
|
||||
await client1.requestStorage(
|
||||
cid,
|
||||
duration = 10 * 60.uint64,
|
||||
pricePerBytePerSecond = 1.u256,
|
||||
proofProbability = 3.u256,
|
||||
expiry = 5 * 60.uint64,
|
||||
collateralPerByte = 1.u256,
|
||||
nodes = 3.uint,
|
||||
tolerance = 1.uint,
|
||||
)
|
||||
).get
|
||||
check eventually(client1.purchaseStateIs(id, "submitted"), timeout = 3 * 60 * 1000)
|
||||
check eventually(
|
||||
await client1.purchaseStateIs(id, "submitted"), timeout = 3 * 60 * 1000
|
||||
)
|
||||
|
||||
await node1.restart()
|
||||
|
||||
check eventually(client1.purchaseStateIs(id, "submitted"), timeout = 3 * 60 * 1000)
|
||||
let request = client1.getPurchase(id).get.request.get
|
||||
check eventually(
|
||||
await client1.purchaseStateIs(id, "submitted"), timeout = 3 * 60 * 1000
|
||||
)
|
||||
let request = (await client1.getPurchase(id)).get.request.get
|
||||
check request.ask.duration == (10 * 60).uint64
|
||||
check request.ask.pricePerBytePerSecond == 1.u256
|
||||
check request.ask.proofProbability == 3.u256
|
||||
@ -102,19 +114,20 @@ twonodessuite "Purchasing":
|
||||
|
||||
test "node requires expiry and its value to be in future", twoNodesConfig:
|
||||
let data = await RandomChunker.example(blocks = 2)
|
||||
let cid = client1.upload(data).get
|
||||
let cid = (await client1.upload(data)).get
|
||||
|
||||
let responseMissing = client1.requestStorageRaw(
|
||||
let responseMissing = await client1.requestStorageRaw(
|
||||
cid,
|
||||
duration = 1.uint64,
|
||||
pricePerBytePerSecond = 1.u256,
|
||||
proofProbability = 3.u256,
|
||||
collateralPerByte = 1.u256,
|
||||
)
|
||||
check responseMissing.status == "400 Bad Request"
|
||||
check responseMissing.body == "Expiry required"
|
||||
check responseMissing.status == 422
|
||||
check (await responseMissing.body) ==
|
||||
"Expiry must be greater than zero and less than the request's duration"
|
||||
|
||||
let responseBefore = client1.requestStorageRaw(
|
||||
let responseBefore = await client1.requestStorageRaw(
|
||||
cid,
|
||||
duration = 10.uint64,
|
||||
pricePerBytePerSecond = 1.u256,
|
||||
@ -122,6 +135,6 @@ twonodessuite "Purchasing":
|
||||
collateralPerByte = 1.u256,
|
||||
expiry = 10.uint64,
|
||||
)
|
||||
check responseBefore.status == "400 Bad Request"
|
||||
check "Expiry needs value bigger then zero and smaller then the request's duration" in
|
||||
responseBefore.body
|
||||
check responseBefore.status == 422
|
||||
check "Expiry must be greater than zero and less than the request's duration" in
|
||||
(await responseBefore.body)
|
||||
|
||||
@ -1,4 +1,3 @@
import std/httpclient
import std/importutils
import std/net
import std/sequtils
@ -14,29 +13,32 @@ import json
|
||||
|
||||
twonodessuite "REST API":
|
||||
test "nodes can print their peer information", twoNodesConfig:
|
||||
check !client1.info() != !client2.info()
|
||||
check !(await client1.info()) != !(await client2.info())
|
||||
|
||||
test "nodes can set chronicles log level", twoNodesConfig:
|
||||
client1.setLogLevel("DEBUG;TRACE:codex")
|
||||
await client1.setLogLevel("DEBUG;TRACE:codex")
|
||||
|
||||
test "node accepts file uploads", twoNodesConfig:
|
||||
let cid1 = client1.upload("some file contents").get
|
||||
let cid2 = client1.upload("some other contents").get
|
||||
let cid1 = (await client1.upload("some file contents")).get
|
||||
let cid2 = (await client1.upload("some other contents")).get
|
||||
|
||||
check cid1 != cid2
|
||||
|
||||
test "node shows used and available space", twoNodesConfig:
|
||||
discard client1.upload("some file contents").get
|
||||
discard (await client1.upload("some file contents")).get
|
||||
let totalSize = 12.uint64
|
||||
let minPricePerBytePerSecond = 1.u256
|
||||
let totalCollateral = totalSize.u256 * minPricePerBytePerSecond
|
||||
discard client1.postAvailability(
|
||||
totalSize = totalSize,
|
||||
duration = 2.uint64,
|
||||
minPricePerBytePerSecond = minPricePerBytePerSecond,
|
||||
totalCollateral = totalCollateral,
|
||||
discard (
|
||||
await client1.postAvailability(
|
||||
totalSize = totalSize,
|
||||
duration = 2.uint64,
|
||||
minPricePerBytePerSecond = minPricePerBytePerSecond,
|
||||
totalCollateral = totalCollateral,
|
||||
enabled = true.some,
|
||||
)
|
||||
).get
|
||||
let space = client1.space().tryGet()
|
||||
let space = (await client1.space()).tryGet()
|
||||
check:
|
||||
space.totalBlocks == 2
|
||||
space.quotaMaxBytes == 21474836480.NBytes
|
||||
@ -47,128 +49,29 @@ twonodessuite "REST API":
|
||||
let content1 = "some file contents"
|
||||
let content2 = "some other contents"
|
||||
|
||||
let cid1 = client1.upload(content1).get
|
||||
let cid2 = client1.upload(content2).get
|
||||
let list = client1.list().get
|
||||
let cid1 = (await client1.upload(content1)).get
|
||||
let cid2 = (await client1.upload(content2)).get
|
||||
let list = (await client1.list()).get
|
||||
|
||||
check:
|
||||
[cid1, cid2].allIt(it in list.content.mapIt(it.cid))
|
||||
|
||||
test "request storage fails for datasets that are too small", twoNodesConfig:
|
||||
let cid = client1.upload("some file contents").get
|
||||
let response = client1.requestStorageRaw(
|
||||
cid,
|
||||
duration = 10.uint64,
|
||||
pricePerBytePerSecond = 1.u256,
|
||||
proofProbability = 3.u256,
|
||||
collateralPerByte = 1.u256,
|
||||
expiry = 9.uint64,
|
||||
)
|
||||
|
||||
check:
|
||||
response.status == "400 Bad Request"
|
||||
response.body ==
|
||||
"Dataset too small for erasure parameters, need at least " &
|
||||
$(2 * DefaultBlockSize.int) & " bytes"
|
||||
|
||||
test "request storage succeeds for sufficiently sized datasets", twoNodesConfig:
|
||||
let data = await RandomChunker.example(blocks = 2)
|
||||
let cid = client1.upload(data).get
|
||||
let response = client1.requestStorageRaw(
|
||||
cid,
|
||||
duration = 10.uint64,
|
||||
pricePerBytePerSecond = 1.u256,
|
||||
proofProbability = 3.u256,
|
||||
collateralPerByte = 1.u256,
|
||||
expiry = 9.uint64,
|
||||
let cid = (await client1.upload(data)).get
|
||||
let response = (
|
||||
await client1.requestStorageRaw(
|
||||
cid,
|
||||
duration = 10.uint64,
|
||||
pricePerBytePerSecond = 1.u256,
|
||||
proofProbability = 3.u256,
|
||||
collateralPerByte = 1.u256,
|
||||
expiry = 9.uint64,
|
||||
)
|
||||
)
|
||||
|
||||
check:
|
||||
response.status == "200 OK"
|
||||
|
||||
test "request storage fails if tolerance is zero", twoNodesConfig:
|
||||
let data = await RandomChunker.example(blocks = 2)
|
||||
let cid = client1.upload(data).get
|
||||
let duration = 100.uint64
|
||||
let pricePerBytePerSecond = 1.u256
|
||||
let proofProbability = 3.u256
|
||||
let expiry = 30.uint64
|
||||
let collateralPerByte = 1.u256
|
||||
let nodes = 3
|
||||
let tolerance = 0
|
||||
|
||||
var responseBefore = client1.requestStorageRaw(
|
||||
cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry,
|
||||
nodes.uint, tolerance.uint,
|
||||
)
|
||||
|
||||
check responseBefore.status == "400 Bad Request"
|
||||
check responseBefore.body == "Tolerance needs to be bigger then zero"
|
||||
|
||||
test "request storage fails if duration exceeds limit", twoNodesConfig:
|
||||
let data = await RandomChunker.example(blocks = 2)
|
||||
let cid = client1.upload(data).get
|
||||
let duration = (31 * 24 * 60 * 60).uint64
|
||||
# 31 days TODO: this should not be hardcoded, but waits for https://github.com/codex-storage/nim-codex/issues/1056
|
||||
let proofProbability = 3.u256
|
||||
let expiry = 30.uint
|
||||
let collateralPerByte = 1.u256
|
||||
let nodes = 3
|
||||
let tolerance = 2
|
||||
let pricePerBytePerSecond = 1.u256
|
||||
|
||||
var responseBefore = client1.requestStorageRaw(
|
||||
cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry,
|
||||
nodes.uint, tolerance.uint,
|
||||
)
|
||||
|
||||
check responseBefore.status == "400 Bad Request"
|
||||
check "Duration exceeds limit of" in responseBefore.body
|
||||
|
||||
test "request storage fails if nodes and tolerance aren't correct", twoNodesConfig:
|
||||
let data = await RandomChunker.example(blocks = 2)
|
||||
let cid = client1.upload(data).get
|
||||
let duration = 100.uint64
|
||||
let pricePerBytePerSecond = 1.u256
|
||||
let proofProbability = 3.u256
|
||||
let expiry = 30.uint64
|
||||
let collateralPerByte = 1.u256
|
||||
let ecParams = @[(1, 1), (2, 1), (3, 2), (3, 3)]
|
||||
|
||||
for ecParam in ecParams:
|
||||
let (nodes, tolerance) = ecParam
|
||||
|
||||
var responseBefore = client1.requestStorageRaw(
|
||||
cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte,
|
||||
expiry, nodes.uint, tolerance.uint,
|
||||
)
|
||||
|
||||
check responseBefore.status == "400 Bad Request"
|
||||
check responseBefore.body ==
|
||||
"Invalid parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`"
|
||||
|
||||
test "request storage fails if tolerance > nodes (underflow protection)",
|
||||
twoNodesConfig:
|
||||
let data = await RandomChunker.example(blocks = 2)
|
||||
let cid = client1.upload(data).get
|
||||
let duration = 100.uint64
|
||||
let pricePerBytePerSecond = 1.u256
|
||||
let proofProbability = 3.u256
|
||||
let expiry = 30.uint64
|
||||
let collateralPerByte = 1.u256
|
||||
let ecParams = @[(0, 1), (1, 2), (2, 3)]
|
||||
|
||||
for ecParam in ecParams:
|
||||
let (nodes, tolerance) = ecParam
|
||||
|
||||
var responseBefore = client1.requestStorageRaw(
|
||||
cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte,
|
||||
expiry, nodes.uint, tolerance.uint,
|
||||
)
|
||||
|
||||
check responseBefore.status == "400 Bad Request"
|
||||
check responseBefore.body ==
|
||||
"Invalid parameters: `tolerance` cannot be greater than `nodes`"
|
||||
response.status == 200
|
||||
|
||||
for ecParams in @[
|
||||
(minBlocks: 2, nodes: 3, tolerance: 1), (minBlocks: 3, nodes: 5, tolerance: 2)
|
||||
@ -177,70 +80,55 @@ twonodessuite "REST API":
|
||||
test "request storage succeeds if nodes and tolerance within range " &
|
||||
fmt"({minBlocks=}, {nodes=}, {tolerance=})", twoNodesConfig:
|
||||
let data = await RandomChunker.example(blocks = minBlocks)
|
||||
let cid = client1.upload(data).get
|
||||
let cid = (await client1.upload(data)).get
|
||||
let duration = 100.uint64
|
||||
let pricePerBytePerSecond = 1.u256
|
||||
let proofProbability = 3.u256
|
||||
let expiry = 30.uint64
|
||||
let collateralPerByte = 1.u256
|
||||
|
||||
var responseBefore = client1.requestStorageRaw(
|
||||
cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte,
|
||||
expiry, nodes.uint, tolerance.uint,
|
||||
var responseBefore = (
|
||||
await client1.requestStorageRaw(
|
||||
cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte,
|
||||
expiry, nodes.uint, tolerance.uint,
|
||||
)
|
||||
)
|
||||
|
||||
check responseBefore.status == "200 OK"
|
||||
check responseBefore.status == 200
|
||||
|
||||
test "node accepts file uploads with content type", twoNodesConfig:
|
||||
let headers = newHttpHeaders({"Content-Type": "text/plain"})
|
||||
let response = client1.uploadRaw("some file contents", headers)
|
||||
let headers = @[("Content-Type", "text/plain")]
|
||||
let response = await client1.uploadRaw("some file contents", headers)
|
||||
|
||||
check response.status == "200 OK"
|
||||
check response.body != ""
|
||||
check response.status == 200
|
||||
check (await response.body) != ""
|
||||
|
||||
test "node accepts file uploads with content disposition", twoNodesConfig:
|
||||
let headers =
|
||||
newHttpHeaders({"Content-Disposition": "attachment; filename=\"example.txt\""})
|
||||
let response = client1.uploadRaw("some file contents", headers)
|
||||
let headers = @[("Content-Disposition", "attachment; filename=\"example.txt\"")]
|
||||
let response = await client1.uploadRaw("some file contents", headers)
|
||||
|
||||
check response.status == "200 OK"
|
||||
check response.body != ""
|
||||
check response.status == 200
|
||||
check (await response.body) != ""
|
||||
|
||||
test "node accepts file uploads with content disposition without filename",
|
||||
twoNodesConfig:
|
||||
let headers = newHttpHeaders({"Content-Disposition": "attachment"})
|
||||
let response = client1.uploadRaw("some file contents", headers)
|
||||
let headers = @[("Content-Disposition", "attachment")]
|
||||
let response = await client1.uploadRaw("some file contents", headers)
|
||||
|
||||
check response.status == "200 OK"
|
||||
check response.body != ""
|
||||
|
||||
test "upload fails if content disposition contains bad filename", twoNodesConfig:
|
||||
let headers =
|
||||
newHttpHeaders({"Content-Disposition": "attachment; filename=\"exam*ple.txt\""})
|
||||
let response = client1.uploadRaw("some file contents", headers)
|
||||
|
||||
check response.status == "422 Unprocessable Entity"
|
||||
check response.body == "The filename is not valid."
|
||||
|
||||
test "upload fails if content type is invalid", twoNodesConfig:
|
||||
let headers = newHttpHeaders({"Content-Type": "hello/world"})
|
||||
let response = client1.uploadRaw("some file contents", headers)
|
||||
|
||||
check response.status == "422 Unprocessable Entity"
|
||||
check response.body == "The MIME type 'hello/world' is not valid."
|
||||
check response.status == 200
|
||||
check (await response.body) != ""
|
||||
|
||||
test "node retrieve the metadata", twoNodesConfig:
|
||||
let headers = newHttpHeaders(
|
||||
{
|
||||
"Content-Type": "text/plain",
|
||||
"Content-Disposition": "attachment; filename=\"example.txt\"",
|
||||
}
|
||||
)
|
||||
let uploadResponse = client1.uploadRaw("some file contents", headers)
|
||||
let cid = uploadResponse.body
|
||||
let listResponse = client1.listRaw()
|
||||
let headers =
|
||||
@[
|
||||
("Content-Type", "text/plain"),
|
||||
("Content-Disposition", "attachment; filename=\"example.txt\""),
|
||||
]
|
||||
let uploadResponse = await client1.uploadRaw("some file contents", headers)
|
||||
let cid = await uploadResponse.body
|
||||
let listResponse = await client1.listRaw()
|
||||
|
||||
let jsonData = parseJson(listResponse.body)
|
||||
let jsonData = parseJson(await listResponse.body)
|
||||
|
||||
check jsonData.hasKey("content") == true
|
||||
|
||||
@ -256,83 +144,79 @@ twonodessuite "REST API":
|
||||
check manifest["mimetype"].getStr() == "text/plain"
|
||||
|
||||
test "node set the headers when for download", twoNodesConfig:
|
||||
let headers = newHttpHeaders(
|
||||
{
|
||||
"Content-Disposition": "attachment; filename=\"example.txt\"",
|
||||
"Content-Type": "text/plain",
|
||||
}
|
||||
)
|
||||
let headers =
|
||||
@[
|
||||
("Content-Disposition", "attachment; filename=\"example.txt\""),
|
||||
("Content-Type", "text/plain"),
|
||||
]
|
||||
|
||||
let uploadResponse = client1.uploadRaw("some file contents", headers)
|
||||
let cid = uploadResponse.body
|
||||
let uploadResponse = await client1.uploadRaw("some file contents", headers)
|
||||
let cid = await uploadResponse.body
|
||||
|
||||
check uploadResponse.status == "200 OK"
|
||||
check uploadResponse.status == 200
|
||||
|
||||
let response = client1.downloadRaw(cid)
|
||||
let response = await client1.downloadRaw(cid)
|
||||
|
||||
check response.status == "200 OK"
|
||||
check response.headers.hasKey("Content-Type") == true
|
||||
check response.headers["Content-Type"] == "text/plain"
|
||||
check response.headers.hasKey("Content-Disposition") == true
|
||||
check response.headers["Content-Disposition"] ==
|
||||
check response.status == 200
|
||||
check "Content-Type" in response.headers
|
||||
check response.headers.getString("Content-Type") == "text/plain"
|
||||
check "Content-Disposition" in response.headers
|
||||
check response.headers.getString("Content-Disposition") ==
|
||||
"attachment; filename=\"example.txt\""
|
||||
|
||||
let local = true
|
||||
let localResponse = client1.downloadRaw(cid, local)
|
||||
let localResponse = await client1.downloadRaw(cid, local)
|
||||
|
||||
check localResponse.status == "200 OK"
|
||||
check localResponse.headers.hasKey("Content-Type") == true
|
||||
check localResponse.headers["Content-Type"] == "text/plain"
|
||||
check localResponse.headers.hasKey("Content-Disposition") == true
|
||||
check localResponse.headers["Content-Disposition"] ==
|
||||
check localResponse.status == 200
|
||||
check "Content-Type" in localResponse.headers
|
||||
check localResponse.headers.getString("Content-Type") == "text/plain"
|
||||
check "Content-Disposition" in localResponse.headers
|
||||
check localResponse.headers.getString("Content-Disposition") ==
|
||||
"attachment; filename=\"example.txt\""
|
||||
|
||||
test "should delete a dataset when requested", twoNodesConfig:
|
||||
let cid = client1.upload("some file contents").get
|
||||
let cid = (await client1.upload("some file contents")).get
|
||||
|
||||
var response = client1.downloadRaw($cid, local = true)
|
||||
check response.body == "some file contents"
|
||||
var response = await client1.downloadRaw($cid, local = true)
|
||||
check (await response.body) == "some file contents"
|
||||
|
||||
client1.delete(cid).get
|
||||
(await client1.delete(cid)).get
|
||||
|
||||
response = client1.downloadRaw($cid, local = true)
|
||||
check response.status == "404 Not Found"
|
||||
response = await client1.downloadRaw($cid, local = true)
|
||||
check response.status == 404
|
||||
|
||||
test "should return 200 when attempting delete of non-existing block", twoNodesConfig:
|
||||
let response = client1.deleteRaw($(Cid.example()))
|
||||
check response.status == "204 No Content"
|
||||
let response = await client1.deleteRaw($(Cid.example()))
|
||||
check response.status == 204
|
||||
|
||||
test "should return 200 when attempting delete of non-existing dataset",
|
||||
twoNodesConfig:
|
||||
let cid = Manifest.example().makeManifestBlock().get.cid
|
||||
let response = client1.deleteRaw($cid)
|
||||
check response.status == "204 No Content"
|
||||
let response = await client1.deleteRaw($cid)
|
||||
check response.status == 204
|
||||
|
||||
test "should not crash if the download stream is closed before download completes",
|
||||
twoNodesConfig:
|
||||
privateAccess(client1.type)
|
||||
privateAccess(client1.http.type)
|
||||
# FIXME this is not a good test. For some reason, to get this to fail, I have to
|
||||
# store content that is several times the default stream buffer size, otherwise
|
||||
# the test will succeed even when the bug is present. Since this is probably some
|
||||
# setting that is internal to chronos, it might change in future versions,
|
||||
# invalidating this test. Works on Chronos 4.0.3.
|
||||
|
||||
let cid = client1.upload(repeat("some file contents", 1000)).get
|
||||
let httpClient = client1.http()
|
||||
let
|
||||
contents = repeat("b", DefaultStreamBufferSize * 10)
|
||||
cid = (await client1.upload(contents)).get
|
||||
response = await client1.downloadRaw($cid)
|
||||
|
||||
try:
|
||||
# Sadly, there's no high level API for preventing the client from
|
||||
# consuming the whole response, and we need to close the socket
|
||||
# before that happens if we want to trigger the bug, so we need to
|
||||
# resort to this.
|
||||
httpClient.getBody = false
|
||||
let response = client1.downloadRaw($cid, httpClient = httpClient)
|
||||
let reader = response.getBodyReader()
|
||||
|
||||
# Read 4 bytes from the stream just to make sure we actually
|
||||
# receive some data.
|
||||
let data = httpClient.socket.recv(4)
|
||||
check data.len == 4
|
||||
# Read 4 bytes from the stream just to make sure we actually
|
||||
# receive some data.
|
||||
check (bytesToString await reader.read(4)) == "bbbb"
|
||||
|
||||
# Prematurely closes the connection.
|
||||
httpClient.close()
|
||||
finally:
|
||||
httpClient.getBody = true
|
||||
# Abruptly closes the stream (we have to dig all the way to the transport
|
||||
# or Chronos will close things "nicely").
|
||||
response.connection.reader.tsource.close()
|
||||
|
||||
let response = client1.downloadRaw($cid, httpClient = httpClient)
|
||||
check response.body == repeat("some file contents", 1000)
|
||||
let response2 = await client1.downloadRaw($cid)
|
||||
check (await response2.body) == contents
|
||||
|
||||
384
tests/integration/testrestapivalidation.nim
Normal file
@ -0,0 +1,384 @@
import std/httpclient
import std/times
import pkg/ethers
import pkg/codex/manifest
import pkg/codex/conf
import pkg/codex/contracts
from pkg/codex/stores/repostore/types import DefaultQuotaBytes
import ../asynctest
import ../checktest
import ../examples
import ../codex/examples
import ./codexconfig
import ./codexprocess

from ./multinodes import Role, getTempDirName, jsonRpcProviderUrl, nextFreePort

# This suite allows to run fast the basic rest api validation.
# It starts only one node for all the checks in order to speed up
# the execution.
asyncchecksuite "Rest API validation":
|
||||
var node: CodexProcess
|
||||
var config = CodexConfigs.init(nodes = 1).configs[0]
|
||||
let starttime = now().format("yyyy-MM-dd'_'HH:mm:ss")
|
||||
let nodexIdx = 0
|
||||
let datadir = getTempDirName(starttime, Role.Client, nodexIdx)
|
||||
|
||||
config.addCliOption("--api-port", $(waitFor nextFreePort(8081)))
|
||||
config.addCliOption("--data-dir", datadir)
|
||||
config.addCliOption("--nat", "none")
|
||||
config.addCliOption("--listen-addrs", "/ip4/127.0.0.1/tcp/0")
|
||||
config.addCliOption("--disc-port", $(waitFor nextFreePort(8081)))
|
||||
config.addCliOption(StartUpCmd.persistence, "--eth-provider", jsonRpcProviderUrl)
|
||||
config.addCliOption(StartUpCmd.persistence, "--eth-account", $EthAddress.example)
|
||||
|
||||
node =
|
||||
waitFor CodexProcess.startNode(config.cliArgs, config.debugEnabled, $Role.Client)
|
||||
|
||||
waitFor node.waitUntilStarted()
|
||||
|
||||
let client = node.client()
|
||||
|
||||
test "should return 422 when attempting delete of non-existing dataset":
|
||||
let data = await RandomChunker.example(blocks = 2)
|
||||
let cid = (await client.upload(data)).get
|
||||
let duration = 100.uint64
|
||||
let pricePerBytePerSecond = 1.u256
|
||||
let proofProbability = 3.u256
|
||||
let expiry = 30.uint64
|
||||
let collateralPerByte = 1.u256
|
||||
let nodes = 3
|
||||
let tolerance = 0
|
||||
|
||||
var responseBefore = await client.requestStorageRaw(
|
||||
cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry,
|
||||
nodes.uint, tolerance.uint,
|
||||
)
|
||||
|
||||
check responseBefore.status == 422
|
||||
check (await responseBefore.body) == "Tolerance needs to be bigger then zero"
|
||||
|
||||
test "request storage fails for datasets that are too small":
|
||||
let cid = (await client.upload("some file contents")).get
|
||||
let response = (
|
||||
await client.requestStorageRaw(
|
||||
cid,
|
||||
duration = 10.uint64,
|
||||
pricePerBytePerSecond = 1.u256,
|
||||
proofProbability = 3.u256,
|
||||
collateralPerByte = 1.u256,
|
||||
expiry = 9.uint64,
|
||||
)
|
||||
)
|
||||
|
||||
check:
|
||||
response.status == 422
|
||||
(await response.body) ==
|
||||
"Dataset too small for erasure parameters, need at least " &
|
||||
$(2 * DefaultBlockSize.int) & " bytes"
|
||||
|
||||
test "request storage fails if nodes and tolerance aren't correct":
|
||||
let data = await RandomChunker.example(blocks = 2)
|
||||
let cid = (await client.upload(data)).get
|
||||
let duration = 100.uint64
|
||||
let pricePerBytePerSecond = 1.u256
|
||||
let proofProbability = 3.u256
|
||||
let expiry = 30.uint64
|
||||
let collateralPerByte = 1.u256
|
||||
let ecParams = @[(1, 1), (2, 1), (3, 2), (3, 3)]
|
||||
|
||||
for ecParam in ecParams:
|
||||
let (nodes, tolerance) = ecParam
|
||||
|
||||
var responseBefore = (
|
||||
await client.requestStorageRaw(
|
||||
cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte,
|
||||
expiry, nodes.uint, tolerance.uint,
|
||||
)
|
||||
)
|
||||
|
||||
check responseBefore.status == 422
|
||||
check (await responseBefore.body) ==
|
||||
"Invalid parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`"
|
||||
|
||||
test "request storage fails if tolerance > nodes (underflow protection)":
|
||||
let data = await RandomChunker.example(blocks = 2)
|
||||
let cid = (await client.upload(data)).get
|
||||
let duration = 100.uint64
|
||||
let pricePerBytePerSecond = 1.u256
|
||||
let proofProbability = 3.u256
|
||||
let expiry = 30.uint64
|
||||
let collateralPerByte = 1.u256
|
||||
let nodes = 3
|
||||
let tolerance = 0
|
||||
|
||||
var responseBefore = (
|
||||
await client.requestStorageRaw(
|
||||
cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte,
|
||||
expiry, nodes.uint, tolerance.uint,
|
||||
)
|
||||
)
|
||||
|
||||
check responseBefore.status == 422
|
||||
check (await responseBefore.body) == "Tolerance needs to be bigger then zero"
|
||||
|
||||
test "upload fails if content disposition contains bad filename":
|
||||
let headers = @[("Content-Disposition", "attachment; filename=\"exam*ple.txt\"")]
|
||||
let response = await client.uploadRaw("some file contents", headers)
|
||||
|
||||
check response.status == 422
|
||||
check (await response.body) == "The filename is not valid."
|
||||
|
||||
test "upload fails if content type is invalid":
|
||||
let headers = @[("Content-Type", "hello/world")]
|
||||
let response = await client.uploadRaw("some file contents", headers)
|
||||
|
||||
check response.status == 422
|
||||
check (await response.body) == "The MIME type 'hello/world' is not valid."
|
||||
|
||||
test "updating non-existing availability":
|
||||
let nonExistingResponse = await client.patchAvailabilityRaw(
|
||||
AvailabilityId.example,
|
||||
duration = 100.uint64.some,
|
||||
minPricePerBytePerSecond = 2.u256.some,
|
||||
totalCollateral = 200.u256.some,
|
||||
)
|
||||
check nonExistingResponse.status == 404
|
||||
|
||||
test "updating availability - freeSize is not allowed to be changed":
|
||||
let availability = (
|
||||
await client.postAvailability(
|
||||
totalSize = 140000.uint64,
|
||||
duration = 200.uint64,
|
||||
minPricePerBytePerSecond = 3.u256,
|
||||
totalCollateral = 300.u256,
|
||||
)
|
||||
).get
|
||||
let freeSizeResponse =
|
||||
await client.patchAvailabilityRaw(availability.id, freeSize = 110000.uint64.some)
|
||||
check freeSizeResponse.status == 422
|
||||
check "not allowed" in (await freeSizeResponse.body)
|
||||
|
||||
test "creating availability above the node quota returns 422":
|
||||
let response = await client.postAvailabilityRaw(
|
||||
totalSize = 24000000000.uint64,
|
||||
duration = 200.uint64,
|
||||
minPricePerBytePerSecond = 3.u256,
|
||||
totalCollateral = 300.u256,
|
||||
)
|
||||
|
||||
check response.status == 422
|
||||
check (await response.body) == "Not enough storage quota"
|
||||
|
||||
test "updating availability above the node quota returns 422":
|
||||
let availability = (
|
||||
await client.postAvailability(
|
||||
totalSize = 140000.uint64,
|
||||
duration = 200.uint64,
|
||||
minPricePerBytePerSecond = 3.u256,
|
||||
totalCollateral = 300.u256,
|
||||
)
|
||||
).get
|
||||
let response = await client.patchAvailabilityRaw(
|
||||
availability.id, totalSize = 24000000000.uint64.some
|
||||
)
|
||||
|
||||
check response.status == 422
|
||||
check (await response.body) == "Not enough storage quota"
|
||||
|
||||
test "creating availability when total size is zero returns 422":
|
||||
let response = await client.postAvailabilityRaw(
|
||||
totalSize = 0.uint64,
|
||||
duration = 200.uint64,
|
||||
minPricePerBytePerSecond = 3.u256,
|
||||
totalCollateral = 300.u256,
|
||||
)
|
||||
|
||||
check response.status == 422
|
||||
check (await response.body) == "Total size must be larger then zero"
|
||||
|
||||
test "updating availability when total size is zero returns 422":
|
||||
let availability = (
|
||||
await client.postAvailability(
|
||||
totalSize = 140000.uint64,
|
||||
duration = 200.uint64,
|
||||
minPricePerBytePerSecond = 3.u256,
|
||||
totalCollateral = 300.u256,
|
||||
)
|
||||
).get
|
||||
let response =
|
||||
await client.patchAvailabilityRaw(availability.id, totalSize = 0.uint64.some)
|
||||
|
||||
check response.status == 422
|
||||
check (await response.body) == "Total size must be larger then zero"
|
||||
|
||||
test "creating availability when total size is negative returns 422":
|
||||
let json =
|
||||
%*{
|
||||
"totalSize": "-1",
|
||||
"duration": "200",
|
||||
"minPricePerBytePerSecond": "3",
|
||||
"totalCollateral": "300",
|
||||
}
|
||||
let response = await client.post(client.buildUrl("/sales/availability"), $json)
|
||||
|
||||
check response.status == 400
|
||||
check (await response.body) == "Parsed integer outside of valid range"
|
||||
|
||||
test "updating availability when total size is negative returns 422":
|
||||
let availability = (
|
||||
await client.postAvailability(
|
||||
totalSize = 140000.uint64,
|
||||
duration = 200.uint64,
|
||||
minPricePerBytePerSecond = 3.u256,
|
||||
totalCollateral = 300.u256,
|
||||
)
|
||||
).get
|
||||
|
||||
let json = %*{"totalSize": "-1"}
|
||||
let response = await client.patch(
|
||||
client.buildUrl("/sales/availability/") & $availability.id, $json
|
||||
)
|
||||
|
||||
check response.status == 400
|
||||
check (await response.body) == "Parsed integer outside of valid range"
|
||||
|
||||
test "request storage fails if tolerance is zero":
|
||||
let data = await RandomChunker.example(blocks = 2)
|
||||
let cid = (await client.upload(data)).get
|
||||
let duration = 100.uint64
|
||||
let pricePerBytePerSecond = 1.u256
|
||||
let proofProbability = 3.u256
|
||||
let expiry = 30.uint64
|
||||
let collateralPerByte = 1.u256
|
||||
let nodes = 3
|
||||
let tolerance = 0
|
||||
|
||||
var responseBefore = (
|
||||
await client.requestStorageRaw(
|
||||
cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte,
|
||||
expiry, nodes.uint, tolerance.uint,
|
||||
)
|
||||
)
|
||||
|
||||
check responseBefore.status == 422
|
||||
check (await responseBefore.body) == "Tolerance needs to be bigger then zero"
|
||||
|
||||
test "request storage fails if duration exceeds limit":
|
||||
let data = await RandomChunker.example(blocks = 2)
|
||||
let cid = (await client.upload(data)).get
|
||||
let duration = (31 * 24 * 60 * 60).uint64
|
||||
# 31 days TODO: this should not be hardcoded, but waits for https://github.com/codex-storage/nim-codex/issues/1056
|
||||
let proofProbability = 3.u256
|
||||
let expiry = 30.uint
|
||||
let collateralPerByte = 1.u256
|
||||
let nodes = 3
|
||||
let tolerance = 2
|
||||
let pricePerBytePerSecond = 1.u256
|
||||
|
||||
var responseBefore = (
|
||||
await client.requestStorageRaw(
|
||||
cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte,
|
||||
expiry, nodes.uint, tolerance.uint,
|
||||
)
|
||||
)
|
||||
|
||||
check responseBefore.status == 422
|
||||
check "Duration exceeds limit of" in (await responseBefore.body)
|
||||
|
||||
test "request storage fails if expiry is zero":
|
||||
let data = await RandomChunker.example(blocks = 2)
|
||||
let cid = (await client.upload(data)).get
|
||||
let duration = 100.uint64
|
||||
let pricePerBytePerSecond = 1.u256
|
||||
let proofProbability = 3.u256
|
||||
let expiry = 0.uint64
|
||||
let collateralPerByte = 1.u256
|
||||
let nodes = 3
|
||||
let tolerance = 1
|
||||
|
||||
var responseBefore = await client.requestStorageRaw(
|
||||
cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry,
|
||||
nodes.uint, tolerance.uint,
|
||||
)
|
||||
|
||||
check responseBefore.status == 422
|
||||
check (await responseBefore.body) ==
|
||||
"Expiry must be greater than zero and less than the request's duration"
|
||||
|
||||
test "request storage fails if proof probability is zero":
|
||||
let data = await RandomChunker.example(blocks = 2)
|
||||
let cid = (await client.upload(data)).get
|
||||
let duration = 100.uint64
|
||||
let pricePerBytePerSecond = 1.u256
|
||||
let proofProbability = 0.u256
|
||||
let expiry = 30.uint64
|
||||
let collateralPerByte = 1.u256
|
||||
let nodes = 3
|
||||
let tolerance = 1
|
||||
|
||||
var responseBefore = await client.requestStorageRaw(
|
||||
cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry,
|
||||
nodes.uint, tolerance.uint,
|
||||
)
|
||||
|
||||
check responseBefore.status == 422
|
||||
check (await responseBefore.body) == "Proof probability must be greater than zero"
|
||||
|
||||
test "request storage fails if price per byte per second is zero":
|
||||
let data = await RandomChunker.example(blocks = 2)
|
||||
let cid = (await client.upload(data)).get
|
||||
let duration = 100.uint64
|
||||
let pricePerBytePerSecond = 0.u256
|
||||
let proofProbability = 3.u256
|
||||
let expiry = 30.uint64
|
||||
let collateralPerByte = 1.u256
|
||||
let nodes = 3
|
||||
let tolerance = 1
|
||||
|
||||
var responseBefore = await client.requestStorageRaw(
|
||||
cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry,
|
||||
nodes.uint, tolerance.uint,
|
||||
)
|
||||
|
||||
check responseBefore.status == 422
|
||||
check (await responseBefore.body) ==
|
||||
"Price per byte per second must be greater than zero"
|
||||
|
||||
test "request storage fails if collareral per byte is zero":
|
||||
let data = await RandomChunker.example(blocks = 2)
|
||||
let cid = (await client.upload(data)).get
|
||||
let duration = 100.uint64
|
||||
let pricePerBytePerSecond = 1.u256
|
||||
let proofProbability = 3.u256
|
||||
let expiry = 30.uint64
|
||||
let collateralPerByte = 0.u256
|
||||
let nodes = 3
|
||||
let tolerance = 1
|
||||
|
||||
var responseBefore = await client.requestStorageRaw(
|
||||
cid, duration, pricePerBytePerSecond, proofProbability, collateralPerByte, expiry,
|
||||
nodes.uint, tolerance.uint,
|
||||
)
|
||||
|
||||
check responseBefore.status == 422
|
||||
check (await responseBefore.body) == "Collateral per byte must be greater than zero"
|
||||
|
||||
test "creating availability fails when until is negative":
|
||||
let totalSize = 12.uint64
|
||||
let minPricePerBytePerSecond = 1.u256
|
||||
let totalCollateral = totalSize.u256 * minPricePerBytePerSecond
|
||||
let response = await client.postAvailabilityRaw(
|
||||
totalSize = totalSize,
|
||||
duration = 2.uint64,
|
||||
minPricePerBytePerSecond = minPricePerBytePerSecond,
|
||||
totalCollateral = totalCollateral,
|
||||
until = -1.SecondsSince1970.some,
|
||||
)
|
||||
|
||||
check:
|
||||
response.status == 422
|
||||
(await response.body) == "Cannot set until to a negative value"
|
||||
|
||||
waitFor node.stop()
|
||||
node.removeDataDir()
|
||||
@ -1,5 +1,7 @@
import std/httpclient
import std/times
import pkg/codex/contracts
from pkg/codex/stores/repostore/types import DefaultQuotaBytes
import ./twonodes
import ../codex/examples
import ../contracts/time
@ -17,11 +19,13 @@ proc findItem[T](items: seq[T], item: T): ?!T =
multinodesuite "Sales":
let salesConfig = NodeConfigs(
clients: CodexConfigs.init(nodes = 1).some,
providers: CodexConfigs.init(nodes = 1).some,
providers: CodexConfigs.init(nodes = 1)
# .debug() # uncomment to enable console log output
# .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
# .withLogTopics("node", "marketplace", "sales", "reservations", "node", "proving", "clock")
.some,
)

let minPricePerBytePerSecond = 1.u256

var host: CodexClient
var client: CodexClient

@ -30,81 +34,79 @@ multinodesuite "Sales":
|
||||
client = clients()[0].client
|
||||
|
||||
test "node handles new storage availability", salesConfig:
|
||||
let availability1 = host.postAvailability(
|
||||
totalSize = 1.uint64,
|
||||
duration = 2.uint64,
|
||||
minPricePerBytePerSecond = 3.u256,
|
||||
totalCollateral = 4.u256,
|
||||
let availability1 = (
|
||||
await host.postAvailability(
|
||||
totalSize = 1.uint64,
|
||||
duration = 2.uint64,
|
||||
minPricePerBytePerSecond = 3.u256,
|
||||
totalCollateral = 4.u256,
|
||||
)
|
||||
).get
|
||||
let availability2 = host.postAvailability(
|
||||
totalSize = 4.uint64,
|
||||
duration = 5.uint64,
|
||||
minPricePerBytePerSecond = 6.u256,
|
||||
totalCollateral = 7.u256,
|
||||
let availability2 = (
|
||||
await host.postAvailability(
|
||||
totalSize = 4.uint64,
|
||||
duration = 5.uint64,
|
||||
minPricePerBytePerSecond = 6.u256,
|
||||
totalCollateral = 7.u256,
|
||||
)
|
||||
).get
|
||||
check availability1 != availability2
|
||||
|
||||
test "node lists storage that is for sale", salesConfig:
|
||||
let availability = host.postAvailability(
|
||||
totalSize = 1.uint64,
|
||||
duration = 2.uint64,
|
||||
minPricePerBytePerSecond = 3.u256,
|
||||
totalCollateral = 4.u256,
|
||||
let availability = (
|
||||
await host.postAvailability(
|
||||
totalSize = 1.uint64,
|
||||
duration = 2.uint64,
|
||||
minPricePerBytePerSecond = 3.u256,
|
||||
totalCollateral = 4.u256,
|
||||
)
|
||||
).get
|
||||
check availability in host.getAvailabilities().get
|
||||
|
||||
test "updating non-existing availability", salesConfig:
|
||||
let nonExistingResponse = host.patchAvailabilityRaw(
|
||||
AvailabilityId.example,
|
||||
duration = 100.uint64.some,
|
||||
minPricePerBytePerSecond = 2.u256.some,
|
||||
totalCollateral = 200.u256.some,
|
||||
)
|
||||
check nonExistingResponse.status == "404 Not Found"
|
||||
check availability in (await host.getAvailabilities()).get
|
||||
|
||||
test "updating availability", salesConfig:
|
||||
let availability = host.postAvailability(
|
||||
totalSize = 140000.uint64,
|
||||
duration = 200.uint64,
|
||||
minPricePerBytePerSecond = 3.u256,
|
||||
totalCollateral = 300.u256,
|
||||
let availability = (
|
||||
await host.postAvailability(
|
||||
totalSize = 140000.uint64,
|
||||
duration = 200.uint64,
|
||||
minPricePerBytePerSecond = 3.u256,
|
||||
totalCollateral = 300.u256,
|
||||
)
|
||||
).get
|
||||
|
||||
host.patchAvailability(
|
||||
var until = getTime().toUnix()
|
||||
|
||||
await host.patchAvailability(
|
||||
availability.id,
|
||||
duration = 100.uint64.some,
|
||||
minPricePerBytePerSecond = 2.u256.some,
|
||||
totalCollateral = 200.u256.some,
|
||||
enabled = false.some,
|
||||
until = until.some,
|
||||
)
|
||||
|
||||
let updatedAvailability = (host.getAvailabilities().get).findItem(availability).get
|
||||
let updatedAvailability =
|
||||
((await host.getAvailabilities()).get).findItem(availability).get
|
||||
check updatedAvailability.duration == 100.uint64
|
||||
check updatedAvailability.minPricePerBytePerSecond == 2
|
||||
check updatedAvailability.totalCollateral == 200
|
||||
check updatedAvailability.totalSize == 140000.uint64
|
||||
check updatedAvailability.freeSize == 140000.uint64
|
||||
|
||||
test "updating availability - freeSize is not allowed to be changed", salesConfig:
|
||||
let availability = host.postAvailability(
|
||||
totalSize = 140000.uint64,
|
||||
duration = 200.uint64,
|
||||
minPricePerBytePerSecond = 3.u256,
|
||||
totalCollateral = 300.u256,
|
||||
).get
|
||||
let freeSizeResponse =
|
||||
host.patchAvailabilityRaw(availability.id, freeSize = 110000.uint64.some)
|
||||
check freeSizeResponse.status == "400 Bad Request"
|
||||
check "not allowed" in freeSizeResponse.body
|
||||
check updatedAvailability.enabled == false
|
||||
check updatedAvailability.until == until
|
||||
|
||||
test "updating availability - updating totalSize", salesConfig:
|
||||
let availability = host.postAvailability(
|
||||
totalSize = 140000.uint64,
|
||||
duration = 200.uint64,
|
||||
minPricePerBytePerSecond = 3.u256,
|
||||
totalCollateral = 300.u256,
|
||||
let availability = (
|
||||
await host.postAvailability(
|
||||
totalSize = 140000.uint64,
|
||||
duration = 200.uint64,
|
||||
minPricePerBytePerSecond = 3.u256,
|
||||
totalCollateral = 300.u256,
|
||||
)
|
||||
).get
|
||||
host.patchAvailability(availability.id, totalSize = 100000.uint64.some)
|
||||
let updatedAvailability = (host.getAvailabilities().get).findItem(availability).get
|
||||
await host.patchAvailability(availability.id, totalSize = 100000.uint64.some)
|
||||
|
||||
let updatedAvailability =
|
||||
((await host.getAvailabilities()).get).findItem(availability).get
|
||||
check updatedAvailability.totalSize == 100000
|
||||
check updatedAvailability.freeSize == 100000
|
||||
|
||||
@ -115,38 +117,120 @@ multinodesuite "Sales":
|
||||
let minPricePerBytePerSecond = 3.u256
|
||||
let collateralPerByte = 1.u256
|
||||
let totalCollateral = originalSize.u256 * collateralPerByte
|
||||
let availability = host.postAvailability(
|
||||
totalSize = originalSize,
|
||||
duration = 20 * 60.uint64,
|
||||
minPricePerBytePerSecond = minPricePerBytePerSecond,
|
||||
totalCollateral = totalCollateral,
|
||||
let availability = (
|
||||
await host.postAvailability(
|
||||
totalSize = originalSize,
|
||||
duration = 20 * 60.uint64,
|
||||
minPricePerBytePerSecond = minPricePerBytePerSecond,
|
||||
totalCollateral = totalCollateral,
|
||||
)
|
||||
).get
|
||||
|
||||
# Lets create storage request that will utilize some of the availability's space
|
||||
let cid = client.upload(data).get
|
||||
let id = client.requestStorage(
|
||||
cid,
|
||||
duration = 20 * 60.uint64,
|
||||
pricePerBytePerSecond = minPricePerBytePerSecond,
|
||||
proofProbability = 3.u256,
|
||||
expiry = (10 * 60).uint64,
|
||||
collateralPerByte = collateralPerByte,
|
||||
nodes = 3,
|
||||
tolerance = 1,
|
||||
let cid = (await client.upload(data)).get
|
||||
let id = (
|
||||
await client.requestStorage(
|
||||
cid,
|
||||
duration = 20 * 60.uint64,
|
||||
pricePerBytePerSecond = minPricePerBytePerSecond,
proofProbability = 3.u256,
expiry = (10 * 60).uint64,
collateralPerByte = collateralPerByte,
nodes = 3,
tolerance = 1,
)
).get

check eventually(client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000)
let updatedAvailability = (host.getAvailabilities().get).findItem(availability).get
check eventually(
await client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000
)
let updatedAvailability =
((await host.getAvailabilities()).get).findItem(availability).get
check updatedAvailability.totalSize != updatedAvailability.freeSize

let utilizedSize = updatedAvailability.totalSize - updatedAvailability.freeSize
let totalSizeResponse =
host.patchAvailabilityRaw(availability.id, totalSize = (utilizedSize - 1).some)
check totalSizeResponse.status == "400 Bad Request"
check "totalSize must be larger then current totalSize" in totalSizeResponse.body
let totalSizeResponse = (
await host.patchAvailabilityRaw(
availability.id, totalSize = (utilizedSize - 1).some
)
)
check totalSizeResponse.status == 422
check "totalSize must be larger then current totalSize" in
(await totalSizeResponse.body)

host.patchAvailability(availability.id, totalSize = (originalSize + 20000).some)
await host.patchAvailability(
availability.id, totalSize = (originalSize + 20000).some
)
let newUpdatedAvailability =
(host.getAvailabilities().get).findItem(availability).get
((await host.getAvailabilities()).get).findItem(availability).get
check newUpdatedAvailability.totalSize == originalSize + 20000
check newUpdatedAvailability.freeSize - updatedAvailability.freeSize == 20000

test "updating availability fails with until negative", salesConfig:
let availability = (
await host.postAvailability(
totalSize = 140000.uint64,
duration = 200.uint64,
minPricePerBytePerSecond = 3.u256,
totalCollateral = 300.u256,
)
).get

let response =
await host.patchAvailabilityRaw(availability.id, until = -1.SecondsSince1970.some)

check:
(await response.body) == "Cannot set until to a negative value"

test "returns an error when trying to update the until date before an existing a request is finished",
salesConfig:
let size = 0xFFFFFF.uint64
let data = await RandomChunker.example(blocks = 8)
let duration = 20 * 60.uint64
let minPricePerBytePerSecond = 3.u256
let collateralPerByte = 1.u256
let ecNodes = 3.uint
let ecTolerance = 1.uint

# host makes storage available
let availability = (
await host.postAvailability(
totalSize = size,
duration = duration,
minPricePerBytePerSecond = minPricePerBytePerSecond,
totalCollateral = size.u256 * minPricePerBytePerSecond,
)
).get

# client requests storage
let cid = (await client.upload(data)).get
let id = (
await client.requestStorage(
cid,
duration = duration,
pricePerBytePerSecond = minPricePerBytePerSecond,
proofProbability = 3.u256,
expiry = 10 * 60.uint64,
collateralPerByte = collateralPerByte,
nodes = ecNodes,
tolerance = ecTolerance,
)
).get

check eventually(
await client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000
)
let purchase = (await client.getPurchase(id)).get
check purchase.error == none string

let unixNow = getTime().toUnix()
let until = unixNow + 1.SecondsSince1970

let response = await host.patchAvailabilityRaw(
availabilityId = availability.id, until = until.some
)

check:
response.status == 422
(await response.body) ==
"Until parameter must be greater or equal to the longest currently hosted slot"

@ -9,11 +9,11 @@ twonodessuite "Uploads and downloads":
let content1 = "some file contents"
let content2 = "some other contents"

let cid1 = client1.upload(content1).get
let cid2 = client2.upload(content2).get
let cid1 = (await client1.upload(content1)).get
let cid2 = (await client2.upload(content2)).get

let resp1 = client1.download(cid1, local = true).get
let resp2 = client2.download(cid2, local = true).get
let resp1 = (await client1.download(cid1, local = true)).get
let resp2 = (await client2.download(cid2, local = true)).get

check:
content1 == resp1
@ -23,11 +23,11 @@ twonodessuite "Uploads and downloads":
let content1 = "some file contents"
let content2 = "some other contents"

let cid1 = client1.upload(content1).get
let cid2 = client2.upload(content2).get
let cid1 = (await client1.upload(content1)).get
let cid2 = (await client2.upload(content2)).get

let resp2 = client1.download(cid2, local = false).get
let resp1 = client2.download(cid1, local = false).get
let resp2 = (await client1.download(cid2, local = false)).get
let resp1 = (await client2.download(cid1, local = false)).get

check:
content1 == resp1
@ -35,11 +35,12 @@ twonodessuite "Uploads and downloads":

test "node fails retrieving non-existing local file", twoNodesConfig:
let content1 = "some file contents"
let cid1 = client1.upload(content1).get # upload to first node
let resp2 = client2.download(cid1, local = true) # try retrieving from second node
let cid1 = (await client1.upload(content1)).get # upload to first node
let resp2 =
await client2.download(cid1, local = true) # try retrieving from second node

check:
resp2.error.msg == "404 Not Found"
resp2.error.msg == "404"

proc checkRestContent(cid: Cid, content: ?!string) =
let c = content.tryGet()
@ -67,26 +68,28 @@ twonodessuite "Uploads and downloads":

test "node allows downloading only manifest", twoNodesConfig:
let content1 = "some file contents"
let cid1 = client1.upload(content1).get
let cid1 = (await client1.upload(content1)).get

let resp2 = client1.downloadManifestOnly(cid1)
let resp2 = await client1.downloadManifestOnly(cid1)
checkRestContent(cid1, resp2)

test "node allows downloading content without stream", twoNodesConfig:
let content1 = "some file contents"
let cid1 = client1.upload(content1).get
let
content1 = "some file contents"
cid1 = (await client1.upload(content1)).get
resp1 = await client2.downloadNoStream(cid1)

let resp1 = client2.downloadNoStream(cid1)
checkRestContent(cid1, resp1)
let resp2 = client2.download(cid1, local = true).get

let resp2 = (await client2.download(cid1, local = true)).get
check:
content1 == resp2

test "reliable transfer test", twoNodesConfig:
proc transferTest(a: CodexClient, b: CodexClient) {.async.} =
let data = await RandomChunker.example(blocks = 8)
let cid = a.upload(data).get
let response = b.download(cid).get
let cid = (await a.upload(data)).get
let response = (await b.download(cid)).get
check:
@response.mapIt(it.byte) == data

@ -99,14 +99,14 @@ marketplacesuite "Validation":
let data = await RandomChunker.example(blocks = blocks)
let datasetSize =
datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance)
createAvailabilities(
await createAvailabilities(
datasetSize.truncate(uint64),
duration,
collateralPerByte,
minPricePerBytePerSecond,
)

let cid = client0.upload(data).get
let cid = (await client0.upload(data)).get
let purchaseId = await client0.requestStorage(
cid,
expiry = expiry,
@ -115,12 +115,12 @@ marketplacesuite "Validation":
tolerance = ecTolerance,
proofProbability = proofProbability,
)
let requestId = client0.requestId(purchaseId).get
let requestId = (await client0.requestId(purchaseId)).get

debug "validation suite", purchaseId = purchaseId.toHex, requestId = requestId

if not eventuallyS(
client0.purchaseStateIs(purchaseId, "started"),
await client0.purchaseStateIs(purchaseId, "started"),
timeout = (expiry + 60).int,
step = 5,
):
@ -169,14 +169,14 @@ marketplacesuite "Validation":
let data = await RandomChunker.example(blocks = blocks)
let datasetSize =
datasetSize(blocks = blocks, nodes = ecNodes, tolerance = ecTolerance)
createAvailabilities(
await createAvailabilities(
datasetSize.truncate(uint64),
duration,
collateralPerByte,
minPricePerBytePerSecond,
)

let cid = client0.upload(data).get
let cid = (await client0.upload(data)).get
let purchaseId = await client0.requestStorage(
cid,
expiry = expiry,
@ -185,12 +185,12 @@ marketplacesuite "Validation":
tolerance = ecTolerance,
proofProbability = proofProbability,
)
let requestId = client0.requestId(purchaseId).get
let requestId = (await client0.requestId(purchaseId)).get

debug "validation suite", purchaseId = purchaseId.toHex, requestId = requestId

if not eventuallyS(
client0.purchaseStateIs(purchaseId, "started"),
await client0.purchaseStateIs(purchaseId, "started"),
timeout = (expiry + 60).int,
step = 5,
):

@ -1,5 +1,6 @@
import ./integration/testcli
import ./integration/testrestapi
import ./integration/testrestapivalidation
import ./integration/testupdownload
import ./integration/testsales
import ./integration/testpurchasing

@ -24,7 +24,7 @@ suite "Taiko L2 Integration Tests":
)
node1.waitUntilStarted()

let bootstrap = (!node1.client.info())["spr"].getStr()
let bootstrap = (!(await node1.client.info()))["spr"].getStr()

node2 = startNode(
[

1
vendor/nim-ngtcp2
vendored
Submodule
1
vendor/nim-ngtcp2
vendored
Submodule
@ -0,0 +1 @@
Subproject commit 6834f4756b6af58356ac9c4fef3d71db3c3ae5fe
1
vendor/nim-quic
vendored
Submodule
1
vendor/nim-quic
vendored
Submodule
@ -0,0 +1 @@
Subproject commit ddcb31ffb74b5460ab37fd13547eca90594248bc