feat(rest): adds erasure coding constraints when requesting storage (#848)

* Rest API: add erasure coding constraints when requesting storage

* clean up

* Make error message for "dataset too small" more informative.

* fix API integration test

---------

Co-authored-by: gmega <giuliano.mega@gmail.com>
Eric authored 2024-06-28 07:26:19 +10:00, committed by GitHub
parent b004ca75f6
commit 67facb4b2a
5 changed files with 132 additions and 27 deletions
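
At its core, the change maps the request's nodes/tolerance pair onto the erasure-coding parameters K = nodes - tolerance (data shards) and M = tolerance (parity shards), and rejects the request unless K > 1 and K >= M. A minimal standalone sketch of that rule, using hypothetical names rather than the Codex API:

type EcParamsError = object of CatchableError

proc checkEcParams(nodes, tolerance: uint) =
  ## Reject nodes/tolerance pairs that the erasure coder cannot handle.
  if tolerance > nodes:            # checked first so `nodes - tolerance` cannot underflow
    raise newException(EcParamsError, "`tolerance` cannot be greater than `nodes`")
  let
    ecK = nodes - tolerance        # data shards (one per non-redundant node)
    ecM = tolerance                # parity shards (tolerated losses)
  if ecK <= 1 or ecK < ecM:
    raise newException(EcParamsError,
      "need more than one data shard and at least as many data shards as parity shards")

when isMainModule:
  checkEcParams(3, 1)              # accepted: K = 2, M = 1
  try:
    checkEcParams(2, 1)            # rejected: K = 1
  except EcParamsError as e:
    echo "rejected: ", e.msg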

Changed file 1 of 5

@@ -27,6 +27,7 @@ import ../blocktype as bt
 import ../utils
 import ../utils/asynciter
 import ../indexingstrategy
+import ../errors
 import pkg/stew/byteutils
@ -82,6 +83,13 @@ type
blocksCount: Natural blocksCount: Natural
strategy: StrategyType strategy: StrategyType
ErasureError* = object of CodexError
InsufficientBlocksError* = object of ErasureError
# Minimum size, in bytes, that the dataset must have had
# for the encoding request to have succeeded with the parameters
# provided.
minSize*: NBytes
func indexToPos(steps, idx, step: int): int {.inline.} = func indexToPos(steps, idx, step: int): int {.inline.} =
## Convert an index to a position in the encoded ## Convert an index to a position in the encoded
## dataset ## dataset
@@ -236,11 +244,13 @@ proc init*(
   ecK: Natural, ecM: Natural,
   strategy: StrategyType): ?!EncodingParams =
   if ecK > manifest.blocksCount:
-    return failure(
-      "Unable to encode manifest, not enough blocks, ecK = " &
-      $ecK &
-      ", blocksCount = " &
-      $manifest.blocksCount)
+    let exc = (ref InsufficientBlocksError)(
+      msg: "Unable to encode manifest, not enough blocks, ecK = " &
+        $ecK &
+        ", blocksCount = " &
+        $manifest.blocksCount,
+      minSize: ecK.NBytes * manifest.blockSize)
+    return failure(exc)
 
   let
     rounded = roundUp(manifest.blocksCount, ecK)
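
The minSize carried by InsufficientBlocksError is simply ecK blocks' worth of bytes: the smallest dataset for which the chosen K could have been encoded. A quick standalone sketch of the arithmetic (the block size below is an example value, not Codex's DefaultBlockSize):

proc minDatasetSize(ecK, blockSize: int): int =
  ## Encoding needs at least ecK blocks, i.e. ecK * blockSize bytes.
  ecK * blockSize

when isMainModule:
  # e.g. K = 2 data shards with 64 KiB blocks need at least 128 KiB of data
  echo minDatasetSize(2, 64 * 1024)   # -> 131072

This is also why the "dataset too small" integration test below expects exactly 2*DefaultBlockSize bytes: the failing request uses nodes=2 and tolerance=0, i.e. ecK = 2.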

Changed file 2 of 5

@@ -32,6 +32,7 @@ import ../node
 import ../blocktype
 import ../conf
 import ../contracts
+import ../erasure/erasure
 import ../manifest
 import ../streams/asyncstreamwrapper
 import ../stores
@@ -432,8 +433,16 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
       let nodes = params.nodes |? 1
       let tolerance = params.tolerance |? 0
 
-      if (nodes - tolerance) < 1:
-        return RestApiResponse.error(Http400, "Tolerance cannot be greater or equal than nodes (nodes - tolerance)")
+      # prevent underflow
+      if tolerance > nodes:
+        return RestApiResponse.error(Http400, "Invalid parameters: `tolerance` cannot be greater than `nodes`")
+
+      let ecK = nodes - tolerance
+      let ecM = tolerance # for readability
+
+      # ensure leopard constraint of 1 < K ≥ M
+      if ecK <= 1 or ecK < ecM:
+        return RestApiResponse.error(Http400, "Invalid parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`")
 
       without expiry =? params.expiry:
         return RestApiResponse.error(Http400, "Expiry required")
@@ -451,6 +460,11 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
         params.collateral,
         expiry), error:
 
+        if error of InsufficientBlocksError:
+          return RestApiResponse.error(Http400,
+            "Dataset too small for erasure parameters, need at least " &
+            $(ref InsufficientBlocksError)(error).minSize.int & " bytes")
+
         return RestApiResponse.error(Http500, error.msg)
 
       return RestApiResponse.response(purchaseId.toHex)
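
The handler above recognizes the new error by its dynamic type and then converts the generic error reference to read the structured minSize field. The same Nim pattern in isolation, with standalone stand-in types (not the Codex ones):

type
  AppError = object of CatchableError
  TooSmallError = object of AppError
    minSize: int                 # structured payload carried with the error

proc doWork(size: int): int =
  if size < 128:
    raise (ref TooSmallError)(msg: "dataset too small", minSize: 128)
  size

when isMainModule:
  try:
    discard doWork(10)
  except AppError as e:
    # `of` checks the dynamic type; the conversion then exposes `minSize`
    if e of TooSmallError:
      echo "need at least ", (ref TooSmallError)(e).minSize, " bytes"
    else:
      echo e.msg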

Changed file 3 of 5

@@ -96,7 +96,7 @@ proc requestStorageRaw*(
   proofProbability: UInt256,
   collateral: UInt256,
   expiry: uint = 0,
-  nodes: uint = 1,
+  nodes: uint = 2,
   tolerance: uint = 0
 ): Response =
@@ -125,7 +125,7 @@ proc requestStorage*(
   proofProbability: UInt256,
   expiry: uint,
   collateral: UInt256,
-  nodes: uint = 1,
+  nodes: uint = 2,
   tolerance: uint = 0
 ): ?!PurchaseId =
   ## Call request storage REST endpoint

Changed file 4 of 5

@@ -8,7 +8,8 @@ import ../examples
 
 twonodessuite "Purchasing", debug1 = false, debug2 = false:
   test "node handles storage request":
-    let cid = client1.upload("some file contents").get
+    let data = await RandomChunker.example(blocks=2)
+    let cid = client1.upload(data).get
     let id1 = client1.requestStorage(cid, duration=100.u256, reward=2.u256, proofProbability=3.u256, expiry=10, collateral=200.u256).get
     let id2 = client1.requestStorage(cid, duration=400.u256, reward=5.u256, proofProbability=6.u256, expiry=10, collateral=201.u256).get
     check id1 != id2
@@ -26,7 +27,7 @@ twonodessuite "Purchasing", debug1 = false, debug2 = false:
       proofProbability=3.u256,
       expiry=30,
       collateral=200.u256,
-      nodes=2,
+      nodes=3,
       tolerance=1).get
 
     let request = client1.getPurchase(id).get.request.get
@@ -35,7 +36,7 @@ twonodessuite "Purchasing", debug1 = false, debug2 = false:
     check request.ask.proofProbability == 3.u256
     check request.expiry == 30
     check request.ask.collateral == 200.u256
-    check request.ask.slots == 2'u64
+    check request.ask.slots == 3'u64
     check request.ask.maxSlotLoss == 1'u64
 
   # TODO: We currently do not support encoding single chunks
@@ -52,7 +53,8 @@ twonodessuite "Purchasing", debug1 = false, debug2 = false:
   # check request.ask.maxSlotLoss == 1'u64
 
   test "node remembers purchase status after restart":
-    let cid = client1.upload("some file contents").get
+    let data = await RandomChunker.example(blocks=2)
+    let cid = client1.upload(data).get
     let id = client1.requestStorage(cid,
       duration=100.u256,
       reward=2.u256,
@@ -71,25 +73,12 @@ twonodessuite "Purchasing", debug1 = false, debug2 = false:
     check request.ask.proofProbability == 3.u256
     check request.expiry == 30
     check request.ask.collateral == 200.u256
-    check request.ask.slots == 1'u64
+    check request.ask.slots == 2'u64
     check request.ask.maxSlotLoss == 0'u64
 
-  test "request storage fails if nodes and tolerance aren't correct":
-    let cid = client1.upload("some file contents").get
-
-    let responseBefore = client1.requestStorageRaw(cid,
-      duration=100.u256,
-      reward=2.u256,
-      proofProbability=3.u256,
-      expiry=30,
-      collateral=200.u256,
-      nodes=1,
-      tolerance=1)
-
-    check responseBefore.status == "400 Bad Request"
-    check responseBefore.body == "Tolerance cannot be greater or equal than nodes (nodes - tolerance)"
-
   test "node requires expiry and its value to be in future":
-    let cid = client1.upload("some file contents").get
+    let data = await RandomChunker.example(blocks=2)
+    let cid = client1.upload(data).get
 
     let responseMissing = client1.requestStorageRaw(cid, duration=1.u256, reward=2.u256, proofProbability=3.u256, collateral=200.u256)
     check responseMissing.status == "400 Bad Request"

Changed file 5 of 5

@@ -1,7 +1,9 @@
+import std/httpclient
 import std/sequtils
 from pkg/libp2p import `==`
 import pkg/codex/units
 import ./twonodes
+import ../examples
 
 twonodessuite "REST API", debug1 = false, debug2 = false:
@@ -36,3 +38,93 @@ twonodessuite "REST API", debug1 = false, debug2 = false:
     check:
       [cid1, cid2].allIt(it in list.content.mapIt(it.cid))
+
+  test "request storage fails for datasets that are too small":
+    let cid = client1.upload("some file contents").get
+    let response = client1.requestStorageRaw(cid, duration=10.u256, reward=2.u256, proofProbability=3.u256, nodes=2, collateral=200.u256, expiry=9)
+
+    check:
+      response.status == "400 Bad Request"
+      response.body == "Dataset too small for erasure parameters, need at least " & $(2*DefaultBlockSize.int) & " bytes"
+
+  test "request storage succeeds for sufficiently sized datasets":
+    let data = await RandomChunker.example(blocks=2)
+    let cid = client1.upload(data).get
+    let response = client1.requestStorageRaw(cid, duration=10.u256, reward=2.u256, proofProbability=3.u256, collateral=200.u256, expiry=9)
+
+    check:
+      response.status == "200 OK"
+
+  test "request storage fails if nodes and tolerance aren't correct":
+    let data = await RandomChunker.example(blocks=2)
+    let cid = client1.upload(data).get
+    let duration = 100.u256
+    let reward = 2.u256
+    let proofProbability = 3.u256
+    let expiry = 30.uint
+    let collateral = 200.u256
+    let ecParams = @[(1, 0), (1, 1), (2, 1), (3, 2), (3, 3)]
+
+    for ecParam in ecParams:
+      let (nodes, tolerance) = ecParam
+      var responseBefore = client1.requestStorageRaw(cid,
+        duration,
+        reward,
+        proofProbability,
+        collateral,
+        expiry,
+        nodes.uint,
+        tolerance.uint)
+
+      check responseBefore.status == "400 Bad Request"
+      check responseBefore.body == "Invalid parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`"
+
+  test "request storage fails if tolerance > nodes (underflow protection)":
+    let data = await RandomChunker.example(blocks=2)
+    let cid = client1.upload(data).get
+    let duration = 100.u256
+    let reward = 2.u256
+    let proofProbability = 3.u256
+    let expiry = 30.uint
+    let collateral = 200.u256
+    let ecParams = @[(0, 1), (1, 2), (2, 3)]
+
+    for ecParam in ecParams:
+      let (nodes, tolerance) = ecParam
+      var responseBefore = client1.requestStorageRaw(cid,
+        duration,
+        reward,
+        proofProbability,
+        collateral,
+        expiry,
+        nodes.uint,
+        tolerance.uint)
+
+      check responseBefore.status == "400 Bad Request"
+      check responseBefore.body == "Invalid parameters: `tolerance` cannot be greater than `nodes`"
+
+  test "request storage succeeds if nodes and tolerance within range":
+    let data = await RandomChunker.example(blocks=2)
+    let cid = client1.upload(data).get
+    let duration = 100.u256
+    let reward = 2.u256
+    let proofProbability = 3.u256
+    let expiry = 30.uint
+    let collateral = 200.u256
+    let ecParams = @[(2, 0), (3, 1), (5, 2)]
+
+    for ecParam in ecParams:
+      let (nodes, tolerance) = ecParam
+      var responseBefore = client1.requestStorageRaw(cid,
+        duration,
+        reward,
+        proofProbability,
+        collateral,
+        expiry,
+        nodes.uint,
+        tolerance.uint)
+
+      check responseBefore.status == "200 OK"
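
As a cross-check on the three parameter tables above, each (nodes, tolerance) pair maps to (K, M) as follows under the same rule the handler applies (a standalone enumeration with illustrative names, not test code):

proc classify(nodes, tolerance: int): string =
  ## Apply the REST-level rule: tolerance <= nodes, K > 1, and K >= M.
  if tolerance > nodes:
    return "rejected (tolerance > nodes)"
  let
    ecK = nodes - tolerance
    ecM = tolerance
  if ecK <= 1 or ecK < ecM:
    return "rejected (K = " & $ecK & ", M = " & $ecM & ")"
  "accepted (K = " & $ecK & ", M = " & $ecM & ")"

when isMainModule:
  for (nodes, tolerance) in [(1, 0), (1, 1), (2, 1), (3, 2), (3, 3),  # invalid K/M pairs
                             (0, 1), (1, 2), (2, 3),                  # underflow cases
                             (2, 0), (3, 1), (5, 2)]:                 # accepted pairs
    echo nodes, "/", tolerance, ": ", classify(nodes, tolerance)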