import std/json except `%`, `%*`
import std/macros
import std/options
import std/strutils
import std/strformat
import std/tables
import std/typetraits
from pkg/ethers import Address
from pkg/libp2p import Cid, PeerId, SignedPeerRecord, MultiAddress, AddressInfo, init, `$`
import pkg/contractabi
import pkg/codexdht/discv5/node as dn
import pkg/stew/byteutils
import pkg/stint
import pkg/questionable/results
import ../errors
import exceptions

export json except `%`, `%*`

type
  SerializationError = object of CodexError
  UnexpectedKindError = object of SerializationError

template serialize* {.pragma.}
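
# Usage sketch: opt a field in to serialization (hypothetical type, not part
# of this module). Fields without {.serialize.} are skipped by the object
# `%` overloads defined further down:
#
#   type Person = object
#     name {.serialize.}: string
#     internalId: int        # not serialized
#
#   assert $(%Person(name: "alice", internalId: 1)) == """{"name":"alice"}"""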

proc newUnexpectedKindError(
  expectedType: type,
  expectedKinds: string,
  json: JsonNode
): ref UnexpectedKindError {.raises: [].} =
  let kind = if json.isNil: "nil"
             else: $json.kind
  newException(UnexpectedKindError,
    "deserialization to " & $expectedType & " failed: expected " &
    expectedKinds & " but got " & kind)

proc newUnexpectedKindError(
  expectedType: type,
  expectedKinds: set[JsonNodeKind],
  json: JsonNode
): ref UnexpectedKindError =
  newUnexpectedKindError(expectedType, $expectedKinds, json)

proc newUnexpectedKindError(
  expectedType: type,
  expectedKind: JsonNodeKind,
  json: JsonNode
): ref UnexpectedKindError =
  newUnexpectedKindError(expectedType, {expectedKind}, json)

# Injects an early `return failure(...)` into the calling proc when `json` is
# nil or not one of the expected kinds.
template expectJsonKind(
  expectedType: type,
  expectedKinds: set[JsonNodeKind],
  json: JsonNode
) =
  if json.isNil or json.kind notin expectedKinds:
    return failure(newUnexpectedKindError(expectedType, expectedKinds, json))

template expectJsonKind(
  expectedType: type,
  expectedKind: JsonNodeKind,
  json: JsonNode
) =
  expectJsonKind(expectedType, {expectedKind}, json)

proc fromJson*(
  T: type enum,
  json: JsonNode
): ?!T =
  expectJsonKind(T, JString, json)
  catch parseEnum[T](json.str)
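
# Usage sketch (hypothetical enum, not part of this module):
#   type Fruit = enum apple, banana
#   assert Fruit.fromJson(newJString("banana")) == success(banana)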

proc fromJson*(
  _: type string,
  json: JsonNode
): ?!string =
  if json.isNil:
    let err = newException(ValueError, "'json' expected, but was nil")
    return failure(err)
  elif json.kind == JNull:
    # JSON null maps to the literal string "null"
    return success("null")
  elif json.kind != JString:
    return failure(newUnexpectedKindError(string, JString, json))
  catch json.getStr

proc fromJson*(
  _: type bool,
  json: JsonNode
): ?!bool =
  expectJsonKind(bool, JBool, json)
  catch json.getBool

proc fromJson*(
  _: type int,
  json: JsonNode
): ?!int =
  expectJsonKind(int, JInt, json)
  catch json.getInt

proc fromJson*[T: SomeInteger](
  _: type T,
  json: JsonNode
): ?!T =
  when T is uint|uint64 or (not defined(js) and int.sizeof == 4):
    expectJsonKind(T, {JInt, JString}, json)
    case json.kind
    of JString:
      let x = parseBiggestUInt(json.str)
      return success cast[T](x)
    else:
      return success T(json.num)
  else:
    expectJsonKind(T, {JInt}, json)
    return success cast[T](json.num)
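
# Usage sketch: unsigned 64-bit values too large for a JInt are accepted as
# strings, matching the `%` overloads for unsigned integers below:
#   assert uint64.fromJson(newJString("18446744073709551615")) ==
#     success(18446744073709551615'u64)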

proc fromJson*[T: SomeFloat](
  _: type T,
  json: JsonNode
): ?!T =
  expectJsonKind(T, {JInt, JFloat, JString}, json)
  if json.kind == JString:
    case json.str
    of "nan":
      # Assign via a temporary: converting a NaN/Inf literal directly to a
      # narrower float type can trigger a compile-time range-conversion error.
      let b = NaN
      return success T(b)
    of "inf":
      let b = Inf
      return success T(b)
    of "-inf":
      let b = -Inf
      return success T(b)
    else:
      let err = newUnexpectedKindError(T, "'nan|inf|-inf'", json)
      return failure(err)
  else:
    if json.kind == JFloat:
      return success T(json.fnum)
    else:
      return success T(json.num)
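
# Usage sketch: non-finite floats travel as JSON strings, mirroring the
# float `%` overload below:
#   assert float.fromJson(newJString("inf")) == success(Inf)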

proc fromJson*(
  _: type seq[byte],
  json: JsonNode
): ?!seq[byte] =
  expectJsonKind(seq[byte], JString, json)
  hexToSeqByte(json.getStr).catch
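
# Usage sketch: byte sequences are hex strings on the wire:
#   assert seq[byte].fromJson(newJString("0xdead")) == success(@[0xde'u8, 0xad'u8])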

proc fromJson*[N: static[int], T: array[N, byte]](
  _: type T,
  json: JsonNode
): ?!T =
  expectJsonKind(T, JString, json)
  T.fromHex(json.getStr).catch

proc fromJson*[T: distinct](
  _: type T,
  json: JsonNode
): ?!T =
  success T(? T.distinctBase.fromJson(json))

proc fromJson*[N: static[int], T: StUint[N]](
  _: type T,
  json: JsonNode
): ?!T =
  expectJsonKind(T, JString, json)
  catch parse(json.getStr, T)
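
# Usage sketch: 256-bit integers are encoded as JSON strings, so no
# precision is lost in transit:
#   assert UInt256.fromJson(newJString("100")) == success(100.u256)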

proc fromJson*[T](
  _: type Option[T],
  json: JsonNode
): ?!Option[T] =
  if json.isNil or json.kind == JNull:
    return success(none T)
  without val =? T.fromJson(json), error:
    return failure(error)
  success(val.some)
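
# Usage sketch: absent or null JSON maps to `none`; anything else is parsed:
#   assert Option[int].fromJson(newJNull()) == success(none int)
#   assert Option[int].fromJson(newJInt(42)) == success(42.some)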

proc fromJson*(
  _: type Cid,
  json: JsonNode
): ?!Cid =
  expectJsonKind(Cid, JString, json)
  Cid.init(json.str).mapFailure

proc fromJson*[T](
  _: type seq[T],
  json: JsonNode
): ?!seq[T] =
  expectJsonKind(seq[T], JArray, json)
  var arr: seq[T] = @[]
  for elem in json.elems:
    arr.add(? T.fromJson(elem))
  success arr
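
# Usage sketch: each element is deserialized in turn; the first failure
# aborts the whole conversion:
#   assert seq[int].fromJson(%*[1, 2, 3]) == success(@[1, 2, 3])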

proc fromJson*[T: ref object or object](
  _: type T,
  json: JsonNode
): ?!T =
  expectJsonKind(T, JObject, json)
  var res = when type(T) is ref: T.new() else: T.default
  # Leave this in, it's good for debugging:
  # trace "deserializing object", to = $T, json
  for name, value in fieldPairs(when type(T) is ref: res[] else: res):
    if json{name} != nil:
      without parsed =? type(value).fromJson(json{name}), e:
        error "error deserializing field",
          field = $T & "." & name,
          json = json{name},
          error = e.msg
        return failure(e)
      value = parsed
  success(res)
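
# Usage sketch (hypothetical type, not part of this module): fields missing
# from the JSON keep their default values:
#   type Pet = object
#     name: string
#     age: int
#   assert Pet.fromJson(%*{"name": "rex"}) == success(Pet(name: "rex"))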

proc fromJson*[T: object](
  _: type T,
  bytes: seq[byte]
): ?!T =
  # FIXME remove launderBare when we upgrade to nim 2.0.0
  let json = ?catch launderBare parseJson(string.fromBytes(bytes))
  T.fromJson(json)

proc fromJson*[T: ref object](
  _: type T,
  bytes: seq[byte]
): ?!T =
  # FIXME remove launderBare when we upgrade to nim 2.0.0
  let json = ?catch launderBare parseJson(string.fromBytes(bytes))
  T.fromJson(json)
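
# Usage sketch: decode an object straight from raw UTF-8 JSON bytes (reusing
# the hypothetical Pet type from the sketch above; `toBytes` comes from
# pkg/stew/byteutils):
#   assert Pet.fromJson("""{"name": "rex"}""".toBytes) == success(Pet(name: "rex"))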

func `%`*(s: string): JsonNode = newJString(s)

func `%`*(n: uint): JsonNode =
  if n > cast[uint](int.high):
    newJString($n)
  else:
    newJInt(BiggestInt(n))

func `%`*(n: int): JsonNode = newJInt(n)

func `%`*(n: BiggestUInt): JsonNode =
  if n > cast[BiggestUInt](BiggestInt.high):
    newJString($n)
  else:
    newJInt(BiggestInt(n))

func `%`*(n: BiggestInt): JsonNode = newJInt(n)

func `%`*(n: float): JsonNode =
  if n != n: newJString("nan") # NaN is the only float value not equal to itself
  elif n == Inf: newJString("inf")
  elif n == -Inf: newJString("-inf")
  else: newJFloat(n)

func `%`*(b: bool): JsonNode = newJBool(b)

func `%`*(keyVals: openArray[tuple[key: string, val: JsonNode]]): JsonNode =
  if keyVals.len == 0: return newJArray()
  let jObj = newJObject()
  for key, val in items(keyVals): jObj.fields[key] = val
  jObj

template `%`*(j: JsonNode): JsonNode = j

func `%`*[T](table: Table[string, T]|OrderedTable[string, T]): JsonNode =
  let jObj = newJObject()
  for k, v in table: jObj[k] = %v
  jObj

func `%`*[T](opt: Option[T]): JsonNode =
  if opt.isSome: %(opt.get) else: newJNull()

func `%`*[T: object](obj: T): JsonNode =
  let jsonObj = newJObject()
  for name, value in obj.fieldPairs:
    when value.hasCustomPragma(serialize):
      jsonObj[name] = %value
  jsonObj

func `%`*[T: ref object](obj: T): JsonNode =
  let jsonObj = newJObject()
  for name, value in obj[].fieldPairs:
    when value.hasCustomPragma(serialize):
      jsonObj[name] = %(value)
  jsonObj

proc `%`*(o: enum): JsonNode = % $o

func `%`*(stint: StInt|StUint): JsonNode = %stint.toString

func `%`*(cstr: cstring): JsonNode = % $cstr

func `%`*(arr: openArray[byte]): JsonNode = % arr.to0xHex

func `%`*[T](elements: openArray[T]): JsonNode =
  let jObj = newJArray()
  for elem in elements: jObj.add(%elem)
  jObj

func `%`*[T: distinct](id: T): JsonNode =
  type baseType = T.distinctBase
  % baseType(id)

func `%`*(cid: Cid): JsonNode = % $cid

func `%`*(obj: PeerId): JsonNode = % $obj

func `%`*(obj: SignedPeerRecord): JsonNode = % $obj

func `%`*(obj: dn.Address): JsonNode = % $obj

func `%`*(obj: AddressInfo): JsonNode = % $obj.address

func `%`*(obj: MultiAddress): JsonNode = % $obj

func `%`*(address: ethers.Address): JsonNode = % $address

func toJson*[T](item: T): string = $(%item)
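
# Usage sketch: `toJson` is the one-stop serializer for anything with a `%`
# overload:
#   assert 42.toJson == "42"
#   assert "hi".toJson == "\"hi\""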

proc toJsnImpl(x: NimNode): NimNode =
  case x.kind
  of nnkBracket: # array
    if x.len == 0: return newCall(bindSym"newJArray")
    result = newNimNode(nnkBracket)
    for i in 0 ..< x.len:
      result.add(toJsnImpl(x[i]))
    result = newCall(bindSym("%", brOpen), result)
  of nnkTableConstr: # object
    if x.len == 0: return newCall(bindSym"newJObject")
    result = newNimNode(nnkTableConstr)
    for i in 0 ..< x.len:
      x[i].expectKind nnkExprColonExpr
      result.add newTree(nnkExprColonExpr, x[i][0], toJsnImpl(x[i][1]))
    result = newCall(bindSym("%", brOpen), result)
  of nnkCurly: # empty object
    x.expectLen(0)
    result = newCall(bindSym"newJObject")
  of nnkNilLit:
    result = newCall(bindSym"newJNull")
  of nnkPar:
    if x.len == 1: result = toJsnImpl(x[0])
    else: result = newCall(bindSym("%", brOpen), x)
  else:
    result = newCall(bindSym("%", brOpen), x)

macro `%*`*(x: untyped): JsonNode =
  ## Convert an expression to a JsonNode directly, without having to specify
  ## `%` for every element.
  result = toJsnImpl(x)
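
# Usage sketch: build a JsonNode literal; every leaf is converted through the
# `%` overloads defined above:
#   let j = %*{"name": "alice", "balance": 100.u256, "tags": ["a", "b"]}
#   assert j["balance"].getStr == "100"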