Compare commits

...

102 Commits

Author SHA1 Message Date
Arnaud
60861d6af8
chore: rename codex to logos storage (#1359) 2025-12-18 17:23:09 +00:00
Eric
49e801803f
ci: remove dist tests and devnet deployment (#1338) 2025-12-17 06:03:59 +00:00
Jacek Sieka
858101c74c
chore: bump eth & networking (#1353) 2025-12-15 10:00:51 +00:00
Jacek Sieka
bd49591fff
chore: bump *-serialization (#1352) 2025-12-12 08:03:56 +00:00
Jacek Sieka
6765beee2c
chore: assorted bumps (#1351) 2025-12-11 21:03:36 +00:00
Jacek Sieka
45fec4b524
chore: bump libbacktrace (#1349) 2025-12-11 20:42:53 +00:00
Jacek Sieka
9ac9f6ff3c
chore: drop usage of upraises (#1348) 2025-12-11 09:03:43 +00:00
Arnaud
bd36032251
feat: add c binding (#1322)
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
2025-11-13 07:34:09 +00:00
Chrysostomos Nanakos
be759baf4d
feat: Block exchange optimizations (#1325)
Signed-off-by: Giuliano Mega <giuliano.mega@gmail.com>
Signed-off-by: Chrysostomos Nanakos <chris@include.gr>
Co-authored-by: gmega <giuliano.mega@gmail.com>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
2025-11-13 05:47:02 +00:00
Eric
6147a751f1
fix(ci): Remove macos amd release build (#1337) 2025-11-13 05:37:43 +00:00
Eric
ee47ca8760
feat(libs): Use libp2p multiformats extensions instead of a rolling branch (#1329) 2025-11-13 04:48:33 +00:00
Eric
f791a960f2
fix(ci): Windows SIGILL in CI (#1339) 2025-11-03 11:45:02 +00:00
Arnaud
db8f866db4
feat: check if CID exists in local store (#1331) 2025-11-02 04:32:47 +00:00
Eric
7aca2f0e61
fix(ci): Move conventional commits job to workflow (#1340) 2025-11-02 04:00:55 +00:00
Eric
072bff5cab
fix: ci integration tests (#1335) 2025-10-30 19:38:11 +11:00
Arnaud
af55a761e6
chore: skip marketplace and long integration tests (#1326) 2025-10-22 19:22:33 +11:00
Adam Uhlíř
e3d8d195c3
chore: update nim-libp2p (#1323) 2025-10-01 13:19:15 +02:00
Slava
d1f2e2399b
ci: validate pr title to adhere conventional commits (#1254) 2025-08-12 08:51:41 +00:00
Slava
8cd10edb69
ci: auto deploy codex on devnet (#1302) 2025-07-28 10:02:19 +00:00
Slava
6cf99e255c
ci: release master builds and upload them to the cloud (#1298) 2025-07-10 11:17:11 +00:00
Dmitriy Ryajov
7eb2fb12cc
make default dirs runtime, not compile time. (#1292) 2025-06-26 18:44:24 +00:00
Slava
352273ff81
chore: bump codex-contracts-eth (#1293) 2025-06-26 18:09:48 +00:00
Slava
9ef9258720
chore(ci): bump node to v22 (#1285) 2025-06-26 01:11:00 +00:00
markspanbroek
7927afe715
chore: update nph dependency (#1279)
Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
2025-06-25 10:30:48 +00:00
markspanbroek
01615354af
refactor(ci): run integration tests in parallel by spinning up more runners (#1287) 2025-06-25 08:56:16 +00:00
Chrysostomos Nanakos
baff902137
fix: resolve shared block request cancellation conflicts (#1284) 2025-06-24 15:05:25 +00:00
markspanbroek
4d44154a40
fix(ci): remove "update" to gcc-14 on windows (#1288) 2025-06-24 09:00:56 +00:00
markspanbroek
e1c397e112
fix(tests): auto import all tests files and fix forgotten tests (#1281) 2025-06-23 11:18:59 +00:00
Arnaud
7b660e3554
chore(marketplace): use hardhat ignition (#1195) 2025-06-20 15:55:00 +00:00
Arnaud
c5e424ff1b
feat(marketplace) - add status l2 (Linea) network (#1160) 2025-06-20 12:30:40 +00:00
Slava
36f64ad3e6
chore: update testnet marketplace address (#1283) 2025-06-20 06:13:58 +00:00
Ben Bierens
235c0ec842
chore: updates codex-contracts-eth submodule (#1278)
Co-authored-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
2025-06-19 10:31:52 +00:00
Arnaud
d443df441d
chore: improve marketplace integration tests (#1268) 2025-06-19 06:36:10 +00:00
Arnaud
e35aec7870
chore: increase gas limits (#1272) 2025-06-18 12:18:56 +00:00
Slava
93e4e0f177
ci(docker): add stable tag for dist-tests images (#1273) 2025-06-16 16:22:09 +00:00
Slava
6db6bf5f72
feat(docker): adjust entrypoint (#1271)
Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
2025-06-14 04:25:29 +00:00
Chrysostomos Nanakos
b305e00160
Add support for slot reconstruction on unavailable slot detection (#1235)
Co-authored-by: Arnaud <arnaud@status.im>
2025-06-12 22:19:42 +00:00
Slava
3d2d8273e6
chore: update testnet marketplace address (#1270) 2025-06-12 08:10:22 +00:00
Slava
e324ac8ca5
feat(docker): add codex network support for docker-entrypoint (#1262)
Co-authored-by: Ben Bierens <39762930+benbierens@users.noreply.github.com>
2025-06-11 14:02:39 +00:00
Adam Uhlíř
f267d99ea8
ci: docker stable tag (#1265) 2025-06-11 13:49:39 +00:00
Ben Bierens
8af73e02a9
bumps to latest nim-json-rpc (#1267) 2025-06-11 11:58:49 +00:00
markspanbroek
27d807a841
fix(sales): fix marketplace block expiry (#1258) 2025-06-11 11:27:09 +00:00
Ben Bierens
85823342e9
Improves logging in maintenance module and erasure. (#1264) 2025-06-10 13:27:52 +00:00
Ben Bierens
09a8419942
bumps codex-contracts-eth (#1261) 2025-06-10 09:18:04 +00:00
Adam Uhlíř
7502b9ad2c
feat(cirdl): auto-discovery of marketplace contract (#1259) 2025-06-09 10:04:15 +00:00
Arnaud
3e17207a0b
feat(marketplace) - add command line arg for maxPriorityFeePerGas (#1189) 2025-06-05 07:47:39 +00:00
Eric
1bea94c390
fix(tests): fetching past contract events test (#1255) 2025-06-04 20:36:09 -07:00
markspanbroek
ffbbee01b1
fix(purchasing): fix crash completing future more than once (#1249) 2025-06-04 14:15:07 +00:00
markspanbroek
2dd436bfb7
fix(sales): do not crash when retrieving request fails (#1248) 2025-06-04 11:22:14 +00:00
Arnaud
2e1306ac2d
chore: fix custom error handling when simulating invalid proofs (#1217)
* Fix custom error handling when simulating invalid proofs

* Update error message
2025-06-03 12:11:18 +00:00
Arnaud
45ade0e3c1
chore(marketplace): use canMarkProofAsMissing (#1188)
* Add canProofBeMarkedAsMissing

* Add more tests

* Update contracts submodule
2025-06-03 09:08:57 +00:00
Arnaud
ca869f6dce
fix(availabilities): use totalRemainingCollateral instead of totalCollateral for comparaison (#1229)
* Use totalRemainingCollateral instead of totalCollateral to compare the availability changes

* Update test to use totalRemainingCollateral instead of totalCollateral when testing OnAvailabilitySaved

* Reduce poll interval

* Fix flaky test

* Fix format
2025-06-02 16:47:12 +00:00
Slava
e43872d0b8
chore: update testnet marketplace address (#1245)
https://github.com/codex-storage/nim-codex/issues/1241
2025-05-30 09:12:55 +00:00
Giuliano Mega
d59c5b023c
chore: bump Nim to 2.2.4 (#1242)
* chore: bump Nim to 2.2.4

* fix: resolve symbol ambiguity and drop auto type

* fix: use reference to task instead of pointer or the compiler will deallocate `task` before the encoding/decoding is done

* fix: convention that maxCollateralPerByte equals totalRemainingCollateral when freeSize is 0 to avoid DivByZeroDefect

* fix: bump compiler version in CI pipeline as well
2025-05-29 16:37:38 -07:00
Arnaud
28a83db69e
chore: returns the collateral when a slot is reserved but not filled (#1216)
* Change token allowance method because increaseAllowance does not exist anymore

* Returns collateral when a reservation is deleted and not only a slot is filled

* Remove the returnedCollateral when the slot is not filled by the host

* Add returnedCollateral when the sale is ignored

* Add returnsCollateral variable for ignored state

* Rebase the contracts submodule on the master

* Add integration test

* Fix duration

* Remove unnecessary teardown function

* Remove misleading comment

* Get returned collateral from the request

* Enable logs to debug on CI

* Fix test

* Increase test timeout

* Fix typo

* Fix rebase
2025-05-29 14:47:37 +00:00
Slava
13811825b3
ci: use macos arm runners (#1174)
* ci: use inputs instead of matrix in a ccache key

* ci: switch to arm runners for macos

* ci: use node 20

* ci: pass cpu to a composite action
2025-05-29 10:17:46 +00:00
Arnaud
827d9ccccf
Update contracts (#1238) 2025-05-29 08:27:41 +00:00
Arnaud
c689542579
fix: sales cleanup cancellation (#1234)
* fix(sales): handle cancellation of slot cleanup

Ensures that processing slots from the slot queue
continues even when cleanup of a slot is cancelled.

Co-Authored-By: Eric <5089238+emizzle@users.noreply.github.com>

* chore(reservations): add more `raises` annotations

* Fix cleanup cancellation

* Add remove-agent to trackedfutures instead of the cleanup function

* Increase the timeout to match the request expiry

* Enable logs to debug on CI

* Remove useless except and do not return when add item back to slot queue fails

* Reduce poll interval to detect sale cancelled state

* Avoid cancelling cleanup routine

* Do not cancel creating reservation in order to avoid inconsistent state

* Remove useless try except

---------

Co-authored-by: Mark Spanbroek <mark@spanbroek.net>
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
2025-05-29 06:57:05 +00:00
munna0908
71422f0d3d
fix: Support for mapping multiple listener address (#1236)
* multi address mapping support

* fix thread issues

* fix local thread var issue

* chore: rename stopNatThread to stopNatThreads

---------

Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
2025-05-27 09:05:43 +00:00
markspanbroek
25a8077e80
fix(integration): fix api validation test (#1201)
* integration: shutdown codex node at end of test

On Windows the codex node did not shut down properly after this test
finished.

* contracts: fix flaky test
2025-05-26 16:49:53 +00:00
Ben Bierens
bfbd7264df
Adds missing async-raises for prover.verify (#1237) 2025-05-26 15:48:59 +00:00
Arnaud
f7d06cd0e8
chore(marketplace): switch to websocket (#1166)
* Switch to websocket

* Create resubscribe future

* Resubscribe websocket events after 5 minutes

* Remove the subscribe workaround and use define the resubscribe symbol

* Use localhost for ws url

* Define 240 seconds for resubscription interval

* Ensute that updates are sync when using ws
2025-05-23 14:13:19 +00:00
Marcin Czenko
748830570a
checked exceptions in stores (#1179)
* checked exceptions in stores

* makes asynciter as much exception safe as it gets

* introduce "SafeAsyncIter" that uses Results and limits exceptions to cancellations

* adds {.push raises: [].} to errors

* uses SafeAsyncIter in "listBlocks" and in "getBlockExpirations"

* simplifies safeasynciter (magic of auto)

* gets rid of ugly casts

* tiny fix in hte way we create raising futures in tests of safeasynciter

* Removes two more casts caused by using checked exceptions

* adds an extended explanation of one more complex SafeAsyncIter test

* adds missing "finishOnErr" param in slice constructor of SafeAsyncIter

* better fix for "Error: Exception can raise an unlisted exception: Exception" error.

---------

Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
2025-05-21 21:17:04 +00:00
markspanbroek
bde98738c2
fix(slotqueue): simplify slot queue workers (#1224)
* fix(slotqueue): simplify slot queue workers

- worker is now just an async running loop
- instead of passing a "done" Future, use an
  AsyncEvent to signal completion

* chore(slotqueue): address review comments

Co-Authored-By: Eric <5089238+emizzle@users.noreply.github.com>
Co-Authored-By: Dmitriy Ryajov <dryajov@gmail.com>

---------

Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
2025-05-15 13:02:04 +00:00
Adam Uhlíř
28e87d06cc
docs(openapi): freeSize non-optional (#1211) 2025-05-14 10:14:40 +00:00
Adam Uhlíř
f144099377
fix(api): availability creation validation (#1212) 2025-05-14 08:46:16 +00:00
Adam Uhlíř
19a5e05c13
docs(openapi): add local data delete endpoint (#1214)
* docs(openapi): add local data delete endpoint

* chore: feedback

Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
Signed-off-by: Adam Uhlíř <adam@uhlir.dev>

---------

Signed-off-by: Adam Uhlíř <adam@uhlir.dev>
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
2025-05-03 16:54:38 +00:00
Slava
b39d541227
chore: update testnet marketplace address (#1209)
https://github.com/codex-storage/nim-codex/issues/1203
2025-04-23 06:18:38 +00:00
Adam Uhlíř
d220e53fe1
ci: trigger python generator upon release (#1208) 2025-04-22 14:46:03 +00:00
Ben Bierens
2eb83a0ebb
Codex-contracts hash in version information. (#1207)
* Adds revision hash of codex-contracts to version information.

* Update codex/conf.nim

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Signed-off-by: Ben Bierens <39762930+benbierens@users.noreply.github.com>

* Update codex/conf.nim

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Signed-off-by: Ben Bierens <39762930+benbierens@users.noreply.github.com>

* Update codex/rest/api.nim

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Signed-off-by: Ben Bierens <39762930+benbierens@users.noreply.github.com>

* simplified git command

* Remove space

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Signed-off-by: Giuliano Mega <giuliano.mega@gmail.com>

* Updates openapi.yaml

---------

Signed-off-by: Ben Bierens <39762930+benbierens@users.noreply.github.com>
Signed-off-by: Giuliano Mega <giuliano.mega@gmail.com>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: Giuliano Mega <giuliano.mega@gmail.com>
2025-04-22 14:32:32 +00:00
Slava
22f5150d1d
ci: add compatible contracts image for nim-codex dist-tests docker image (#1204) 2025-04-18 14:21:24 +00:00
Eric
0f152d333c
chore: bump contracts to master (#1197)
Bump contracts to master branch.

There was a change that allowed hardhat to have multiple blocks with the same timestamp, so this needed to be reflected in two tests.
2025-04-17 06:13:43 +00:00
Adam Uhlíř
acf81d0571
chore: add marketplace topic to reservations (#1193) 2025-04-17 04:05:53 +00:00
Adam Uhlíř
7c7871ac75
ci: add compatible contracts image for nim-codex docker image (#1186)
* ci: add compatible contracts image for nim-codex docker image

* ci: with submodules

* ci: with submodules on correct place

* ci: remove double dash

* ci: avoiding artifact conflicts

* ci: add labels to arch images

* ci: correct way to add label to arch images

* ci: correct contract label

* ci: avoid building contracts image and use contracts commit hash

* refactor: better way to get the hash
2025-04-15 13:52:19 +00:00
markspanbroek
b92f79a654
Increase gas estimates (#1192)
* update nim-ethers to version 2.0.0

To allow for gas estimation of contract calls

* contracts: add 10% extra gas to contract calls

These calls could otherwise run out of gas because
the on-chain state may have changed between the time
of the estimate and the time of processing the
transaction.
2025-04-15 10:31:06 +00:00
Arnaud
6f62afef75
Apply changes to the openapi file (#1187) 2025-04-04 12:58:23 +00:00
Arnaud
4e2a321ad5
chore(openapi): add required parameters (#1178)
* Update the openapi file

* Fix typo

* Remove SalesAvailabilityCREATE and add collateralPerByte

* Fix SalesAvailability reference

* chore: adding perf optimization tweaks to openapi (#1182)

* chore: adding perf optimization tweaks to openapi

* chore: slotsize integer

---------

Co-authored-by: Adam Uhlíř <adam@uhlir.dev>
2025-04-02 14:09:23 +00:00
Slava
1213377ac4
ci: switch out from ubuntu 20.04 (#1184)
* ci: use ubuntu-latest for coverage (#1141)

* ci: pass --keep-going to lcov and genhtml (#1141)

* ci: use ubuntu-22.04 for release workflow (#1141)

* ci: install gcc-14 on linux (#1141)

* chore: bump nim-leveldbstatic to 0.2.1
2025-04-02 09:09:43 +00:00
munna0908
e9c6d19873
use constantine sha256 for codex tree hashing (#1168) 2025-03-31 06:41:08 +00:00
Marcin Czenko
5ec3b2b027
make sure we do not call "get" on unverified Result while fetching in batches (#1169)
* makes sure we do not call "get" on unverified result

* make handling of failed blocks in fetchBatched even more explicit

* simplifies allFinishedValues and makes it independent from allFinishedFailed

* only sleep if not iter.finished in fetchBatched
2025-03-31 04:57:55 +00:00
Marcin Czenko
0ec52abc98
fixes RandomChunker not respecting padding (#1170) 2025-03-31 04:48:22 +00:00
Arnaud
0032e60398
fix(marketplace): catch Marketplace_SlotIsFree and continue the cancelled process (#1139)
* Catch Marketplace_SlotIsFree and continue the cancelled process

* Add log message when the slot if free during failed state

* Reduce log level to debug for slot free error

* Separate slot mock errors

* Initialize variable in setyp

* Improve tests

* Remove non-meaningful checks and rename test

* Remove the Option in the error setters

* Return collateral when the state is cancelled only if the slot is filled by the host

* Do not propagate AsyncLockError

* Wrap contract error into specific error type

* Remove debug message

* Catch only SlotStateMismatchError in cancelled

* Fix error

* Remove returnBytesWas

* Use MarketError after raises pragma were defined

* Fix typo

* Fix lint
2025-03-26 15:17:39 +00:00
Arnaud
7deeb7d2b3
feat(marketplace): persistent availabilities (#1099)
* Add availability enabled parameter

* Return bytes to availability when finished

* Add until parameter

* Remove debug message

* Clean up and fix tests

* Update documentations and cleanup

* Avoid swallowing CancelledError

* Move until validation to reservations module

* Call onAvailabilityAdded callabck when the availability is enabled in sales

* Remove until validation in restapi when creating an availability

* Add openapi documentation

* Use results instead of stew/results (#1112)

* feat: request duration limit (#1057)

* feat: request duration limit

* Fix tests and duration type

* Add custom error

* Remove merge issue

* Update codex contracts eth

* Update market config and fix test

* Fix SlotReservationsConfig syntax

* Update dependencies

* test: remove doubled test

* chore: update contracts repo

---------

Co-authored-by: Arnaud <arnaud@status.im>

* fix(statemachine): do not raise from state.run (#1115)

* fix(statemachine): do not raise from state.run

* fix rebase

* fix exception handling in SaleProvingSimulated.prove

- re-raise CancelledError
- don't return State on CatchableError
- expect the Proofs_InvalidProof custom error instead of checking a string

* asyncSpawn salesagent.onCancelled

This was swallowing a KeyError in one of the tests (fixed in the previous commit)

* remove error handling states in asyncstatemachine

* revert unneeded changes

* formatting

* PR feedback, logging updates

* chore(integration): simplify block expiration integration test (#1100)

* chore(integration): simplify block expiration integration test

* clean up

* fix after rebase

* perf: contract storage optimizations (#1094)

* perf: contract storage optimizations

* Apply optimization changes

* Apply optimizing parameters sizing

* Update codex-contracts-eth

* bump latest changes in contracts branch

* Change requestDurationLimit to uint64

* fix tests

* fix tests

---------

Co-authored-by: Arnaud <arnaud@status.im>
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>

* bump contracts to master (#1122)

* Add availability enabled parameter

* Return bytes to availability when finished

* Add until parameter

* Clean up and fix tests

* Move until validation to reservations module

* Apply suggestion changes: return the reservation module error

* Apply suggestion changes for until dates

* Apply suggestion changes: reorganize tests

* Fix indent

* Remove test related to timing issue

* Add raises errors to async pragram and remove useless try except

* Update open api documentation

* Fix wording

* Remove the httpClient restart statements

* Use market.getRequestEnd to set validUntil

* Remove returnBytes

* Use clock.now in testing

* Move the api validation file to the right file

---------

Co-authored-by: Adam Uhlíř <adam@uhlir.dev>
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
2025-03-26 11:45:22 +00:00
Arnaud
60b6996eb0
chore(marketplace): define raises for async pragma (#1165)
* Define raises for async pragma

* Update nim ethers

* Replace CatchableError by MarketError
2025-03-26 08:06:37 +00:00
Arnaud
a0d6fbaf02
chore(marketplace) - fix the http error codes when validating the availability requests (#1104)
* Use 422 http code when there is a validation error

* Update the open api description

* Fix typo

* Add more tests for total size

* Catch CancelledError because TrackedFuture raise no error

* Split rest api validation test to a new file

* Change the way of testing negative numbers

* Rename client variable and fix test status code

* Try to reduce the number of requests in CI when asserting in tests

* Fix rebase and remove safeEventually
2025-03-24 15:47:05 +00:00
Arnaud
709a8648fd
chore: add request validations (#1144)
* Add request validations

* Define expiry as required field in storage request params and fix tests

* Fix error messages

* Enable logs to figure out the issue with recurring failing test on macos

* Add custom errors raised by contract

* Remove custom error non existing anymore

* Update asynctest module

* Update timer tests after updating asynctest
2025-03-24 11:53:34 +00:00
Dmitriy Ryajov
110147d8ef
monitor background tasks on streaming dataset (#1164) 2025-03-21 17:23:07 +00:00
munna0908
3a312596bf
deps: upgrade libp2p & constantine (#1167)
* upgrade libp2p and constantine

* fix libp2p update issues

* add missing vendor package

* add missing vendor package
2025-03-20 19:11:00 -07:00
Arnaud
9d7b521519
chore: add missing custom errors (#1134)
* Add missing custom errors

* Separate mock state errors

* Remove the Option in the error setters

* Wrap the contract errors in MarketError

* Remove async raises (needs to address it in another PR)

* Wrap contract errors into specific error types

* Rename SlotNotFreeError to SlotStateMismatchError
2025-03-18 07:06:46 +00:00
Giuliano Mega
54177e9fbf
feat(integration): use async client instead of standard Nim HTTP client (#1159)
* WiP: migrating CodexClient to chronos http client

* fix(api): fixes #1163

* feat: fully working API integration tests

* convert most of the tests in testupdownload

* feat: working updownload tests on async client

* feat: make testsales work with async codexclient

* feat: make testpurchasing work with async codexclient

* feat: make testblockexpiration work with async codexclient

* feat: make marketplacesuite work with async codexclient

* make testproofs work with async codexclient

* chore: refactor client to express higher level in terms of lower level operations

* fix: set correct content-length for erasure-coded datasets

* feat: make testecbug work with async client

* feat: make testvalidator work with async client

* refactor: simplify request aliases, add close operation

* wire back client.close at node shutdown

* refactor: remove unused exception

* fix: use await instead of waitFor on async call sites
2025-03-17 20:08:24 +00:00
munna0908
75db491d84
fix: optimise erasure encode/decode (#1123)
* avoid copying block,parity data to shared memory

* use alloc instead of allocShared

* code cleanup
2025-03-14 13:09:18 +00:00
tianzedavid
f1b84dc6d1
chore: fix some typos (#1110)
Signed-off-by: tianzedavid <cuitianze@aliyun.com>
Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
2025-03-13 22:46:44 +00:00
Eric
a5db757de3
fix: ethers no longer leaks AsyncLockError (#1146)
* fix: ethers no longer leaks AsyncLockError

* Add message to convertEthersEthers

- adds a message to convertEthersError allowing contextual error messages
- replaces try/except EthersError with convertEthersError (PR feedback)

* bump ethers after PR merged upstream
2025-03-13 22:46:05 +00:00
Ben Bierens
a0ddcef08d
changes trace to info for updates of the annouce/dht record logs (#1156) 2025-03-13 22:45:44 +00:00
Dmitriy Ryajov
1cac3e2a11
Fix/rework async exceptions (#1130)
* cleanup imports and logs

* add BlockHandle type

* revert deps

* refactor: async error handling and future tracking improvements

- Update async procedures to use explicit raises annotation
- Modify TrackedFutures to handle futures with no raised exceptions
- Replace `asyncSpawn` with explicit future tracking
- Update test suites to use `unittest2`
- Standardize error handling across network and async components
- Remove deprecated error handling patterns

This commit introduces a more robust approach to async error handling and future management, improving type safety and reducing potential runtime errors.

* bump nim-serde

* remove asyncSpawn

* rework background downloads and prefetch

* imporove logging

* refactor: enhance async procedures with error handling and raise annotations

* misc cleanup

* misc

* refactor: implement allFinishedFailed to aggregate future results with success and failure tracking

* refactor: update error handling in reader procedures to raise ChunkerError and CancelledError

* refactor: improve error handling in wantListHandler and accountHandler procedures

* refactor: simplify LPStreamReadError creation by consolidating parameters

* refactor: enhance error handling in AsyncStreamWrapper to catch unexpected errors

* refactor: enhance error handling in advertiser and discovery loops to improve resilience

* misc

* refactor: improve code structure and readability

* remove cancellation from addSlotToQueue

* refactor: add assertion for unexpected errors in local store checks

* refactor: prevent tracking of finished futures and improve test assertions

* refactor: improve error handling in local store checks

* remove usage of msgDetail

* feat: add initial implementation of discovery engine and related components

* refactor: improve task scheduling logic by removing unnecessary break statement

* break after scheduling a task

* make taskHandler cancelable

* refactor: update async handlers to raise CancelledError

* refactor(advertiser): streamline error handling and improve task flow in advertise loops

* fix: correct spelling of "divisible" in error messages and comments

* refactor(discovery): simplify discovery task loop and improve error handling

* refactor(engine): filter peers before processing in cancelBlocks procedure
2025-03-13 07:33:15 -07:00
Arnaud
2538ff8da3
chore: create new httpClient per request (#1136)
* Create new httpClient per request

* Fix tests after rebase and close the clients at the end
2025-03-12 13:41:00 +00:00
Arnaud
17d3bb55cf
chore(marketplace): notify sales when duration, minPricePerBytePerSecond or totalCollateral is updated (#1148)
* Call onAvailabilityAdded when freeSize, duration or minPricePerBytePerSecond is increased

* Rename onAvailabilityAdded to onAvailabilitySaved

* Rename OnAvailabilitySaved to OnAvailabilityUpserted

* Go back to OnAvailabilitySaved
2025-03-12 09:12:06 +00:00
Arnaud
703921df32
chore(restapi): add headers to support on progress when downloading (#1150)
* Add headers to support on progress on download

* Replace http session by http client in downloadBytes

* Use int instead of int64 for datasetSize

* Rename variable to avoid shallowing client
2025-03-10 15:59:24 +00:00
Giuliano Mega
2a3a29720f
Fixes Codex crashes on interrupted downloads (#1151)
* fix: fixes Codex crashes on interrupted downloads

* fix: add better feedback to 404, minor rewording in test comment
2025-03-10 13:27:16 +00:00
Arnaud
eb09e610d5
fix(ci): handle coverage as a string to enable gcc 14 on linux (#1140)
* Handle coverage as a string not a boolean

* Update ubuntu version to latest
2025-03-05 08:35:46 +00:00
Arnaud
7065718e09
feat(marketplace): indicate that slot is being repaired when trying to download (#1083)
* Indicate that slot is being repaired when trying to download

* Fix tests

* Apply nph

* Calculate the repair collateral when adding the item into the queue

* Add slotCollateral calculation with getRequest cache and remove populationItem function

* Update with pricePerByte

* Simplify StorageAsk parameter

* Minor fixes

* Move cache request to another PR

* Rename SlotQueueItem collateral and required in init

* Use override func to optimise calls when the slot state is known

* Remove unused code

* Cosmetic change

* Use raiseMarketError helper

* Add exceptions to async pragma

* Cosmetic change

* Use raiseMarketError helper

* Let slotCollateral determines the slot sate

* Use configSync to avoid async pragma in onStorageRequested

* Add loadConfig function

* Add CatchableError to async pragma

* Add missing pragma raises errors

* Move loadConfig

* Avoid swallow CancelledError

* Avoid swallowing CancelledError

* Avoid swallowing CancelledError

* Update error messages

* Except MarketError instead of CatchableError

* Fix merge issue

* Log fatal when configuration cannot be loaded

* Propagate MarketError in slotCollateral

* Remove useless configSync

* Use result with explicit error

* Fix syntax

---------

Signed-off-by: Arnaud <arnaud@status.im>
2025-02-27 16:58:23 +00:00
Ben Bierens
fab5e16afd
Missing nullability causes json-serialize failure in some generated clients. (#1129) 2025-02-27 11:29:27 +00:00
322 changed files with 13316 additions and 4457 deletions

@ -81,35 +81,35 @@ runs:
mingw-w64-i686-ntldd-git
mingw-w64-i686-rust
- name: MSYS2 (Windows All) - Update to gcc 14
if: inputs.os == 'windows'
shell: ${{ inputs.shell }} {0}
run: |
pacman -U --noconfirm https://repo.msys2.org/mingw/ucrt64/mingw-w64-ucrt-x86_64-gcc-14.2.0-2-any.pkg.tar.zst https://repo.msys2.org/mingw/ucrt64/mingw-w64-ucrt-x86_64-gcc-libs-14.2.0-2-any.pkg.tar.zst
- name: Install gcc 14 on Linux
# We don't want to install gcc 14 for coverage (Ubuntu 20.04)
if : ${{ inputs.os == 'linux' && !inputs.coverage }}
if : ${{ inputs.os == 'linux' && inputs.coverage != 'true' }}
shell: ${{ inputs.shell }} {0}
run: |
# Skip for older Ubuntu versions
if [[ $(lsb_release -r | awk -F '[^0-9]+' '{print $2}') -ge 24 ]]; then
# Install GCC-14
sudo apt-get update -qq
sudo apt-get install -yq gcc-14
# Add GCC-14 to alternatives
sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-14 14
# Set GCC-14 as the default
sudo update-alternatives --set gcc /usr/bin/gcc-14
fi
- name: Install ccache on Linux/Mac
if: inputs.os == 'linux' || inputs.os == 'macos'
uses: hendrikmuhs/ccache-action@v1.2
with:
create-symlink: true
key: ${{ matrix.os }}-${{ matrix.builder }}-${{ matrix.cpu }}-${{ matrix.tests }}-${{ matrix.nim_version }}
key: ${{ inputs.os }}-${{ inputs.builder }}-${{ inputs.cpu }}-${{ inputs.tests }}-${{ inputs.nim_version }}
evict-old-files: 7d
- name: Install ccache on Windows
if: inputs.os == 'windows'
uses: hendrikmuhs/ccache-action@v1.2
with:
key: ${{ matrix.os }}-${{ matrix.builder }}-${{ matrix.cpu }}-${{ matrix.tests }}-${{ matrix.nim_version }}
key: ${{ inputs.os }}-${{ inputs.builder }}-${{ inputs.cpu }}-${{ inputs.tests }}-${{ inputs.nim_version }}
evict-old-files: 7d
- name: Enable ccache on Windows
@ -202,7 +202,7 @@ runs:
- name: Restore Nim toolchain binaries from cache
id: nim-cache
uses: actions/cache@v4
if : ${{ !inputs.coverage }}
if : ${{ inputs.coverage != 'true' }}
with:
path: NimBinaries
key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_version }}-cache-${{ env.cache_nonce }}-${{ github.run_id }}
@ -218,7 +218,7 @@ runs:
run: |
git config --global core.symlinks false
- name: Build Nim and Codex dependencies
- name: Build Nim and Logos Storage dependencies
shell: ${{ inputs.shell }} {0}
run: |
which gcc

@ -3,12 +3,14 @@ Tips for shorter build times
### Runner availability ###
Currently, the biggest bottleneck when optimizing workflows is the availability
of Windows and macOS runners. Therefore, anything that reduces the time spent in
Windows or macOS jobs will have a positive impact on the time waiting for
runners to become available. The usage limits for Github Actions are [described
here][limits]. You can see a breakdown of runner usage for your jobs in the
Github Actions tab ([example][usage]).
When running on the Github free, pro or team plan, the bottleneck when
optimizing workflows is the availability of macOS runners. Therefore, anything
that reduces the time spent in macOS jobs will have a positive impact on the
time waiting for runners to become available. On the Github enterprise plan,
this is not the case and you can more freely use parallelization on multiple
runners. The usage limits for Github Actions are [described here][limits]. You
can see a breakdown of runner usage for your jobs in the Github Actions tab
([example][usage]).
### Windows is slow ###
@ -22,11 +24,10 @@ analysis, etc. are therefore better performed on a Linux runner.
Breaking up a long build job into several jobs that you run in parallel can have
a positive impact on the wall clock time that a workflow runs. For instance, you
might consider running unit tests and integration tests in parallel. Keep in
mind however that availability of macOS and Windows runners is the biggest
bottleneck. If you split a Windows job into two jobs, you now need to wait for
two Windows runners to become available! Therefore parallelization often only
makes sense for Linux jobs.
might consider running unit tests and integration tests in parallel. When
running on the Github free, pro or team plan, keep in mind that availability of
macOS runners is a bottleneck. If you split a macOS job into two jobs, you now
need to wait for two macOS runners to become available.
### Refactoring ###
@ -66,9 +67,10 @@ might seem inconvenient, because when you're debugging an issue you often want
to know whether you introduced a failure on all platforms, or only on a single
one. You might be tempted to disable fail-fast, but keep in mind that this keeps
runners busy for longer on a workflow that you know is going to fail anyway.
Consequent runs will therefore take longer to start. Fail fast is most likely better for overall development speed.
Consequent runs will therefore take longer to start. Fail fast is most likely
better for overall development speed.
[usage]: https://github.com/codex-storage/nim-codex/actions/runs/3462031231/usage
[usage]: https://github.com/logos-storage/logos-storage-nim/actions/runs/3462031231/usage
[composite]: https://docs.github.com/en/actions/creating-actions/creating-a-composite-action
[reusable]: https://docs.github.com/en/actions/using-workflows/reusing-workflows
[cache]: https://github.com/actions/cache/blob/main/workarounds.md#update-a-cache
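An illustrative sketch of the parallel-test-job approach described in the tips above, which the ci.yml changes later in this compare implement: integration tests are split across runners, each job receiving a `job_number` and an `includes` list through `CODEX_INTEGRATION_TEST_INCLUDES`. The matrix entries and test module names below are assumptions for illustration only; the real matrix is generated by `tools/scripts/ci-job-matrix.sh`, whose exact output fields are not shown in this compare.

```yaml
# Illustrative sketch only -- not a file from this repository.
# Assumes the matrix generator emits entries with builder/tests/includes/job_number fields.
name: parallel-integration-sketch
on: workflow_dispatch

jobs:
  tests:
    strategy:
      matrix:
        include:
          # Two hypothetical slices of the integration test suite.
          - { builder: ubuntu-latest, tests: integration, job_number: 1, includes: "testsales testpurchasing" }
          - { builder: ubuntu-latest, tests: integration, job_number: 2, includes: "testproofs testvalidator" }
    name: linux-${{ matrix.tests }}-${{ matrix.job_number }}
    runs-on: ${{ matrix.builder }}
    steps:
      - uses: actions/checkout@v4
      - name: Integration tests
        env:
          # Each parallel job only runs the test modules listed for it.
          CODEX_INTEGRATION_TEST_INCLUDES: ${{ matrix.includes }}
        run: make testIntegration   # the real workflow runs: make -j${ncpu} testIntegration
```

Splitting this way only pays off where runner availability is not the bottleneck, per the note about macOS runners above.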

@ -24,9 +24,9 @@ jobs:
run:
shell: ${{ matrix.shell }} {0}
name: ${{ matrix.os }}-${{ matrix.tests }}-${{ matrix.cpu }}-${{ matrix.nim_version }}
name: ${{ matrix.os }}-${{ matrix.tests }}-${{ matrix.cpu }}-${{ matrix.nim_version }}-${{ matrix.job_number }}
runs-on: ${{ matrix.builder }}
timeout-minutes: 120
timeout-minutes: 90
steps:
- name: Checkout sources
uses: actions/checkout@v4
@ -38,6 +38,7 @@ jobs:
uses: ./.github/actions/nimbus-build-system
with:
os: ${{ matrix.os }}
cpu: ${{ matrix.cpu }}
shell: ${{ matrix.shell }}
nim_version: ${{ matrix.nim_version }}
coverage: false
@ -47,20 +48,22 @@ jobs:
if: matrix.tests == 'unittest' || matrix.tests == 'all'
run: make -j${ncpu} test
# workaround for https://github.com/NomicFoundation/hardhat/issues/3877
- name: Setup Node.js
if: matrix.tests == 'contract' || matrix.tests == 'integration' || matrix.tests == 'tools' || matrix.tests == 'all'
uses: actions/setup-node@v4
with:
node-version: 18.15
node-version: 22
- name: Start Ethereum node with Codex contracts
- name: Start Ethereum node with Logos Storage contracts
if: matrix.tests == 'contract' || matrix.tests == 'integration' || matrix.tests == 'tools' || matrix.tests == 'all'
working-directory: vendor/codex-contracts-eth
working-directory: vendor/logos-storage-contracts-eth
env:
MSYS2_PATH_TYPE: inherit
run: |
npm install
npm ci
npm start &
# Wait for the contracts to be deployed
sleep 5
## Part 2 Tests ##
- name: Contract tests
@ -70,13 +73,15 @@ jobs:
## Part 3 Tests ##
- name: Integration tests
if: matrix.tests == 'integration' || matrix.tests == 'all'
env:
CODEX_INTEGRATION_TEST_INCLUDES: ${{ matrix.includes }}
run: make -j${ncpu} testIntegration
- name: Upload integration tests log files
uses: actions/upload-artifact@v4
if: (matrix.tests == 'integration' || matrix.tests == 'all') && always()
with:
name: ${{ matrix.os }}-${{ matrix.cpu }}-${{ matrix.nim_version }}-integration-tests-logs
name: ${{ matrix.os }}-${{ matrix.cpu }}-${{ matrix.nim_version }}-${{ matrix.job_number }}-integration-tests-logs
path: tests/integration/logs/
retention-days: 1

@ -9,36 +9,28 @@ on:
env:
cache_nonce: 0 # Allows for easily busting actions/cache caches
nim_version: v2.0.14
nim_version: v2.2.4
concurrency:
group: ${{ github.workflow }}-${{ github.ref || github.run_id }}
cancel-in-progress: true
jobs:
matrix:
runs-on: ubuntu-latest
outputs:
matrix: ${{ steps.matrix.outputs.matrix }}
cache_nonce: ${{ env.cache_nonce }}
steps:
- name: Checkout sources
uses: actions/checkout@v4
- name: Compute matrix
id: matrix
uses: fabiocaccamo/create-matrix-action@v5
with:
matrix: |
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {macos}, cpu {amd64}, builder {macos-13}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {macos}, cpu {amd64}, builder {macos-13}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {macos}, cpu {amd64}, builder {macos-13}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {macos}, cpu {amd64}, builder {macos-13}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {windows}, cpu {amd64}, builder {windows-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {msys2}
os {windows}, cpu {amd64}, builder {windows-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {msys2}
os {windows}, cpu {amd64}, builder {windows-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {msys2}
os {windows}, cpu {amd64}, builder {windows-latest}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {msys2}
run: |
echo 'matrix<<EOF' >> $GITHUB_OUTPUT
tools/scripts/ci-job-matrix.sh >> $GITHUB_OUTPUT
echo 'EOF' >> $GITHUB_OUTPUT
build:
needs: matrix
@ -61,11 +53,7 @@ jobs:
suggest: true
coverage:
# Force to stick to ubuntu 20.04 for coverage because
# lcov was updated to 2.x version in ubuntu-latest
# and cause a lot of issues.
# See https://github.com/linux-test-project/lcov/issues/238
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v4

@ -0,0 +1,19 @@
name: Conventional Commits Linting
on:
push:
branches:
- master
pull_request:
workflow_dispatch:
merge_group:
jobs:
pr-title:
runs-on: ubuntu-latest
if: github.event_name == 'pull_request'
steps:
- name: PR Conventional Commit Validation
uses: ytanikin/pr-conventional-commits@1.4.1
with:
task_types: '["feat","fix","docs","test","ci","build","refactor","style","perf","chore","revert"]'

@ -1,38 +0,0 @@
name: Docker - Dist-Tests
on:
push:
branches:
- master
tags:
- 'v*.*.*'
paths-ignore:
- '**/*.md'
- '.gitignore'
- '.github/**'
- '!.github/workflows/docker-dist-tests.yml'
- '!.github/workflows/docker-reusable.yml'
- 'docker/**'
- '!docker/codex.Dockerfile'
- '!docker/docker-entrypoint.sh'
workflow_dispatch:
inputs:
run_release_tests:
description: Run Release tests
required: false
type: boolean
default: false
jobs:
build-and-push:
name: Build and Push
uses: ./.github/workflows/docker-reusable.yml
with:
nimflags: '-d:disableMarchNative -d:codex_enable_api_debug_peers=true -d:codex_enable_proof_failures=true -d:codex_enable_log_counter=true -d:verify_circuit=true'
nat_ip_auto: true
tag_latest: ${{ github.ref_name == github.event.repository.default_branch || startsWith(github.ref, 'refs/tags/') }}
tag_suffix: dist-tests
run_release_tests: ${{ inputs.run_release_tests }}
secrets: inherit

@ -34,6 +34,11 @@ on:
description: Set latest tag for Docker images
required: false
type: boolean
tag_stable:
default: false
description: Set stable tag for Docker images
required: false
type: boolean
tag_sha:
default: true
description: Set Git short commit as Docker tag
@ -59,6 +64,14 @@ on:
required: false
type: string
default: false
contract_image:
description: Specifies compatible smart contract image
required: false
type: string
outputs:
codex_image:
description: Logos Storage Docker image tag
value: ${{ jobs.publish.outputs.codex_image }}
env:
@ -69,10 +82,12 @@ env:
NIMFLAGS: ${{ inputs.nimflags }}
NAT_IP_AUTO: ${{ inputs.nat_ip_auto }}
TAG_LATEST: ${{ inputs.tag_latest }}
TAG_STABLE: ${{ inputs.tag_stable }}
TAG_SHA: ${{ inputs.tag_sha }}
TAG_SUFFIX: ${{ inputs.tag_suffix }}
CONTRACT_IMAGE: ${{ inputs.contract_image }}
# Tests
TESTS_SOURCE: codex-storage/cs-codex-dist-tests
TESTS_SOURCE: logos-storage/logos-storage-nim-cs-dist-tests
TESTS_BRANCH: master
CONTINUOUS_TESTS_LIST: ${{ inputs.continuous_tests_list }}
CONTINUOUS_TESTS_DURATION: ${{ inputs.continuous_tests_duration }}
@ -80,8 +95,20 @@ env:
jobs:
# Compute variables
compute:
name: Compute build ID
runs-on: ubuntu-latest
outputs:
build_id: ${{ steps.build_id.outputs.build_id }}
steps:
- name: Generate unique build id
id: build_id
run: echo "build_id=$(openssl rand -hex 5)" >> $GITHUB_OUTPUT
# Build platform specific image
build:
needs: compute
strategy:
fail-fast: true
matrix:
@ -108,11 +135,19 @@ jobs:
- name: Checkout
uses: actions/checkout@v4
- name: Docker - Variables
run: |
# Create contract label for compatible contract image if specified
if [[ -n "${{ env.CONTRACT_IMAGE }}" ]]; then
echo "CONTRACT_LABEL=storage.codex.nim-codex.blockchain-image=${{ env.CONTRACT_IMAGE }}" >> $GITHUB_ENV
fi
- name: Docker - Meta
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ env.DOCKER_REPO }}
labels: ${{ env.CONTRACT_LABEL }}
- name: Docker - Set up Buildx
uses: docker/setup-buildx-action@v3
@ -147,7 +182,7 @@ jobs:
- name: Docker - Upload digest
uses: actions/upload-artifact@v4
with:
name: digests-${{ matrix.target.arch }}
name: digests-${{ needs.compute.outputs.build_id }}-${{ matrix.target.arch }}
path: /tmp/digests/*
if-no-files-found: error
retention-days: 1
@ -159,35 +194,41 @@ jobs:
runs-on: ubuntu-latest
outputs:
version: ${{ steps.meta.outputs.version }}
needs: build
codex_image: ${{ steps.image_tag.outputs.codex_image }}
needs: [build, compute]
steps:
- name: Docker - Variables
run: |
# Adjust custom suffix when set and
# Adjust custom suffix when set
if [[ -n "${{ env.TAG_SUFFIX }}" ]]; then
echo "TAG_SUFFIX=-${{ env.TAG_SUFFIX }}" >>$GITHUB_ENV
echo "TAG_SUFFIX=-${{ env.TAG_SUFFIX }}" >> $GITHUB_ENV
fi
# Disable SHA tags on tagged release
if [[ ${{ startsWith(github.ref, 'refs/tags/') }} == "true" ]]; then
echo "TAG_SHA=false" >>$GITHUB_ENV
echo "TAG_SHA=false" >> $GITHUB_ENV
fi
# Handle latest and latest-custom using raw
if [[ ${{ env.TAG_SHA }} == "false" ]]; then
echo "TAG_LATEST=false" >>$GITHUB_ENV
echo "TAG_RAW=true" >>$GITHUB_ENV
echo "TAG_LATEST=false" >> $GITHUB_ENV
echo "TAG_RAW=true" >> $GITHUB_ENV
if [[ -z "${{ env.TAG_SUFFIX }}" ]]; then
echo "TAG_RAW_VALUE=latest" >>$GITHUB_ENV
echo "TAG_RAW_VALUE=latest" >> $GITHUB_ENV
else
echo "TAG_RAW_VALUE=latest-{{ env.TAG_SUFFIX }}" >>$GITHUB_ENV
echo "TAG_RAW_VALUE=latest-{{ env.TAG_SUFFIX }}" >> $GITHUB_ENV
fi
else
echo "TAG_RAW=false" >>$GITHUB_ENV
echo "TAG_RAW=false" >> $GITHUB_ENV
fi
# Create contract label for compatible contract image if specified
if [[ -n "${{ env.CONTRACT_IMAGE }}" ]]; then
echo "CONTRACT_LABEL=storage.codex.nim-codex.blockchain-image=${{ env.CONTRACT_IMAGE }}" >> $GITHUB_ENV
fi
- name: Docker - Download digests
uses: actions/download-artifact@v4
with:
pattern: digests-*
pattern: digests-${{ needs.compute.outputs.build_id }}-*
merge-multiple: true
path: /tmp/digests
@ -199,12 +240,14 @@ jobs:
uses: docker/metadata-action@v5
with:
images: ${{ env.DOCKER_REPO }}
labels: ${{ env.CONTRACT_LABEL }}
flavor: |
latest=${{ env.TAG_LATEST }}
suffix=${{ env.TAG_SUFFIX }},onlatest=true
tags: |
type=semver,pattern={{version}}
type=raw,enable=${{ env.TAG_RAW }},value=latest
type=raw,enable=${{ env.TAG_STABLE }},value=stable
type=sha,enable=${{ env.TAG_SHA }}
- name: Docker - Login to Docker Hub
@ -219,9 +262,12 @@ jobs:
docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
$(printf '${{ env.DOCKER_REPO }}@sha256:%s ' *)
- name: Docker - Image tag
id: image_tag
run: echo "codex_image=${{ env.DOCKER_REPO }}:${{ steps.meta.outputs.version }}" >> "$GITHUB_OUTPUT"
- name: Docker - Inspect image
run: |
docker buildx imagetools inspect ${{ env.DOCKER_REPO }}:${{ steps.meta.outputs.version }}
run: docker buildx imagetools inspect ${{ steps.image_tag.outputs.codex_image }}
# Compute Tests inputs
@ -270,7 +316,7 @@ jobs:
max-parallel: 1
matrix:
tests: ${{ fromJSON(needs.compute-continuous-tests-inputs.outputs.continuous_tests_list) }}
uses: codex-storage/cs-codex-dist-tests/.github/workflows/run-continuous-tests.yaml@master
uses: logos-storage/logos-storage-nim-cs-dist-tests/.github/workflows/run-continuous-tests.yaml@master
with:
source: ${{ needs.compute-tests-inputs.outputs.source }}
branch: ${{ needs.compute-tests-inputs.outputs.branch }}
@ -287,7 +333,7 @@ jobs:
name: Run Release Tests
needs: [compute-tests-inputs]
if: ${{ inputs.run_release_tests == 'true' }}
uses: codex-storage/cs-codex-dist-tests/.github/workflows/run-release-tests.yaml@master
uses: logos-storage/logos-storage-nim-cs-dist-tests/.github/workflows/run-release-tests.yaml@master
with:
source: ${{ needs.compute-tests-inputs.outputs.source }}
branch: ${{ needs.compute-tests-inputs.outputs.branch }}

@ -18,11 +18,27 @@ on:
- '!docker/docker-entrypoint.sh'
workflow_dispatch:
jobs:
get-contracts-hash:
runs-on: ubuntu-latest
outputs:
hash: ${{ steps.get-hash.outputs.hash }}
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Get submodule short hash
id: get-hash
run: |
hash=$(git rev-parse --short HEAD:vendor/logos-storage-contracts-eth)
echo "hash=$hash" >> $GITHUB_OUTPUT
build-and-push:
name: Build and Push
uses: ./.github/workflows/docker-reusable.yml
needs: get-contracts-hash
with:
tag_latest: ${{ github.ref_name == github.event.repository.default_branch || startsWith(github.ref, 'refs/tags/') }}
tag_stable: ${{ startsWith(github.ref, 'refs/tags/') }}
contract_image: "codexstorage/codex-contracts-eth:sha-${{ needs.get-contracts-hash.outputs.hash }}"
secrets: inherit

@ -52,7 +52,7 @@ jobs:
node-version: 18
- name: Build OpenAPI
run: npx @redocly/cli build-docs openapi.yaml --output openapi/index.html --title "Codex API"
run: npx @redocly/cli build-docs openapi.yaml --output openapi/index.html --title "Logos Storage API"
- name: Build Postman Collection
run: npx -y openapi-to-postmanv2 -s openapi.yaml -o openapi/postman.json -p -O folderStrategy=Tags,includeAuthInfoInExample=false

@ -15,15 +15,14 @@ jobs:
matrix: ${{ steps.matrix.outputs.matrix }}
cache_nonce: ${{ env.cache_nonce }}
steps:
- name: Checkout sources
uses: actions/checkout@v4
- name: Compute matrix
id: matrix
uses: fabiocaccamo/create-matrix-action@v5
with:
matrix: |
os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
run: |
echo 'matrix<<EOF' >> $GITHUB_OUTPUT
tools/scripts/ci-job-matrix.sh linux >> $GITHUB_OUTPUT
echo 'EOF' >> $GITHUB_OUTPUT
build:
needs: matrix

@ -4,13 +4,15 @@ on:
push:
tags:
- 'v*.*.*'
branches:
- master
workflow_dispatch:
env:
cache_nonce: 0 # Allows for easily busting actions/cache caches
nim_version: pinned
rust_version: 1.79.0
codex_binary_base: codex
storage_binary_base: storage
cirdl_binary_base: cirdl
build_dir: build
nim_flags: ''
@ -28,9 +30,8 @@ jobs:
uses: fabiocaccamo/create-matrix-action@v5
with:
matrix: |
os {linux}, cpu {amd64}, builder {ubuntu-20.04}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {amd64}, builder {ubuntu-22.04}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {arm64}, builder {ubuntu-22.04-arm}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {macos}, cpu {amd64}, builder {macos-13}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {macos}, cpu {arm64}, builder {macos-14}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {windows}, cpu {amd64}, builder {windows-latest}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {msys2}
@ -72,18 +73,18 @@ jobs:
windows*) os_name="windows" ;;
esac
github_ref_name="${GITHUB_REF_NAME/\//-}"
codex_binary="${{ env.codex_binary_base }}-${github_ref_name}-${os_name}-${{ matrix.cpu }}"
storage_binary="${{ env.storage_binary_base }}-${github_ref_name}-${os_name}-${{ matrix.cpu }}"
cirdl_binary="${{ env.cirdl_binary_base }}-${github_ref_name}-${os_name}-${{ matrix.cpu }}"
if [[ ${os_name} == "windows" ]]; then
codex_binary="${codex_binary}.exe"
storage_binary="${storage_binary}.exe"
cirdl_binary="${cirdl_binary}.exe"
fi
echo "codex_binary=${codex_binary}" >>$GITHUB_ENV
echo "storage_binary=${storage_binary}" >>$GITHUB_ENV
echo "cirdl_binary=${cirdl_binary}" >>$GITHUB_ENV
- name: Release - Build
run: |
make NIMFLAGS="--out:${{ env.build_dir }}/${{ env.codex_binary }} ${{ env.nim_flags }}"
make NIMFLAGS="--out:${{ env.build_dir }}/${{ env.storage_binary }} ${{ env.nim_flags }}"
make cirdl NIMFLAGS="--out:${{ env.build_dir }}/${{ env.cirdl_binary }} ${{ env.nim_flags }}"
- name: Release - Libraries
@ -94,11 +95,11 @@ jobs:
done
fi
- name: Release - Upload codex build artifacts
- name: Release - Upload Logos Storage build artifacts
uses: actions/upload-artifact@v4
with:
name: release-${{ env.codex_binary }}
path: ${{ env.build_dir }}/${{ env.codex_binary_base }}*
name: release-${{ env.storage_binary }}
path: ${{ env.build_dir }}/${{ env.storage_binary_base }}*
retention-days: 30
- name: Release - Upload cirdl build artifacts
@ -138,7 +139,7 @@ jobs:
}
# Compress and prepare
for file in ${{ env.codex_binary_base }}* ${{ env.cirdl_binary_base }}*; do
for file in ${{ env.storage_binary_base }}* ${{ env.cirdl_binary_base }}*; do
if [[ "${file}" == *".exe"* ]]; then
# Windows - binary only
@ -170,6 +171,34 @@ jobs:
path: /tmp/release/
retention-days: 30
- name: Release - Upload to the cloud
env:
s3_endpoint: ${{ secrets.S3_ENDPOINT }}
s3_bucket: ${{ secrets.S3_BUCKET }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
run: |
# Variables
branch="${GITHUB_REF_NAME/\//-}"
folder="/tmp/release"
# Tagged releases
if [[ "${{ github.ref }}" == *"refs/tags/"* ]]; then
aws s3 cp --recursive "${folder}" s3://${{ env.s3_bucket }}/releases/${branch} --endpoint-url ${{ env.s3_endpoint }}
echo "${branch}" > "${folder}"/latest
aws s3 cp "${folder}"/latest s3://${{ env.s3_bucket }}/releases/latest --endpoint-url ${{ env.s3_endpoint }}
rm -f "${folder}"/latest
# master branch
elif [[ "${branch}" == "${{ github.event.repository.default_branch }}" ]]; then
aws s3 cp --recursive "${folder}" s3://${{ env.s3_bucket }}/${branch} --endpoint-url ${{ env.s3_endpoint }}
# Custom branch
else
aws s3 cp --recursive "${folder}" s3://${{ env.s3_bucket }}/branches/${branch} --endpoint-url ${{ env.s3_endpoint }}
fi
- name: Release
uses: softprops/action-gh-release@v2
if: startsWith(github.ref, 'refs/tags/')
@ -177,3 +206,12 @@ jobs:
files: |
/tmp/release/*
make_latest: true
- name: Generate Python SDK
uses: peter-evans/repository-dispatch@v3
if: startsWith(github.ref, 'refs/tags/')
with:
token: ${{ secrets.DISPATCH_PAT }}
repository: logos-storage/logos-storage-py-api-client
event-type: generate
client-payload: '{"openapi_url": "https://raw.githubusercontent.com/logos-storage/logos-storage-nim/${{ github.ref }}/openapi.yaml"}'

.gitmodules

@ -37,22 +37,17 @@
path = vendor/nim-nitro
url = https://github.com/status-im/nim-nitro.git
ignore = untracked
branch = master
branch = main
[submodule "vendor/questionable"]
path = vendor/questionable
url = https://github.com/status-im/questionable.git
ignore = untracked
branch = master
[submodule "vendor/upraises"]
path = vendor/upraises
url = https://github.com/markspanbroek/upraises.git
ignore = untracked
branch = master
branch = main
[submodule "vendor/asynctest"]
path = vendor/asynctest
url = https://github.com/status-im/asynctest.git
ignore = untracked
branch = master
branch = main
[submodule "vendor/nim-presto"]
path = vendor/nim-presto
url = https://github.com/status-im/nim-presto.git
@ -132,7 +127,7 @@
path = vendor/nim-websock
url = https://github.com/status-im/nim-websock.git
ignore = untracked
branch = master
branch = main
[submodule "vendor/nim-contract-abi"]
path = vendor/nim-contract-abi
url = https://github.com/status-im/nim-contract-abi
@ -160,13 +155,13 @@
path = vendor/nim-taskpools
url = https://github.com/status-im/nim-taskpools.git
ignore = untracked
branch = master
branch = stable
[submodule "vendor/nim-leopard"]
path = vendor/nim-leopard
url = https://github.com/status-im/nim-leopard.git
[submodule "vendor/nim-codex-dht"]
path = vendor/nim-codex-dht
url = https://github.com/codex-storage/nim-codex-dht.git
[submodule "vendor/logos-storage-nim-dht"]
path = vendor/logos-storage-nim-dht
url = https://github.com/logos-storage/logos-storage-nim-dht.git
ignore = untracked
branch = master
[submodule "vendor/nim-datastore"]
@ -178,9 +173,11 @@
[submodule "vendor/nim-eth"]
path = vendor/nim-eth
url = https://github.com/status-im/nim-eth
[submodule "vendor/codex-contracts-eth"]
path = vendor/codex-contracts-eth
url = https://github.com/status-im/codex-contracts-eth
[submodule "vendor/logos-storage-contracts-eth"]
path = vendor/logos-storage-contracts-eth
url = https://github.com/logos-storage/logos-storage-contracts-eth.git
ignore = untracked
branch = master
[submodule "vendor/nim-protobuf-serialization"]
path = vendor/nim-protobuf-serialization
url = https://github.com/status-im/nim-protobuf-serialization
@ -195,29 +192,41 @@
url = https://github.com/zevv/npeg
[submodule "vendor/nim-poseidon2"]
path = vendor/nim-poseidon2
url = https://github.com/codex-storage/nim-poseidon2.git
url = https://github.com/logos-storage/nim-poseidon2.git
ignore = untracked
branch = master
[submodule "vendor/constantine"]
path = vendor/constantine
url = https://github.com/mratsim/constantine.git
[submodule "vendor/nim-circom-compat"]
path = vendor/nim-circom-compat
url = https://github.com/codex-storage/nim-circom-compat.git
url = https://github.com/logos-storage/nim-circom-compat.git
ignore = untracked
branch = master
[submodule "vendor/codex-storage-proofs-circuits"]
path = vendor/codex-storage-proofs-circuits
url = https://github.com/codex-storage/codex-storage-proofs-circuits.git
[submodule "vendor/logos-storage-proofs-circuits"]
path = vendor/logos-storage-proofs-circuits
url = https://github.com/logos-storage/logos-storage-proofs-circuits.git
ignore = untracked
branch = master
[submodule "vendor/nim-serde"]
path = vendor/nim-serde
url = https://github.com/codex-storage/nim-serde.git
url = https://github.com/logos-storage/nim-serde.git
[submodule "vendor/nim-leveldbstatic"]
path = vendor/nim-leveldbstatic
url = https://github.com/codex-storage/nim-leveldb.git
url = https://github.com/logos-storage/nim-leveldb.git
[submodule "vendor/nim-zippy"]
path = vendor/nim-zippy
url = https://github.com/status-im/nim-zippy.git
[submodule "vendor/nph"]
path = vendor/nph
url = https://github.com/arnetheduck/nph.git
[submodule "vendor/nim-quic"]
path = vendor/nim-quic
url = https://github.com/vacp2p/nim-quic.git
ignore = untracked
branch = main
[submodule "vendor/nim-ngtcp2"]
path = vendor/nim-ngtcp2
url = https://github.com/vacp2p/nim-ngtcp2.git
ignore = untracked
branch = main

2
Jenkinsfile vendored
View File

@ -25,7 +25,7 @@ pipeline {
stage('Check') {
steps {
script {
sh './result/bin/codex --version'
sh './result/bin/storage --version'
}
}
}

View File

@ -15,8 +15,7 @@
#
# If NIM_COMMIT is set to "nimbusbuild", this will use the
# version pinned by nimbus-build-system.
#PINNED_NIM_VERSION := 38640664088251bbc88917b4bacfd86ec53014b8 # 1.6.21
PINNED_NIM_VERSION := v2.0.14
PINNED_NIM_VERSION := v2.2.4
ifeq ($(NIM_COMMIT),)
NIM_COMMIT := $(PINNED_NIM_VERSION)
@ -94,10 +93,10 @@ else # "variables.mk" was included. Business as usual until the end of this file
# default target, because it's the first one that doesn't start with '.'
# Builds the codex binary
# Builds the Logos Storage binary
all: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim codex $(NIM_PARAMS) build.nims
$(ENV_SCRIPT) nim storage $(NIM_PARAMS) build.nims
# Build tools/cirdl
cirdl: | deps
@ -139,12 +138,12 @@ test: | build deps
# Builds and runs the smart contract tests
testContracts: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim testContracts $(NIM_PARAMS) build.nims
$(ENV_SCRIPT) nim testContracts $(NIM_PARAMS) --define:ws_resubscribe=240 build.nims
# Builds and runs the integration tests
testIntegration: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim testIntegration $(NIM_PARAMS) build.nims
$(ENV_SCRIPT) nim testIntegration $(NIM_PARAMS) --define:ws_resubscribe=240 build.nims
# Builds and runs all tests (except for Taiko L2 tests)
testAll: | build deps
@ -179,11 +178,11 @@ coverage:
$(MAKE) NIMFLAGS="$(NIMFLAGS) --lineDir:on --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage" test
cd nimcache/release/testCodex && rm -f *.c
mkdir -p coverage
lcov --capture --directory nimcache/release/testCodex --output-file coverage/coverage.info
lcov --capture --keep-going --directory nimcache/release/testCodex --output-file coverage/coverage.info
shopt -s globstar && ls $$(pwd)/codex/{*,**/*}.nim
shopt -s globstar && lcov --extract coverage/coverage.info $$(pwd)/codex/{*,**/*}.nim --output-file coverage/coverage.f.info
shopt -s globstar && lcov --extract coverage/coverage.info --keep-going $$(pwd)/codex/{*,**/*}.nim --output-file coverage/coverage.f.info
echo -e $(BUILD_MSG) "coverage/report/index.html"
genhtml coverage/coverage.f.info --output-directory coverage/report
genhtml coverage/coverage.f.info --keep-going --output-directory coverage/report
show-coverage:
if which open >/dev/null; then (echo -e "\e[92mOpening\e[39m HTML coverage report in browser..." && open coverage/report/index.html) || true; fi
@ -233,6 +232,7 @@ format:
$(NPH) *.nim
$(NPH) codex/
$(NPH) tests/
$(NPH) library/
clean-nph:
rm -f $(NPH)
@ -243,4 +243,32 @@ print-nph-path:
clean: | clean-nph
################
## C Bindings ##
################
.PHONY: libstorage
STATIC ?= 0
ifneq ($(strip $(STORAGE_LIB_PARAMS)),)
NIM_PARAMS := $(NIM_PARAMS) $(STORAGE_LIB_PARAMS)
endif
libstorage:
$(MAKE) deps
rm -f build/libstorage*
ifeq ($(STATIC), 1)
echo -e $(BUILD_MSG) "build/$@.a" && \
$(ENV_SCRIPT) nim libstorageStatic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=Release\"" codex.nims
else ifeq ($(detected_OS),Windows)
echo -e $(BUILD_MSG) "build/$@.dll" && \
$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-G \\\"MSYS Makefiles\\\" -DCMAKE_BUILD_TYPE=Release\"" codex.nims
else ifeq ($(detected_OS),macOS)
echo -e $(BUILD_MSG) "build/$@.dylib" && \
$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=Release\"" codex.nims
else
echo -e $(BUILD_MSG) "build/$@.so" && \
$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=Release\"" codex.nims
endif
endif # "variables.mk" was not included

View File

@ -1,22 +1,22 @@
# Codex Decentralized Durability Engine
# Logos Storage Decentralized Engine
> The Codex project aims to create a decentralized durability engine that allows persisting data in p2p networks. In other words, it allows storing files and data with predictable durability guarantees for later retrieval.
> The Logos Storage project aims to create a decentralized engine that allows persisting data in p2p networks.
> WARNING: This project is under active development and is considered pre-alpha.
[![License: Apache](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
[![Stability: experimental](https://img.shields.io/badge/stability-experimental-orange.svg)](#stability)
[![CI](https://github.com/codex-storage/nim-codex/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/codex-storage/nim-codex/actions/workflows/ci.yml?query=branch%3Amaster)
[![Docker](https://github.com/codex-storage/nim-codex/actions/workflows/docker.yml/badge.svg?branch=master)](https://github.com/codex-storage/nim-codex/actions/workflows/docker.yml?query=branch%3Amaster)
[![Codecov](https://codecov.io/gh/codex-storage/nim-codex/branch/master/graph/badge.svg?token=XFmCyPSNzW)](https://codecov.io/gh/codex-storage/nim-codex)
[![CI](https://github.com/logos-storage/logos-storage-nim/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/logos-storage/logos-storage-nim/actions/workflows/ci.yml?query=branch%3Amaster)
[![Docker](https://github.com/logos-storage/logos-storage-nim/actions/workflows/docker.yml/badge.svg?branch=master)](https://github.com/logos-storage/logos-storage-nim/actions/workflows/docker.yml?query=branch%3Amaster)
[![Codecov](https://codecov.io/gh/logos-storage/logos-storage-nim/branch/master/graph/badge.svg?token=XFmCyPSNzW)](https://codecov.io/gh/logos-storage/logos-storage-nim)
[![Discord](https://img.shields.io/discord/895609329053474826)](https://discord.gg/CaJTh24ddQ)
![Docker Pulls](https://img.shields.io/docker/pulls/codexstorage/nim-codex)
## Build and Run
For detailed instructions on preparing to build nim-codex see [*Build Codex*](https://docs.codex.storage/learn/build).
For detailed instructions on preparing to build logos-storage-nim, see [*Build Logos Storage*](https://docs.codex.storage/learn/build).
To build the project, clone it and run:
@ -29,12 +29,12 @@ The executable will be placed under the `build` directory under the project root
Run the client with:
```bash
build/codex
build/storage
```
## Configuration
It is possible to configure a Codex node in several ways:
It is possible to configure a Logos Storage node in several ways:
1. CLI options
2. Environment variables
3. Configuration file
@ -45,22 +45,72 @@ Please check [documentation](https://docs.codex.storage/learn/run#configuration)
## Guides
To get acquainted with Codex, consider:
* running the simple [Codex Two-Client Test](https://docs.codex.storage/learn/local-two-client-test) for a start, and;
* if you are feeling more adventurous, try [Running a Local Codex Network with Marketplace Support](https://docs.codex.storage/learn/local-marketplace) using a local blockchain as well.
To get acquainted with Logos Storage, consider:
* running the simple [Logos Storage Two-Client Test](https://docs.codex.storage/learn/local-two-client-test) for a start, and;
* if you are feeling more adventurous, try [Running a Local Logos Storage Network with Marketplace Support](https://docs.codex.storage/learn/local-marketplace) using a local blockchain as well.
## API
The client exposes a REST API that can be used to interact with the node. An overview of the API can be found on [api.codex.storage](https://api.codex.storage).
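For example, any HTTP client can talk to the node. The short Go sketch below assumes a node listening on `localhost:8080` and a `debug/info` route; both are assumptions made here for illustration, so check [api.codex.storage](https://api.codex.storage) for the routes that match your build.
```go
// Minimal sketch: query the node's REST API over HTTP.
// The listen address and route are assumptions; consult
// https://api.codex.storage for the endpoints of your version.
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	resp, err := http.Get("http://localhost:8080/api/codex/v1/debug/info")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // node info as JSON
}
```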
## Bindings
Logos Storage provides a C API that can be wrapped by other languages. The bindings are located in the `library` folder.
Currently, only a Go binding is included.
### Build the C library
```bash
make libstorage
```
This produces the shared library under `build/`.
### Run the Go example
Build the Go example:
```bash
go build -o storage-go examples/golang/storage.go
```
Export the library path:
```bash
export LD_LIBRARY_PATH=build
```
Run the example:
```bash
./storage-go
```
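If you would rather link your own Go program against the library than use the shipped example, the usual cgo skeleton looks roughly like the sketch below. The include path, header, and flags are illustrative assumptions; `examples/golang/storage.go` remains the authoritative reference for the actual binding calls.
```go
// Hypothetical cgo skeleton for linking against build/libstorage.
// All paths and flags here are assumptions for illustration only;
// see examples/golang/storage.go for the real binding usage.
package main

/*
#cgo CFLAGS: -I${SRCDIR}/library
#cgo LDFLAGS: -L${SRCDIR}/build -lstorage
#include <stdlib.h>
*/
import "C"

import (
	"fmt"
	"unsafe"
)

func main() {
	// Round-trip a string through C just to prove the toolchain links;
	// real programs would call the Logos Storage C API here instead.
	s := C.CString("linked against libstorage")
	defer C.free(unsafe.Pointer(s))
	fmt.Println(C.GoString(s))
}
```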
### Static vs Dynamic build
By default, Logos Storage builds a dynamic library (`libstorage.so`), which you can load at runtime.
If you prefer a static library (`libstorage.a`), set the `STATIC` flag:
```bash
# Build dynamic (default)
make libstorage
# Build static
make STATIC=1 libstorage
```
### Limitation
Callbacks must be fast and non-blocking; otherwise, the working thread will hang and prevent other requests from being processed.
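One common way to respect this constraint is to have the callback hand its payload to a buffered channel and return immediately, doing the slow work on a separate goroutine. The sketch below is only an illustration; the callback signature is hypothetical and should be adapted to whatever the binding actually passes.
```go
// Sketch of a non-blocking callback: hand the payload to a channel and
// return right away so the library's worker thread is never stalled.
// The callback signature is hypothetical; adapt it to the real binding.
package main

import (
	"fmt"
	"time"
)

var events = make(chan []byte, 128) // buffered so the handoff never blocks

// onEvent is what you would register with the binding as the callback.
func onEvent(payload []byte) {
	select {
	case events <- payload:
		// fast, non-blocking handoff
	default:
		// queue full: drop or count the event, but never block here
	}
}

func main() {
	go func() {
		for p := range events {
			fmt.Printf("processing %d bytes\n", len(p)) // slow work happens here
		}
	}()

	onEvent([]byte("example"))         // simulate the library invoking the callback
	time.Sleep(100 * time.Millisecond) // give the worker time to drain (demo only)
}
```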
## Contributing and development
Feel free to dive in, contributions are welcomed! Open an issue or submit PRs.
### Linting and formatting
`nim-codex` uses [nph](https://github.com/arnetheduck/nph) for formatting our code and it is requrired to adhere to its styling.
`logos-storage-nim` uses [nph](https://github.com/arnetheduck/nph) for formatting our code and it is required to adhere to its styling.
If you are setting up a fresh environment, run `make build-nph` to get `nph`.
To format files, run `make nph/<file/folder you want to format>`.
If you want you can install Git pre-commit hook using `make install-nph-commit`, which will format modified files prior commiting them.
If you want you can install Git pre-commit hook using `make install-nph-commit`, which will format modified files prior committing them.
If you are using VSCode with the [NimLang](https://marketplace.visualstudio.com/items?itemName=NimLang.nimlang) extension, you can enable "Format On Save" (i.e. the `nim.formatOnSave` property), which will format the files using `nph`.

View File

@ -10,17 +10,17 @@ nim c -r run_benchmarks
```
By default all circuit files for each combinations of circuit args will be generated in a unique folder named like:
nim-codex/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3
logos-storage-nim/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3
Generating the circuit files often takes longer than running benchmarks, so caching the results allows re-running the benchmark as needed.
You can modify the `CircuitArgs` and `CircuitEnv` objects in `runAllBenchMarks` to suit your needs. See `create_circuits.nim` for their definitions.
The runner executes all commands relative to the `nim-codex` repo. This simplifies finding the correct circuit includes paths, etc. `CircuitEnv` sets all of this.
The runner executes all commands relative to the `logos-storage-nim` repo. This simplifies finding the correct circuit include paths, etc. `CircuitEnv` sets all of this.
## Codex Ark Circom CLI
## Logos Storage Ark Circom CLI
Runs Codex's prover setup with Ark / Circom.
Runs Logos Storage's prover setup with Ark / Circom.
Compile:
```sh

View File

@ -29,10 +29,10 @@ proc findCodexProjectDir(): string =
func default*(tp: typedesc[CircuitEnv]): CircuitEnv =
let codexDir = findCodexProjectDir()
result.nimCircuitCli =
codexDir / "vendor" / "codex-storage-proofs-circuits" / "reference" / "nim" /
codexDir / "vendor" / "logos-storage-proofs-circuits" / "reference" / "nim" /
"proof_input" / "cli"
result.circuitDirIncludes =
codexDir / "vendor" / "codex-storage-proofs-circuits" / "circuit"
codexDir / "vendor" / "logos-storage-proofs-circuits" / "circuit"
result.ptauPath =
codexDir / "benchmarks" / "ceremony" / "powersOfTau28_hez_final_23.ptau"
result.ptauUrl = "https://storage.googleapis.com/zkevm/ptau".parseUri
@ -118,7 +118,7 @@ proc createCircuit*(
##
## All needed circuit files will be generated as needed.
## They will be located in `circBenchDir` which defaults to a folder like:
## `nim-codex/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3`
## `logos-storage-nim/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3`
## with all the given CircuitArgs.
##
let circdir = circBenchDir

View File

@ -41,7 +41,7 @@ template benchmark*(name: untyped, count: int, blk: untyped) =
)
benchRuns[benchmarkName] = (runs.avg(), count)
template printBenchMarkSummaries*(printRegular=true, printTsv=true) =
template printBenchMarkSummaries*(printRegular = true, printTsv = true) =
if printRegular:
echo ""
for k, v in benchRuns:
@ -53,7 +53,6 @@ template printBenchMarkSummaries*(printRegular=true, printTsv=true) =
for k, v in benchRuns:
echo k, "\t", v.avgTimeSec, "\t", v.count
import std/math
func floorLog2*(x: int): int =

View File

@ -3,7 +3,7 @@ mode = ScriptMode.Verbose
import std/os except commandLineParams
### Helper functions
proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
proc buildBinary(srcName: string, outName = os.lastPathPart(srcName), srcDir = "./", params = "", lang = "c") =
if not dirExists "build":
mkDir "build"
@ -18,57 +18,82 @@ proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
let
# Place build output in 'build' folder, even if name includes a longer path.
outName = os.lastPathPart(name)
cmd =
"nim " & lang & " --out:build/" & outName & " " & extra_params & " " & srcDir &
name & ".nim"
srcName & ".nim"
exec(cmd)
proc test(name: string, srcDir = "tests/", params = "", lang = "c") =
buildBinary name, srcDir, params
exec "build/" & name
proc buildLibrary(name: string, srcDir = "./", params = "", `type` = "dynamic") =
if not dirExists "build":
mkDir "build"
task codex, "build codex binary":
if `type` == "dynamic":
let lib_name = (
when defined(windows): name & ".dll"
elif defined(macosx): name & ".dylib"
else: name & ".so"
)
exec "nim c" & " --out:build/" & lib_name &
" --threads:on --app:lib --opt:size --noMain --mm:refc --header --d:metrics " &
"--nimMainPrefix:libstorage -d:noSignalHandler " &
"-d:LeopardExtraCompilerFlags=-fPIC " & "-d:chronicles_runtime_filtering " &
"-d:chronicles_log_level=TRACE " & params & " " & srcDir & name & ".nim"
else:
exec "nim c" & " --out:build/" & name &
".a --threads:on --app:staticlib --opt:size --noMain --mm:refc --header --d:metrics " &
"--nimMainPrefix:libstorage -d:noSignalHandler " &
"-d:LeopardExtraCompilerFlags=-fPIC " &
"-d:chronicles_runtime_filtering " &
"-d:chronicles_log_level=TRACE " &
params & " " & srcDir & name & ".nim"
proc test(name: string, outName = name, srcDir = "tests/", params = "", lang = "c") =
buildBinary name, outName, srcDir, params
exec "build/" & outName
task storage, "build logos storage binary":
buildBinary "codex",
outname = "storage",
params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE"
task toolsCirdl, "build tools/cirdl binary":
buildBinary "tools/cirdl/cirdl"
task testCodex, "Build & run Codex tests":
test "testCodex", params = "-d:codex_enable_proof_failures=true"
task testStorage, "Build & run Logos Storage tests":
test "testCodex", outName = "testStorage", params = "-d:storage_enable_proof_failures=true"
task testContracts, "Build & run Codex Contract tests":
task testContracts, "Build & run Logos Storage Contract tests":
test "testContracts"
task testIntegration, "Run integration tests":
buildBinary "codex",
outName = "storage",
params =
"-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE -d:codex_enable_proof_failures=true"
"-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE -d:storage_enable_proof_failures=true"
test "testIntegration"
# use params to enable logging from the integration test executable
# test "testIntegration", params = "-d:chronicles_sinks=textlines[notimestamps,stdout],textlines[dynamic] " &
# "-d:chronicles_enabled_topics:integration:TRACE"
task build, "build codex binary":
codexTask()
task build, "build Logos Storage binary":
storageTask()
task test, "Run tests":
testCodexTask()
testStorageTask()
task testTools, "Run Tools tests":
toolsCirdlTask()
test "testTools"
task testAll, "Run all tests (except for Taiko L2 tests)":
testCodexTask()
testStorageTask()
testContractsTask()
testIntegrationTask()
testToolsTask()
task testTaiko, "Run Taiko L2 tests":
codexTask()
storageTask()
test "testTaiko"
import strutils
@ -101,23 +126,43 @@ task coverage, "generates code coverage report":
test "coverage",
srcDir = "tests/",
params =
" --nimcache:nimcache/coverage -d:release -d:codex_enable_proof_failures=true"
" --nimcache:nimcache/coverage -d:release -d:storage_enable_proof_failures=true"
exec("rm nimcache/coverage/*.c")
rmDir("coverage")
mkDir("coverage")
echo " ======== Running LCOV ======== "
exec(
"lcov --capture --directory nimcache/coverage --output-file coverage/coverage.info"
"lcov --capture --keep-going --directory nimcache/coverage --output-file coverage/coverage.info"
)
exec(
"lcov --extract coverage/coverage.info --output-file coverage/coverage.f.info " &
"lcov --extract coverage/coverage.info --keep-going --output-file coverage/coverage.f.info " &
nimSrcs
)
echo " ======== Generating HTML coverage report ======== "
exec("genhtml coverage/coverage.f.info --output-directory coverage/report ")
exec("genhtml coverage/coverage.f.info --keep-going --output-directory coverage/report ")
echo " ======== Coverage report Done ======== "
task showCoverage, "open coverage html":
echo " ======== Opening HTML coverage report in browser... ======== "
if findExe("open") != "":
exec("open coverage/report/index.html")
task libstorageDynamic, "Generate bindings":
var params = ""
when compiles(commandLineParams):
for param in commandLineParams():
if param.len > 0 and param.startsWith("-"):
params.add " " & param
let name = "libstorage"
buildLibrary name, "library/", params, "dynamic"
task libstorageStatic, "Generate bindings":
var params = ""
when compiles(commandLineParams):
for param in commandLineParams():
if param.len > 0 and param.startsWith("-"):
params.add " " & param
let name = "libstorage"
buildLibrary name, "library/", params, "static"

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -45,7 +45,7 @@ when isMainModule:
let config = CodexConf.load(
version = codexFullVersion,
envVarsPrefix = "codex",
envVarsPrefix = "storage",
secondarySources = proc(
config: CodexConf, sources: auto
) {.gcsafe, raises: [ConfigurationError].} =
@ -54,6 +54,16 @@ when isMainModule:
,
)
config.setupLogging()
try:
updateLogLevel(config.logLevel)
except ValueError as err:
try:
stderr.write "Invalid value for --log-level. " & err.msg & "\n"
except IOError:
echo "Invalid value for --log-level. " & err.msg
quit QuitFailure
config.setupMetrics()
if not (checkAndCreateDataDir((config.dataDir).string)):
@ -89,15 +99,15 @@ when isMainModule:
try:
CodexServer.new(config, privateKey)
except Exception as exc:
error "Failed to start Codex", msg = exc.msg
error "Failed to start Logos Storage", msg = exc.msg
quit QuitFailure
## Ctrl+C handling
proc doShutdown() =
shutdown = server.stop()
shutdown = server.shutdown()
state = CodexStatus.Stopping
notice "Stopping Codex"
notice "Stopping Logos Storage"
proc controlCHandler() {.noconv.} =
when defined(windows):
@ -128,7 +138,7 @@ when isMainModule:
try:
waitFor server.start()
except CatchableError as error:
error "Codex failed to start", error = error.msg
error "Logos Storage failed to start", error = error.msg
# XXX ideally we'd like to issue a stop instead of quitting cold turkey,
# but this would mean we'd have to fix the implementation of all
# services so they won't crash if we attempt to stop them before they
@ -149,7 +159,7 @@ when isMainModule:
# be assigned before state switches to Stopping
waitFor shutdown
except CatchableError as error:
error "Codex didn't shutdown correctly", error = error.msg
error "Logos Storage didn't shutdown correctly", error = error.msg
quit QuitFailure
notice "Exited codex"
notice "Exited Storage"

View File

@ -1,5 +1,5 @@
version = "0.1.0"
author = "Codex Team"
author = "Logos Storage Team"
description = "p2p data durability engine"
license = "MIT"
binDir = "build"

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,6 +7,8 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import pkg/chronos
import pkg/libp2p/cid
import pkg/libp2p/multicodec
@ -41,23 +43,25 @@ type Advertiser* = ref object of RootObj
advertiserRunning*: bool # Indicates if discovery is running
concurrentAdvReqs: int # Concurrent advertise requests
advertiseLocalStoreLoop*: Future[void] # Advertise loop task handle
advertiseLocalStoreLoop*: Future[void].Raising([]) # Advertise loop task handle
advertiseQueue*: AsyncQueue[Cid] # Advertise queue
trackedFutures*: TrackedFutures # Advertise tasks futures
advertiseLocalStoreLoopSleep: Duration # Advertise loop sleep
inFlightAdvReqs*: Table[Cid, Future[void]] # Inflight advertise requests
proc addCidToQueue(b: Advertiser, cid: Cid) {.async.} =
proc addCidToQueue(b: Advertiser, cid: Cid) {.async: (raises: [CancelledError]).} =
if cid notin b.advertiseQueue:
await b.advertiseQueue.put(cid)
trace "Advertising", cid
proc advertiseBlock(b: Advertiser, cid: Cid) {.async.} =
proc advertiseBlock(b: Advertiser, cid: Cid) {.async: (raises: [CancelledError]).} =
without isM =? cid.isManifest, err:
warn "Unable to determine if cid is manifest"
return
try:
if isM:
without blk =? await b.localStore.getBlock(cid), err:
error "Error retrieving manifest block", cid, err = err.msg
@ -70,77 +74,78 @@ proc advertiseBlock(b: Advertiser, cid: Cid) {.async.} =
# announce manifest cid and tree cid
await b.addCidToQueue(cid)
await b.addCidToQueue(manifest.treeCid)
except CancelledError as exc:
trace "Cancelled advertise block", cid
raise exc
except CatchableError as e:
error "failed to advertise block", cid, error = e.msgDetail
proc advertiseLocalStoreLoop(b: Advertiser) {.async: (raises: []).} =
while b.advertiserRunning:
try:
if cids =? await b.localStore.listBlocks(blockType = BlockType.Manifest):
while b.advertiserRunning:
if cidsIter =? await b.localStore.listBlocks(blockType = BlockType.Manifest):
trace "Advertiser begins iterating blocks..."
for c in cids:
for c in cidsIter:
if cid =? await c:
await b.advertiseBlock(cid)
trace "Advertiser iterating blocks finished."
await sleepAsync(b.advertiseLocalStoreLoopSleep)
except CancelledError:
break # do not propagate as advertiseLocalStoreLoop was asyncSpawned
except CatchableError as e:
error "failed to advertise blocks in local store", error = e.msgDetail
warn "Cancelled advertise local store loop"
info "Exiting advertise task loop"
proc processQueueLoop(b: Advertiser) {.async: (raises: []).} =
while b.advertiserRunning:
try:
while b.advertiserRunning:
let cid = await b.advertiseQueue.get()
if cid in b.inFlightAdvReqs:
continue
try:
let request = b.discovery.provide(cid)
b.inFlightAdvReqs[cid] = request
codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64)
await request
finally:
defer:
b.inFlightAdvReqs.del(cid)
codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64)
await request
except CancelledError:
trace "Advertise task cancelled"
return
except CatchableError as exc:
warn "Exception in advertise task runner", exc = exc.msg
warn "Cancelled advertise task runner"
info "Exiting advertise task runner"
proc start*(b: Advertiser) {.async.} =
proc start*(b: Advertiser) {.async: (raises: []).} =
## Start the advertiser
##
trace "Advertiser start"
proc onBlock(cid: Cid) {.async.} =
# The advertiser is expected to be started only once.
if b.advertiserRunning:
raiseAssert "Advertiser can only be started once — this should not happen"
proc onBlock(cid: Cid) {.async: (raises: []).} =
try:
await b.advertiseBlock(cid)
except CancelledError:
trace "Cancelled advertise block", cid
doAssert(b.localStore.onBlockStored.isNone())
b.localStore.onBlockStored = onBlock.some
if b.advertiserRunning:
warn "Starting advertiser twice"
return
b.advertiserRunning = true
for i in 0 ..< b.concurrentAdvReqs:
let fut = b.processQueueLoop()
b.trackedFutures.track(fut)
asyncSpawn fut
b.advertiseLocalStoreLoop = advertiseLocalStoreLoop(b)
b.trackedFutures.track(b.advertiseLocalStoreLoop)
asyncSpawn b.advertiseLocalStoreLoop
proc stop*(b: Advertiser) {.async.} =
proc stop*(b: Advertiser) {.async: (raises: []).} =
## Stop the advertiser
##

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -8,6 +8,7 @@
## those terms.
import std/sequtils
import std/algorithm
import pkg/chronos
import pkg/libp2p/cid
@ -38,6 +39,7 @@ const
DefaultConcurrentDiscRequests = 10
DefaultDiscoveryTimeout = 1.minutes
DefaultMinPeersPerBlock = 3
DefaultMaxPeersPerBlock = 8
DefaultDiscoveryLoopSleep = 3.seconds
type DiscoveryEngine* = ref object of RootObj
@ -48,77 +50,90 @@ type DiscoveryEngine* = ref object of RootObj
pendingBlocks*: PendingBlocksManager # Blocks we're awaiting to be resolved
discEngineRunning*: bool # Indicates if discovery is running
concurrentDiscReqs: int # Concurrent discovery requests
discoveryLoop*: Future[void] # Discovery loop task handle
discoveryLoop*: Future[void].Raising([]) # Discovery loop task handle
discoveryQueue*: AsyncQueue[Cid] # Discovery queue
trackedFutures*: TrackedFutures # Tracked Discovery tasks futures
minPeersPerBlock*: int # Max number of peers with block
minPeersPerBlock*: int # Min number of peers with block
maxPeersPerBlock*: int # Max number of peers with block
discoveryLoopSleep: Duration # Discovery loop sleep
inFlightDiscReqs*: Table[Cid, Future[seq[SignedPeerRecord]]]
# Inflight discovery requests
proc cleanupExcessPeers(b: DiscoveryEngine, cid: Cid) {.gcsafe, raises: [].} =
var haves = b.peers.peersHave(cid)
let count = haves.len - b.maxPeersPerBlock
if count <= 0:
return
haves.sort(
proc(a, b: BlockExcPeerCtx): int =
cmp(a.lastExchange, b.lastExchange)
)
let toRemove = haves[0 ..< count]
for peer in toRemove:
try:
peer.cleanPresence(BlockAddress.init(cid))
trace "Removed block presence from peer", cid, peer = peer.id
except CatchableError as exc:
error "Failed to clean presence for peer",
cid, peer = peer.id, error = exc.msg, name = exc.name
proc discoveryQueueLoop(b: DiscoveryEngine) {.async: (raises: []).} =
try:
while b.discEngineRunning:
for cid in toSeq(b.pendingBlocks.wantListBlockCids):
try:
await b.discoveryQueue.put(cid)
except CancelledError:
trace "Discovery loop cancelled"
return
except CatchableError as exc:
warn "Exception in discovery loop", exc = exc.msg
try:
logScope:
sleep = b.discoveryLoopSleep
wanted = b.pendingBlocks.len
await sleepAsync(b.discoveryLoopSleep)
except CancelledError:
discard # do not propagate as discoveryQueueLoop was asyncSpawned
trace "Discovery loop cancelled"
proc discoveryTaskLoop(b: DiscoveryEngine) {.async: (raises: []).} =
## Run discovery tasks
##
while b.discEngineRunning:
try:
while b.discEngineRunning:
let cid = await b.discoveryQueue.get()
if cid in b.inFlightDiscReqs:
trace "Discovery request already in progress", cid
continue
trace "Running discovery task for cid", cid
let haves = b.peers.peersHave(cid)
if haves.len < b.minPeersPerBlock:
try:
let request = b.discovery.find(cid).wait(DefaultDiscoveryTimeout)
if haves.len > b.maxPeersPerBlock:
trace "Cleaning up excess peers",
cid, peers = haves.len, max = b.maxPeersPerBlock
b.cleanupExcessPeers(cid)
continue
if haves.len < b.minPeersPerBlock:
let request = b.discovery.find(cid)
b.inFlightDiscReqs[cid] = request
codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64)
let peers = await request
defer:
b.inFlightDiscReqs.del(cid)
codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64)
if (await request.withTimeout(DefaultDiscoveryTimeout)) and
peers =? (await request).catch:
let dialed = await allFinished(peers.mapIt(b.network.dialPeer(it.data)))
for i, f in dialed:
if f.failed:
await b.discovery.removeProvider(peers[i].data.peerId)
finally:
b.inFlightDiscReqs.del(cid)
codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64)
except CancelledError:
trace "Discovery task cancelled"
return
except CatchableError as exc:
warn "Exception in discovery task runner", exc = exc.msg
except Exception as e:
# Raised by b.discovery.removeProvider somehow...
# This should not be catchable, and we should never get here. Therefore,
# raise a Defect.
raiseAssert "Exception when removing provider"
info "Exiting discovery task runner"
proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) {.inline.} =
proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) =
for cid in cids:
if cid notin b.discoveryQueue:
try:
@ -126,11 +141,11 @@ proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) {.inline.} =
except CatchableError as exc:
warn "Exception queueing discovery request", exc = exc.msg
proc start*(b: DiscoveryEngine) {.async.} =
proc start*(b: DiscoveryEngine) {.async: (raises: []).} =
## Start the discengine task
##
trace "Discovery engine start"
trace "Discovery engine starting"
if b.discEngineRunning:
warn "Starting discovery engine twice"
@ -140,12 +155,13 @@ proc start*(b: DiscoveryEngine) {.async.} =
for i in 0 ..< b.concurrentDiscReqs:
let fut = b.discoveryTaskLoop()
b.trackedFutures.track(fut)
asyncSpawn fut
b.discoveryLoop = b.discoveryQueueLoop()
b.trackedFutures.track(b.discoveryLoop)
proc stop*(b: DiscoveryEngine) {.async.} =
trace "Discovery engine started"
proc stop*(b: DiscoveryEngine) {.async: (raises: []).} =
## Stop the discovery engine
##
@ -171,6 +187,7 @@ proc new*(
concurrentDiscReqs = DefaultConcurrentDiscRequests,
discoveryLoopSleep = DefaultDiscoveryLoopSleep,
minPeersPerBlock = DefaultMinPeersPerBlock,
maxPeersPerBlock = DefaultMaxPeersPerBlock,
): DiscoveryEngine =
## Create a discovery engine instance for advertising services
##
@ -186,4 +203,5 @@ proc new*(
inFlightDiscReqs: initTable[Cid, Future[seq[SignedPeerRecord]]](),
discoveryLoopSleep: discoveryLoopSleep,
minPeersPerBlock: minPeersPerBlock,
maxPeersPerBlock: maxPeersPerBlock,
)

File diff suppressed because it is too large

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,6 +7,8 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/math
import pkg/nitro
import pkg/questionable/results
@ -15,9 +17,6 @@ import ../peers
export nitro
export results
push:
{.upraises: [].}
const ChainId* = 0.u256 # invalid chain id for now
const Asset* = EthAddress.zero # invalid ERC20 asset address for now
const AmountPerChannel = (10'u64 ^ 18).u256 # 1 asset, ERC20 default is 18 decimals

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -34,7 +34,7 @@ declareGauge(
const
DefaultBlockRetries* = 3000
DefaultRetryInterval* = 500.millis
DefaultRetryInterval* = 2.seconds
type
RetriesExhaustedError* = object of CatchableError
@ -42,7 +42,7 @@ type
BlockReq* = object
handle*: BlockHandle
inFlight*: bool
requested*: ?PeerId
blockRetries*: int
startTime*: int64
@ -50,12 +50,13 @@ type
blockRetries*: int = DefaultBlockRetries
retryInterval*: Duration = DefaultRetryInterval
blocks*: Table[BlockAddress, BlockReq] # pending Block requests
lastInclusion*: Moment # time at which we last included a block into our wantlist
proc updatePendingBlockGauge(p: PendingBlocksManager) =
codex_block_exchange_pending_block_requests.set(p.blocks.len.int64)
proc getWantHandle*(
self: PendingBlocksManager, address: BlockAddress, inFlight = false
self: PendingBlocksManager, address: BlockAddress, requested: ?PeerId = PeerId.none
): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} =
## Add an event for a block
##
@ -65,11 +66,13 @@ proc getWantHandle*(
do:
let blk = BlockReq(
handle: newFuture[Block]("pendingBlocks.getWantHandle"),
inFlight: inFlight,
requested: requested,
blockRetries: self.blockRetries,
startTime: getMonoTime().ticks,
)
self.blocks[address] = blk
self.lastInclusion = Moment.now()
let handle = blk.handle
proc cleanUpBlock(data: pointer) {.raises: [].} =
@ -86,9 +89,22 @@ proc getWantHandle*(
return handle
proc getWantHandle*(
self: PendingBlocksManager, cid: Cid, inFlight = false
self: PendingBlocksManager, cid: Cid, requested: ?PeerId = PeerId.none
): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} =
self.getWantHandle(BlockAddress.init(cid), inFlight)
self.getWantHandle(BlockAddress.init(cid), requested)
proc completeWantHandle*(
self: PendingBlocksManager, address: BlockAddress, blk: Block
) {.raises: [].} =
## Complete a pending want handle
self.blocks.withValue(address, blockReq):
if not blockReq[].handle.finished:
trace "Completing want handle from provided block", address
blockReq[].handle.complete(blk)
else:
trace "Want handle already completed", address
do:
trace "No pending want handle found for address", address
proc resolve*(
self: PendingBlocksManager, blocksDelivery: seq[BlockDelivery]
@ -108,9 +124,6 @@ proc resolve*(
blockReq.handle.complete(bd.blk)
codex_block_exchange_retrieval_time_us.set(retrievalDurationUs)
if retrievalDurationUs > 500000:
warn "High block retrieval time", retrievalDurationUs, address = bd.address
else:
trace "Block handle already finished", address = bd.address
@ -128,19 +141,40 @@ func retriesExhausted*(self: PendingBlocksManager, address: BlockAddress): bool
self.blocks.withValue(address, pending):
result = pending[].blockRetries <= 0
func setInFlight*(self: PendingBlocksManager, address: BlockAddress, inFlight = true) =
## Set inflight status for a block
func isRequested*(self: PendingBlocksManager, address: BlockAddress): bool =
## Check if a block has been requested to a peer
##
result = false
self.blocks.withValue(address, pending):
result = pending[].requested.isSome
func getRequestPeer*(self: PendingBlocksManager, address: BlockAddress): ?PeerId =
## Returns the peer that requested this block
##
result = PeerId.none
self.blocks.withValue(address, pending):
result = pending[].requested
proc markRequested*(
self: PendingBlocksManager, address: BlockAddress, peer: PeerId
): bool =
## Marks this block as having been requested to a peer
##
self.blocks.withValue(address, pending):
pending[].inFlight = inFlight
func isInFlight*(self: PendingBlocksManager, address: BlockAddress): bool =
## Check if a block is in flight
##
if self.isRequested(address):
return false
self.blocks.withValue(address, pending):
result = pending[].inFlight
pending[].requested = peer.some
return true
proc clearRequest*(
self: PendingBlocksManager, address: BlockAddress, peer: ?PeerId = PeerId.none
) =
self.blocks.withValue(address, pending):
if peer.isSome:
assert peer == pending[].requested
pending[].requested = PeerId.none
func contains*(self: PendingBlocksManager, cid: Cid): bool =
BlockAddress.init(cid) in self.blocks

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -35,13 +35,14 @@ const
DefaultMaxInflight* = 100
type
WantListHandler* = proc(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.}
WantListHandler* = proc(peer: PeerId, wantList: WantList) {.async: (raises: []).}
BlocksDeliveryHandler* =
proc(peer: PeerId, blocks: seq[BlockDelivery]): Future[void] {.gcsafe.}
proc(peer: PeerId, blocks: seq[BlockDelivery]) {.async: (raises: []).}
BlockPresenceHandler* =
proc(peer: PeerId, precense: seq[BlockPresence]): Future[void] {.gcsafe.}
AccountHandler* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
PaymentHandler* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}
proc(peer: PeerId, precense: seq[BlockPresence]) {.async: (raises: []).}
AccountHandler* = proc(peer: PeerId, account: Account) {.async: (raises: []).}
PaymentHandler* = proc(peer: PeerId, payment: SignedState) {.async: (raises: []).}
PeerEventHandler* = proc(peer: PeerId) {.async: (raises: [CancelledError]).}
BlockExcHandlers* = object
onWantList*: WantListHandler
@ -49,6 +50,9 @@ type
onPresence*: BlockPresenceHandler
onAccount*: AccountHandler
onPayment*: PaymentHandler
onPeerJoined*: PeerEventHandler
onPeerDeparted*: PeerEventHandler
onPeerDropped*: PeerEventHandler
WantListSender* = proc(
id: PeerId,
@ -58,15 +62,20 @@ type
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false,
): Future[void] {.gcsafe.}
WantCancellationSender* =
proc(peer: PeerId, addresses: seq[BlockAddress]): Future[void] {.gcsafe.}
BlocksDeliverySender* =
proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.}
PresenceSender* =
proc(peer: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.}
AccountSender* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
PaymentSender* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}
) {.async: (raises: [CancelledError]).}
WantCancellationSender* = proc(peer: PeerId, addresses: seq[BlockAddress]) {.
async: (raises: [CancelledError])
.}
BlocksDeliverySender* = proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]) {.
async: (raises: [CancelledError])
.}
PresenceSender* = proc(peer: PeerId, presence: seq[BlockPresence]) {.
async: (raises: [CancelledError])
.}
AccountSender* =
proc(peer: PeerId, account: Account) {.async: (raises: [CancelledError]).}
PaymentSender* =
proc(peer: PeerId, payment: SignedState) {.async: (raises: [CancelledError]).}
BlockExcRequest* = object
sendWantList*: WantListSender
@ -98,7 +107,9 @@ proc isSelf*(b: BlockExcNetwork, peer: PeerId): bool =
return b.peerId == peer
proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} =
proc send*(
b: BlockExcNetwork, id: PeerId, msg: pb.Message
) {.async: (raises: [CancelledError]).} =
## Send message to peer
##
@ -106,8 +117,9 @@ proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} =
trace "Unable to send, peer not found", peerId = id
return
let peer = b.peers[id]
try:
let peer = b.peers[id]
await b.inflightSema.acquire()
await peer.send(msg)
except CancelledError as error:
@ -117,7 +129,9 @@ proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} =
finally:
b.inflightSema.release()
proc handleWantList(b: BlockExcNetwork, peer: NetworkPeer, list: WantList) {.async.} =
proc handleWantList(
b: BlockExcNetwork, peer: NetworkPeer, list: WantList
) {.async: (raises: []).} =
## Handle incoming want list
##
@ -133,7 +147,7 @@ proc sendWantList*(
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false,
): Future[void] =
) {.async: (raw: true, raises: [CancelledError]).} =
## Send a want message to peer
##
@ -154,14 +168,14 @@ proc sendWantList*(
proc sendWantCancellations*(
b: BlockExcNetwork, id: PeerId, addresses: seq[BlockAddress]
): Future[void] {.async.} =
): Future[void] {.async: (raises: [CancelledError]).} =
## Informs a remote peer that we're no longer interested in a set of blocks
##
await b.sendWantList(id = id, addresses = addresses, cancel = true)
proc handleBlocksDelivery(
b: BlockExcNetwork, peer: NetworkPeer, blocksDelivery: seq[BlockDelivery]
) {.async.} =
) {.async: (raises: []).} =
## Handle incoming blocks
##
@ -170,7 +184,7 @@ proc handleBlocksDelivery(
proc sendBlocksDelivery*(
b: BlockExcNetwork, id: PeerId, blocksDelivery: seq[BlockDelivery]
): Future[void] =
) {.async: (raw: true, raises: [CancelledError]).} =
## Send blocks to remote
##
@ -178,7 +192,7 @@ proc sendBlocksDelivery*(
proc handleBlockPresence(
b: BlockExcNetwork, peer: NetworkPeer, presence: seq[BlockPresence]
) {.async.} =
) {.async: (raises: []).} =
## Handle block presence
##
@ -187,7 +201,7 @@ proc handleBlockPresence(
proc sendBlockPresence*(
b: BlockExcNetwork, id: PeerId, presence: seq[BlockPresence]
): Future[void] =
) {.async: (raw: true, raises: [CancelledError]).} =
## Send presence to remote
##
@ -195,20 +209,24 @@ proc sendBlockPresence*(
proc handleAccount(
network: BlockExcNetwork, peer: NetworkPeer, account: Account
) {.async.} =
) {.async: (raises: []).} =
## Handle account info
##
if not network.handlers.onAccount.isNil:
await network.handlers.onAccount(peer.id, account)
proc sendAccount*(b: BlockExcNetwork, id: PeerId, account: Account): Future[void] =
proc sendAccount*(
b: BlockExcNetwork, id: PeerId, account: Account
) {.async: (raw: true, raises: [CancelledError]).} =
## Send account info to remote
##
b.send(id, Message(account: AccountMessage.init(account)))
proc sendPayment*(b: BlockExcNetwork, id: PeerId, payment: SignedState): Future[void] =
proc sendPayment*(
b: BlockExcNetwork, id: PeerId, payment: SignedState
) {.async: (raw: true, raises: [CancelledError]).} =
## Send payment to remote
##
@ -216,7 +234,7 @@ proc sendPayment*(b: BlockExcNetwork, id: PeerId, payment: SignedState): Future[
proc handlePayment(
network: BlockExcNetwork, peer: NetworkPeer, payment: SignedState
) {.async.} =
) {.async: (raises: []).} =
## Handle payment
##
@ -224,99 +242,123 @@ proc handlePayment(
await network.handlers.onPayment(peer.id, payment)
proc rpcHandler(
b: BlockExcNetwork, peer: NetworkPeer, msg: Message
) {.async: (raises: [CatchableError]).} =
self: BlockExcNetwork, peer: NetworkPeer, msg: Message
) {.async: (raises: []).} =
## handle rpc messages
##
if msg.wantList.entries.len > 0:
b.trackedFutures.track(b.handleWantList(peer, msg.wantList))
self.trackedFutures.track(self.handleWantList(peer, msg.wantList))
if msg.payload.len > 0:
b.trackedFutures.track(b.handleBlocksDelivery(peer, msg.payload))
self.trackedFutures.track(self.handleBlocksDelivery(peer, msg.payload))
if msg.blockPresences.len > 0:
b.trackedFutures.track(b.handleBlockPresence(peer, msg.blockPresences))
self.trackedFutures.track(self.handleBlockPresence(peer, msg.blockPresences))
if account =? Account.init(msg.account):
b.trackedFutures.track(b.handleAccount(peer, account))
self.trackedFutures.track(self.handleAccount(peer, account))
if payment =? SignedState.init(msg.payment):
b.trackedFutures.track(b.handlePayment(peer, payment))
self.trackedFutures.track(self.handlePayment(peer, payment))
proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer =
proc getOrCreatePeer(self: BlockExcNetwork, peer: PeerId): NetworkPeer =
## Creates or retrieves a BlockExcNetwork Peer
##
if peer in b.peers:
return b.peers.getOrDefault(peer, nil)
if peer in self.peers:
return self.peers.getOrDefault(peer, nil)
var getConn: ConnProvider = proc(): Future[Connection] {.async, gcsafe, closure.} =
var getConn: ConnProvider = proc(): Future[Connection] {.
async: (raises: [CancelledError])
.} =
try:
trace "Getting new connection stream", peer
return await b.switch.dial(peer, Codec)
return await self.switch.dial(peer, Codec)
except CancelledError as error:
raise error
except CatchableError as exc:
trace "Unable to connect to blockexc peer", exc = exc.msg
if not isNil(b.getConn):
getConn = b.getConn
if not isNil(self.getConn):
getConn = self.getConn
let rpcHandler = proc(
p: NetworkPeer, msg: Message
) {.async: (raises: [CatchableError]).} =
await b.rpcHandler(p, msg)
let rpcHandler = proc(p: NetworkPeer, msg: Message) {.async: (raises: []).} =
await self.rpcHandler(p, msg)
# create new pubsub peer
let blockExcPeer = NetworkPeer.new(peer, getConn, rpcHandler)
debug "Created new blockexc peer", peer
b.peers[peer] = blockExcPeer
self.peers[peer] = blockExcPeer
return blockExcPeer
proc setupPeer*(b: BlockExcNetwork, peer: PeerId) =
## Perform initial setup, such as want
## list exchange
##
discard b.getOrCreatePeer(peer)
proc dialPeer*(b: BlockExcNetwork, peer: PeerRecord) {.async.} =
proc dialPeer*(self: BlockExcNetwork, peer: PeerRecord) {.async.} =
## Dial a peer
##
if b.isSelf(peer.peerId):
if self.isSelf(peer.peerId):
trace "Skipping dialing self", peer = peer.peerId
return
if peer.peerId in b.peers:
if peer.peerId in self.peers:
trace "Already connected to peer", peer = peer.peerId
return
await b.switch.connect(peer.peerId, peer.addresses.mapIt(it.address))
await self.switch.connect(peer.peerId, peer.addresses.mapIt(it.address))
proc dropPeer*(b: BlockExcNetwork, peer: PeerId) =
proc dropPeer*(
self: BlockExcNetwork, peer: PeerId
) {.async: (raises: [CancelledError]).} =
trace "Dropping peer", peer
try:
if not self.switch.isNil:
await self.switch.disconnect(peer)
except CatchableError as error:
warn "Error attempting to disconnect from peer", peer = peer, error = error.msg
if not self.handlers.onPeerDropped.isNil:
await self.handlers.onPeerDropped(peer)
proc handlePeerJoined*(
self: BlockExcNetwork, peer: PeerId
) {.async: (raises: [CancelledError]).} =
discard self.getOrCreatePeer(peer)
if not self.handlers.onPeerJoined.isNil:
await self.handlers.onPeerJoined(peer)
proc handlePeerDeparted*(
self: BlockExcNetwork, peer: PeerId
) {.async: (raises: [CancelledError]).} =
## Cleanup disconnected peer
##
trace "Dropping peer", peer
b.peers.del(peer)
trace "Cleaning up departed peer", peer
self.peers.del(peer)
if not self.handlers.onPeerDeparted.isNil:
await self.handlers.onPeerDeparted(peer)
method init*(self: BlockExcNetwork) =
method init*(self: BlockExcNetwork) {.raises: [].} =
## Perform protocol initialization
##
proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} =
proc peerEventHandler(
peerId: PeerId, event: PeerEvent
): Future[void] {.async: (raises: [CancelledError]).} =
if event.kind == PeerEventKind.Joined:
self.setupPeer(peerId)
await self.handlePeerJoined(peerId)
elif event.kind == PeerEventKind.Left:
await self.handlePeerDeparted(peerId)
else:
self.dropPeer(peerId)
warn "Unknown peer event", event
self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined)
self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left)
proc handler(conn: Connection, proto: string) {.async.} =
proc handler(
conn: Connection, proto: string
): Future[void] {.async: (raises: [CancelledError]).} =
let peerId = conn.peerId
let blockexcPeer = self.getOrCreatePeer(peerId)
await blockexcPeer.readLoop(conn) # attach read loop
@ -353,26 +395,32 @@ proc new*(
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false,
): Future[void] {.gcsafe.} =
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendWantList(id, cids, priority, cancel, wantType, full, sendDontHave)
proc sendWantCancellations(
id: PeerId, addresses: seq[BlockAddress]
): Future[void] {.gcsafe.} =
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendWantCancellations(id, addresses)
proc sendBlocksDelivery(
id: PeerId, blocksDelivery: seq[BlockDelivery]
): Future[void] {.gcsafe.} =
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendBlocksDelivery(id, blocksDelivery)
proc sendPresence(id: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.} =
proc sendPresence(
id: PeerId, presence: seq[BlockPresence]
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendBlockPresence(id, presence)
proc sendAccount(id: PeerId, account: Account): Future[void] {.gcsafe.} =
proc sendAccount(
id: PeerId, account: Account
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendAccount(id, account)
proc sendPayment(id: PeerId, payment: SignedState): Future[void] {.gcsafe.} =
proc sendPayment(
id: PeerId, payment: SignedState
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendPayment(id, payment)
self.request = BlockExcRequest(

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,9 +7,7 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [].}
import pkg/chronos
import pkg/libp2p
@ -18,6 +16,7 @@ import ../protobuf/blockexc
import ../protobuf/message
import ../../errors
import ../../logutils
import ../../utils/trackedfutures
logScope:
topics = "codex blockexcnetworkpeer"
@ -25,11 +24,9 @@ logScope:
const DefaultYieldInterval = 50.millis
type
ConnProvider* = proc(): Future[Connection] {.gcsafe, closure.}
ConnProvider* = proc(): Future[Connection] {.async: (raises: [CancelledError]).}
RPCHandler* = proc(
peer: NetworkPeer, msg: Message
): Future[void].Raising(CatchableError) {.gcsafe.}
RPCHandler* = proc(peer: NetworkPeer, msg: Message) {.async: (raises: []).}
NetworkPeer* = ref object of RootObj
id*: PeerId
@ -37,56 +34,68 @@ type
sendConn: Connection
getConn: ConnProvider
yieldInterval*: Duration = DefaultYieldInterval
trackedFutures: TrackedFutures
proc connected*(b: NetworkPeer): bool =
not (isNil(b.sendConn)) and not (b.sendConn.closed or b.sendConn.atEof)
proc connected*(self: NetworkPeer): bool =
not (isNil(self.sendConn)) and not (self.sendConn.closed or self.sendConn.atEof)
proc readLoop*(b: NetworkPeer, conn: Connection) {.async.} =
proc readLoop*(self: NetworkPeer, conn: Connection) {.async: (raises: []).} =
if isNil(conn):
trace "No connection to read from", peer = b.id
trace "No connection to read from", peer = self.id
return
trace "Attaching read loop", peer = b.id, connId = conn.oid
trace "Attaching read loop", peer = self.id, connId = conn.oid
try:
var nextYield = Moment.now() + b.yieldInterval
var nextYield = Moment.now() + self.yieldInterval
while not conn.atEof or not conn.closed:
if Moment.now() > nextYield:
nextYield = Moment.now() + b.yieldInterval
nextYield = Moment.now() + self.yieldInterval
trace "Yielding in read loop",
peer = b.id, nextYield = nextYield, interval = b.yieldInterval
peer = self.id, nextYield = nextYield, interval = self.yieldInterval
await sleepAsync(10.millis)
let
data = await conn.readLp(MaxMessageSize.int)
msg = Message.protobufDecode(data).mapFailure().tryGet()
trace "Received message", peer = b.id, connId = conn.oid
await b.handler(b, msg)
trace "Received message", peer = self.id, connId = conn.oid
await self.handler(self, msg)
except CancelledError:
trace "Read loop cancelled"
except CatchableError as err:
warn "Exception in blockexc read loop", msg = err.msg
finally:
trace "Detaching read loop", peer = b.id, connId = conn.oid
warn "Detaching read loop", peer = self.id, connId = conn.oid
if self.sendConn == conn:
self.sendConn = nil
await conn.close()
proc connect*(b: NetworkPeer): Future[Connection] {.async.} =
if b.connected:
trace "Already connected", peer = b.id, connId = b.sendConn.oid
return b.sendConn
proc connect*(
self: NetworkPeer
): Future[Connection] {.async: (raises: [CancelledError]).} =
if self.connected:
trace "Already connected", peer = self.id, connId = self.sendConn.oid
return self.sendConn
b.sendConn = await b.getConn()
asyncSpawn b.readLoop(b.sendConn)
return b.sendConn
self.sendConn = await self.getConn()
self.trackedFutures.track(self.readLoop(self.sendConn))
return self.sendConn
proc send*(b: NetworkPeer, msg: Message) {.async.} =
let conn = await b.connect()
proc send*(
self: NetworkPeer, msg: Message
) {.async: (raises: [CancelledError, LPStreamError]).} =
let conn = await self.connect()
if isNil(conn):
warn "Unable to get send connection for peer message not sent", peer = b.id
warn "Unable to get send connection for peer message not sent", peer = self.id
return
trace "Sending message", peer = b.id, connId = conn.oid
trace "Sending message", peer = self.id, connId = conn.oid
try:
await conn.writeLp(protobufEncode(msg))
except CatchableError as err:
if self.sendConn == conn:
self.sendConn = nil
raise newException(LPStreamError, "Failed to send message: " & err.msg)
func new*(
T: type NetworkPeer,
@ -96,4 +105,9 @@ func new*(
): NetworkPeer =
doAssert(not isNil(connProvider), "should supply connection provider")
NetworkPeer(id: peer, getConn: connProvider, handler: rpcHandler)
NetworkPeer(
id: peer,
getConn: connProvider,
handler: rpcHandler,
trackedFutures: TrackedFutures(),
)

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -25,28 +25,77 @@ import ../../logutils
export payments, nitro
const
MinRefreshInterval = 1.seconds
MaxRefreshBackoff = 36 # 36 seconds
MaxWantListBatchSize* = 1024 # Maximum blocks to send per WantList message
type BlockExcPeerCtx* = ref object of RootObj
id*: PeerId
blocks*: Table[BlockAddress, Presence] # remote peer have list including price
peerWants*: seq[WantListEntry] # remote peers want lists
wantedBlocks*: HashSet[BlockAddress] # blocks that the peer wants
exchanged*: int # times peer has exchanged with us
lastExchange*: Moment # last time peer has exchanged with us
refreshInProgress*: bool # indicates if a refresh is in progress
lastRefresh*: Moment # last time we refreshed our knowledge of the blocks this peer has
refreshBackoff*: int = 1 # backoff factor for refresh requests
account*: ?Account # ethereum account of this peer
paymentChannel*: ?ChannelId # payment channel id
blocksSent*: HashSet[BlockAddress] # blocks sent to peer
blocksRequested*: HashSet[BlockAddress] # pending block requests to this peer
lastExchange*: Moment # last time peer has sent us a block
activityTimeout*: Duration
lastSentWants*: HashSet[BlockAddress]
# track what wantList we last sent for delta updates
proc peerHave*(self: BlockExcPeerCtx): seq[BlockAddress] =
toSeq(self.blocks.keys)
proc isKnowledgeStale*(self: BlockExcPeerCtx): bool =
let staleness =
self.lastRefresh + self.refreshBackoff * MinRefreshInterval < Moment.now()
proc peerHaveCids*(self: BlockExcPeerCtx): HashSet[Cid] =
self.blocks.keys.toSeq.mapIt(it.cidOrTreeCid).toHashSet
if staleness and self.refreshInProgress:
trace "Cleaning up refresh state", peer = self.id
self.refreshInProgress = false
self.refreshBackoff = 1
proc peerWantsCids*(self: BlockExcPeerCtx): HashSet[Cid] =
self.peerWants.mapIt(it.address.cidOrTreeCid).toHashSet
staleness
proc isBlockSent*(self: BlockExcPeerCtx, address: BlockAddress): bool =
address in self.blocksSent
proc markBlockAsSent*(self: BlockExcPeerCtx, address: BlockAddress) =
self.blocksSent.incl(address)
proc markBlockAsNotSent*(self: BlockExcPeerCtx, address: BlockAddress) =
self.blocksSent.excl(address)
proc refreshRequested*(self: BlockExcPeerCtx) =
trace "Refresh requested for peer", peer = self.id, backoff = self.refreshBackoff
self.refreshInProgress = true
self.lastRefresh = Moment.now()
proc refreshReplied*(self: BlockExcPeerCtx) =
self.refreshInProgress = false
self.lastRefresh = Moment.now()
self.refreshBackoff = min(self.refreshBackoff * 2, MaxRefreshBackoff)
proc havesUpdated(self: BlockExcPeerCtx) =
self.refreshBackoff = 1
proc wantsUpdated*(self: BlockExcPeerCtx) =
self.refreshBackoff = 1
proc peerHave*(self: BlockExcPeerCtx): HashSet[BlockAddress] =
# XXX: this is ugly and inefficient, but since those will typically
# be used in "joins", it's better to pay the price here and have
# a linear join than to not do it and have a quadratic join.
toHashSet(self.blocks.keys.toSeq)
proc contains*(self: BlockExcPeerCtx, address: BlockAddress): bool =
address in self.blocks
func setPresence*(self: BlockExcPeerCtx, presence: Presence) =
if presence.address notin self.blocks:
self.havesUpdated()
self.blocks[presence.address] = presence
func cleanPresence*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]) =
@ -63,3 +112,36 @@ func price*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]): UInt256 =
price += precense[].price
price
proc blockRequestScheduled*(self: BlockExcPeerCtx, address: BlockAddress) =
## Adds a block to the set of blocks that have been requested from this peer
## (its request schedule).
if self.blocksRequested.len == 0:
self.lastExchange = Moment.now()
self.blocksRequested.incl(address)
proc blockRequestCancelled*(self: BlockExcPeerCtx, address: BlockAddress) =
## Removes a block from the set of blocks that have been requested from this peer
## (its request schedule).
self.blocksRequested.excl(address)
proc blockReceived*(self: BlockExcPeerCtx, address: BlockAddress): bool =
let wasRequested = address in self.blocksRequested
self.blocksRequested.excl(address)
self.lastExchange = Moment.now()
wasRequested
proc activityTimer*(
self: BlockExcPeerCtx
): Future[void] {.async: (raises: [CancelledError]).} =
## This is called by the block exchange when a block is scheduled for this peer.
## If the peer sends no blocks for a while, it is considered inactive or
## uncooperative and is dropped. Note that ANY block the peer sends resets this
## timer for all blocks.
##
while true:
let idleTime = Moment.now() - self.lastExchange
if idleTime > self.activityTimeout:
return
await sleepAsync(self.activityTimeout - idleTime)
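Taken together, the fields above implement a per-peer refresh backoff plus request and activity tracking. Below is a minimal sketch of the intended cycle, assuming this module is imported and that `peerId` and `presence` are valid values already in scope.

```nim
# Illustrative sketch only; not part of this diff.
let ctx = BlockExcPeerCtx(id: peerId, activityTimeout: 10.seconds)

if ctx.isKnowledgeStale and not ctx.refreshInProgress:
  ctx.refreshRequested()   # marks a refresh in progress and stamps lastRefresh
  # ...send a have/presence query to the peer here...

ctx.refreshReplied()       # doubles refreshBackoff, capped at MaxRefreshBackoff
ctx.setPresence(presence)  # a newly learned address resets the backoff to 1

# Request accounting: the first scheduled request stamps lastExchange, and any
# received block stamps it again, keeping activityTimer from firing.
ctx.blockRequestScheduled(presence.address)
discard ctx.blockReceived(presence.address)
```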

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,16 +7,13 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/sequtils
import std/tables
import std/algorithm
import std/sequtils
import pkg/upraises
push:
{.upraises: [].}
import pkg/chronos
import pkg/libp2p
@ -65,21 +62,23 @@ func len*(self: PeerCtxStore): int =
self.peers.len
func peersHave*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt(it.peerHave.anyIt(it == address))
toSeq(self.peers.values).filterIt(address in it.peerHave)
func peersHave*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
# FIXME: this is much slower and can lead to unexpected performance loss.
toSeq(self.peers.values).filterIt(it.peerHave.anyIt(it.cidOrTreeCid == cid))
func peersWant*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt(it.peerWants.anyIt(it == address))
toSeq(self.peers.values).filterIt(address in it.wantedBlocks)
func peersWant*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt(it.peerWants.anyIt(it.address.cidOrTreeCid == cid))
# FIXME: this is much slower and can lead to unexpected performance loss.
toSeq(self.peers.values).filterIt(it.wantedBlocks.anyIt(it.cidOrTreeCid == cid))
proc getPeersForBlock*(self: PeerCtxStore, address: BlockAddress): PeersForBlock =
var res: PeersForBlock = (@[], @[])
for peer in self:
if peer.peerHave.anyIt(it == address):
if address in peer:
res.with.add(peer)
else:
res.without.add(peer)

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,7 +9,6 @@
import std/hashes
import std/sequtils
import pkg/stew/endians2
import message
@ -20,13 +19,6 @@ export Wantlist, WantType, WantListEntry
export BlockDelivery, BlockPresenceType, BlockPresence
export AccountMessage, StateChannelUpdate
proc hash*(a: BlockAddress): Hash =
if a.leaf:
let data = a.treeCid.data.buffer & @(a.index.uint64.toBytesBE)
hash(data)
else:
hash(a.cid.data.buffer)
proc hash*(e: WantListEntry): Hash =
hash(e.address)

View File

@ -1,4 +1,4 @@
# Protocol of data exchange between Codex nodes
# Protocol of data exchange between Logos Storage nodes
# and Protobuf encoder/decoder for these messages.
#
# Eventually all this code should be auto-generated from message.proto.
@ -25,11 +25,15 @@ type
WantListEntry* = object
address*: BlockAddress
# XXX: I think an explicit priority is pointless, as the peer will request
# blocks in the order it wants to receive them, and all we have to do is
# process those requests in the same order when sending the blocks back. It
# also complicates things for no reason at the moment, as the priority is
# always set to 0.
priority*: int32 # The priority (normalized). Defaults to 1
cancel*: bool # Whether this revokes an entry
wantType*: WantType # Note: defaults to enum 0, ie Block
sendDontHave*: bool # Note: defaults to false
inFlight*: bool # Whether block sending is in progress. Not serialized.
WantList* = object
entries*: seq[WantListEntry] # A list of wantList entries
@ -97,7 +101,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: WantList) =
pb.write(field, ipb)
proc write*(pb: var ProtoBuffer, field: int, value: BlockDelivery) =
var ipb = initProtoBuffer(maxSize = MaxBlockSize)
var ipb = initProtoBuffer()
ipb.write(1, value.blk.cid.data.buffer)
ipb.write(2, value.blk.data)
ipb.write(3, value.address)
@ -128,7 +132,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: StateChannelUpdate) =
pb.write(field, ipb)
proc protobufEncode*(value: Message): seq[byte] =
var ipb = initProtoBuffer(maxSize = MaxMessageSize)
var ipb = initProtoBuffer()
ipb.write(1, value.wantList)
for v in value.payload:
ipb.write(3, v)
@ -254,16 +258,14 @@ proc decode*(
proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
var
value = Message()
pb = initProtoBuffer(msg, maxSize = MaxMessageSize)
pb = initProtoBuffer(msg)
ipb: ProtoBuffer
sublist: seq[seq[byte]]
if ?pb.getField(1, ipb):
value.wantList = ?WantList.decode(ipb)
if ?pb.getRepeatedField(3, sublist):
for item in sublist:
value.payload.add(
?BlockDelivery.decode(initProtoBuffer(item, maxSize = MaxBlockSize))
)
value.payload.add(?BlockDelivery.decode(initProtoBuffer(item)))
if ?pb.getRepeatedField(4, sublist):
for item in sublist:
value.blockPresences.add(?BlockPresence.decode(initProtoBuffer(item)))

View File

@ -1,4 +1,4 @@
// Protocol of data exchange between Codex nodes.
// Protocol of data exchange between Logos Storage nodes.
// Extended version of https://github.com/ipfs/specs/blob/main/BITSWAP.md
syntax = "proto3";

View File

@ -1,8 +1,9 @@
{.push raises: [].}
import pkg/stew/byteutils
import pkg/stint
import pkg/nitro
import pkg/questionable
import pkg/upraises
import ./blockexc
export AccountMessage
@ -11,9 +12,6 @@ export StateChannelUpdate
export stint
export nitro
push:
{.upraises: [].}
type Account* = object
address*: EthAddress

View File

@ -1,8 +1,9 @@
{.push raises: [].}
import libp2p
import pkg/stint
import pkg/questionable
import pkg/questionable/results
import pkg/upraises
import ./blockexc
import ../../blocktype
@ -11,9 +12,6 @@ export questionable
export stint
export BlockPresenceType
upraises.push:
{.upraises: [].}
type
PresenceMessage* = blockexc.BlockPresence
Presence* = object

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,16 +9,14 @@
import std/tables
import std/sugar
import std/hashes
export tables
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import pkg/libp2p/[cid, multicodec, multihash]
import pkg/stew/byteutils
import pkg/stew/[byteutils, endians2]
import pkg/questionable
import pkg/questionable/results
@ -67,6 +65,13 @@ proc `$`*(a: BlockAddress): string =
else:
"cid: " & $a.cid
proc hash*(a: BlockAddress): Hash =
if a.leaf:
let data = a.treeCid.data.buffer & @(a.index.uint64.toBytesBE)
hash(data)
else:
hash(a.cid.data.buffer)
proc cidOrTreeCid*(a: BlockAddress): Cid =
if a.leaf: a.treeCid else: a.cid
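A small illustration of why the hash above combines the tree CID with the leaf index: leaf addresses within the same tree remain distinct keys in hashed containers. This is a sketch, assuming `treeCid` is a valid Cid and std/sets is available.

```nim
# Illustrative only; not part of this diff.
let
  a0 = BlockAddress(leaf: true, treeCid: treeCid, index: 0)
  a1 = BlockAddress(leaf: true, treeCid: treeCid, index: 1)
var seen: HashSet[BlockAddress]
seen.incl(a0)
seen.incl(a1)
assert seen.len == 2 # distinct indices hash (and compare) as distinct keys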

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,10 +9,7 @@
# TODO: This is super inefficient and needs a rewrite, but it'll do for now
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import pkg/questionable
import pkg/questionable/results
@ -28,8 +25,11 @@ const DefaultChunkSize* = DefaultBlockSize
type
# default reader type
ChunkerError* = object of CatchableError
ChunkBuffer* = ptr UncheckedArray[byte]
Reader* = proc(data: ChunkBuffer, len: int): Future[int] {.gcsafe, raises: [Defect].}
Reader* = proc(data: ChunkBuffer, len: int): Future[int] {.
async: (raises: [ChunkerError, CancelledError])
.}
# Reader that splits input data into fixed-size chunks
Chunker* = ref object
@ -74,7 +74,7 @@ proc new*(
proc reader(
data: ChunkBuffer, len: int
): Future[int] {.gcsafe, async, raises: [Defect].} =
): Future[int] {.async: (raises: [ChunkerError, CancelledError]).} =
var res = 0
try:
while res < len:
@ -85,7 +85,7 @@ proc new*(
raise error
except LPStreamError as error:
error "LPStream error", err = error.msg
raise error
raise newException(ChunkerError, "LPStream error", error)
except CatchableError as exc:
error "CatchableError exception", exc = exc.msg
raise newException(Defect, exc.msg)
@ -102,7 +102,7 @@ proc new*(
proc reader(
data: ChunkBuffer, len: int
): Future[int] {.gcsafe, async, raises: [Defect].} =
): Future[int] {.async: (raises: [ChunkerError, CancelledError]).} =
var total = 0
try:
while total < len:

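The Reader callback now declares its failure modes through the async raises annotation. Below is a minimal conforming reader over an in-memory buffer, as a sketch; `memoryReader` is illustrative, and only `Reader`, `ChunkBuffer` and `ChunkerError` come from this module.

```nim
# Illustrative sketch only; not part of this diff.
proc memoryReader(source: seq[byte]): Reader =
  var offset = 0
  proc read(
      data: ChunkBuffer, len: int
  ): Future[int] {.async: (raises: [ChunkerError, CancelledError]).} =
    let n = min(len, source.len - offset)
    if n > 0:
      copyMem(data, unsafeAddr source[offset], n)
      offset += n
    return n
  read
```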
View File

@ -1,6 +1,7 @@
{.push raises: [].}
import pkg/chronos
import pkg/stew/endians2
import pkg/upraises
import pkg/stint
type
@ -8,10 +9,12 @@ type
SecondsSince1970* = int64
Timeout* = object of CatchableError
method now*(clock: Clock): SecondsSince1970 {.base, gcsafe, upraises: [].} =
method now*(clock: Clock): SecondsSince1970 {.base, gcsafe, raises: [].} =
raiseAssert "not implemented"
method waitUntil*(clock: Clock, time: SecondsSince1970) {.base, async.} =
method waitUntil*(
clock: Clock, time: SecondsSince1970
) {.base, async: (raises: [CancelledError]).} =
raiseAssert "not implemented"
method start*(clock: Clock) {.base, async.} =

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -12,6 +12,7 @@ import std/strutils
import std/os
import std/tables
import std/cpuinfo
import std/net
import pkg/chronos
import pkg/taskpools
@ -21,14 +22,13 @@ import pkg/confutils
import pkg/confutils/defs
import pkg/nitro
import pkg/stew/io2
import pkg/stew/shims/net as stewnet
import pkg/datastore
import pkg/ethers except Rng
import pkg/stew/io2
import ./node
import ./conf
import ./rng
import ./rng as random
import ./rest/api
import ./stores
import ./slots
@ -56,10 +56,21 @@ type
codexNode: CodexNodeRef
repoStore: RepoStore
maintenance: BlockMaintainer
taskpool: Taskpool
isStarted: bool
CodexPrivateKey* = libp2p.PrivateKey # alias
EthWallet = ethers.Wallet
func config*(self: CodexServer): CodexConf =
return self.config
func node*(self: CodexServer): CodexNodeRef =
return self.codexNode
func repoStore*(self: CodexServer): RepoStore =
return self.repoStore
proc waitForSync(provider: Provider): Future[void] {.async.} =
var sleepTime = 1
trace "Checking sync state of Ethereum provider..."
@ -83,7 +94,9 @@ proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} =
error "Persistence enabled, but no Ethereum account was set"
quit QuitFailure
let provider = JsonRpcProvider.new(config.ethProvider)
let provider = JsonRpcProvider.new(
config.ethProvider, maxPriorityFeePerGas = config.maxPriorityFeePerGas.u256
)
await waitForSync(provider)
var signer: Signer
if account =? config.ethAccount:
@ -103,7 +116,7 @@ proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} =
quit QuitFailure
signer = wallet
let deploy = Deployment.new(provider, config)
let deploy = Deployment.new(provider, config.marketplaceAddress)
without marketplaceAddress =? await deploy.address(Marketplace):
error "No Marketplace address was specified or there is no known address for the current network"
quit QuitFailure
@ -125,7 +138,7 @@ proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} =
# This is used for simulation purposes. Normal nodes won't be compiled with this flag
# and hence the proof failure will always be 0.
when codex_enable_proof_failures:
when storage_enable_proof_failures:
let proofFailures = config.simulateProofFailures
if proofFailures > 0:
warn "Enabling proof failure simulation!"
@ -134,6 +147,10 @@ proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} =
if config.simulateProofFailures > 0:
warn "Proof failure simulation is not enabled for this build! Configuration ignored"
if error =? (await market.loadConfig()).errorOption:
fatal "Cannot load market configuration", error = error.msg
quit QuitFailure
let purchasing = Purchasing.new(market, clock)
let sales = Sales.new(market, clock, repo, proofFailures)
client = some ClientInteractions.new(clock, purchasing)
@ -152,9 +169,13 @@ proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} =
s.codexNode.contracts = (client, host, validator)
proc start*(s: CodexServer) {.async.} =
trace "Starting codex node", config = $s.config
if s.isStarted:
warn "Storage server already started, skipping"
return
trace "Starting Storage node", config = $s.config
await s.repoStore.start()
s.maintenance.start()
await s.codexNode.switch.start()
@ -168,18 +189,55 @@ proc start*(s: CodexServer) {.async.} =
await s.bootstrapInteractions()
await s.codexNode.start()
if s.restServer != nil:
s.restServer.start()
proc stop*(s: CodexServer) {.async.} =
notice "Stopping codex node"
s.isStarted = true
await allFuturesThrowing(
s.restServer.stop(),
proc stop*(s: CodexServer) {.async.} =
if not s.isStarted:
warn "Storage is not started"
return
notice "Stopping Storage node"
var futures =
@[
s.codexNode.switch.stop(),
s.codexNode.stop(),
s.repoStore.stop(),
s.maintenance.stop(),
)
]
if s.restServer != nil:
futures.add(s.restServer.stop())
let res = await noCancel allFinishedFailed[void](futures)
if res.failure.len > 0:
error "Failed to stop Storage node", failures = res.failure.len
raiseAssert "Failed to stop Storage node"
proc close*(s: CodexServer) {.async.} =
var futures = @[s.codexNode.close(), s.repoStore.close()]
let res = await noCancel allFinishedFailed[void](futures)
if not s.taskpool.isNil:
try:
s.taskpool.shutdown()
except Exception as exc:
error "Failed to stop the taskpool", failures = res.failure.len
raiseAssert("Failure in taskpool shutdown:" & exc.msg)
if res.failure.len > 0:
error "Failed to close Storage node", failures = res.failure.len
raiseAssert "Failed to close Storage node"
proc shutdown*(server: CodexServer) {.async.} =
await server.stop()
await server.close()
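The server lifecycle is now guarded by isStarted, and shutdown chains the two teardown steps. A sketch of the expected call pattern, assuming valid `config` and `privateKey` values (the wrapper proc is illustrative):

```nim
# Illustrative sketch only; not part of this diff.
proc runOnce(config: CodexConf, privateKey: CodexPrivateKey) {.async.} =
  let server = CodexServer.new(config, privateKey)
  await server.start()    # sets isStarted; starts repo, switch, node and, if configured, the REST API
  await server.start()    # second call only warns and returns early
  await server.shutdown() # stop() then close(), including the taskpool
```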
proc new*(
T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey
@ -189,13 +247,13 @@ proc new*(
.new()
.withPrivateKey(privateKey)
.withAddresses(config.listenAddrs)
.withRng(Rng.instance())
.withRng(random.Rng.instance())
.withNoise()
.withMplex(5.minutes, 5.minutes)
.withMaxConnections(config.maxPeers)
.withAgentVersion(config.agentString)
.withSignedPeerRecord(true)
.withTcpTransport({ServerFlags.ReuseAddr})
.withTcpTransport({ServerFlags.ReuseAddr, ServerFlags.TcpNoDelay})
.build()
var
@ -279,7 +337,7 @@ proc new*(
)
peerStore = PeerCtxStore.new()
pendingBlocks = PendingBlocksManager.new()
pendingBlocks = PendingBlocksManager.new(retries = config.blockRetries)
advertiser = Advertiser.new(repoStore, discovery)
blockDiscovery =
DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks)
@ -304,10 +362,13 @@ proc new*(
taskPool = taskpool,
)
var restServer: RestServerRef = nil
if config.apiBindAddress.isSome:
restServer = RestServerRef
.new(
codexNode.initRestApi(config, repoStore, config.apiCorsAllowedOrigin),
initTAddress(config.apiBindAddress, config.apiPort),
initTAddress(config.apiBindAddress.get(), config.apiPort),
bufferSize = (1024 * 64),
maxRequestBodySize = int.high,
)
@ -321,4 +382,5 @@ proc new*(
restServer: restServer,
repoStore: repoStore,
maintenance: maintenance,
taskpool: taskpool,
)

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -16,8 +16,10 @@ import std/terminal # Is not used in tests
{.pop.}
import std/options
import std/parseutils
import std/strutils
import std/typetraits
import std/net
import pkg/chronos
import pkg/chronicles/helpers
@ -27,13 +29,12 @@ import pkg/confutils/std/net
import pkg/toml_serialization
import pkg/metrics
import pkg/metrics/chronos_httpserver
import pkg/stew/shims/net as stewnet
import pkg/stew/shims/parseutils
import pkg/stew/byteutils
import pkg/libp2p
import pkg/ethers
import pkg/questionable
import pkg/questionable/results
import pkg/stew/base64
import ./codextypes
import ./discovery
@ -44,15 +45,16 @@ import ./utils
import ./nat
import ./utils/natutils
from ./contracts/config import DefaultRequestCacheSize
from ./contracts/config import DefaultRequestCacheSize, DefaultMaxPriorityFeePerGas
from ./validationconfig import MaxSlots, ValidationGroups
from ./blockexchange/engine/pendingblocks import DefaultBlockRetries
export units, net, codextypes, logutils, completeCmdArg, parseCmdArg, NatConfig
export ValidationGroups, MaxSlots
export
DefaultQuotaBytes, DefaultBlockTtl, DefaultBlockInterval, DefaultNumBlocksPerInterval,
DefaultRequestCacheSize
DefaultRequestCacheSize, DefaultMaxPriorityFeePerGas, DefaultBlockRetries
type ThreadCount* = distinct Natural
@ -61,21 +63,19 @@ proc `==`*(a, b: ThreadCount): bool {.borrow.}
proc defaultDataDir*(): string =
let dataDir =
when defined(windows):
"AppData" / "Roaming" / "Codex"
"AppData" / "Roaming" / "Storage"
elif defined(macosx):
"Library" / "Application Support" / "Codex"
"Library" / "Application Support" / "Storage"
else:
".cache" / "codex"
".cache" / "storage"
getHomeDir() / dataDir
const
codex_enable_api_debug_peers* {.booldefine.} = false
codex_enable_proof_failures* {.booldefine.} = false
codex_enable_log_counter* {.booldefine.} = false
storage_enable_api_debug_peers* {.booldefine.} = false
storage_enable_proof_failures* {.booldefine.} = false
storage_enable_log_counter* {.booldefine.} = false
DefaultDataDir* = defaultDataDir()
DefaultCircuitDir* = defaultDataDir() / "circuits"
DefaultThreadCount* = ThreadCount(0)
type
@ -137,9 +137,9 @@ type
.}: Port
dataDir* {.
desc: "The directory where codex will store configuration and data",
defaultValue: DefaultDataDir,
defaultValueDesc: $DefaultDataDir,
desc: "The directory where Storage will store configuration and data",
defaultValue: defaultDataDir(),
defaultValueDesc: "",
abbr: "d",
name: "data-dir"
.}: OutDir
@ -198,14 +198,16 @@ type
.}: ThreadCount
agentString* {.
defaultValue: "Codex",
defaultValue: "Logos Storage",
desc: "Node agent string which is used as identifier in network",
name: "agent-string"
.}: string
apiBindAddress* {.
desc: "The REST API bind address", defaultValue: "127.0.0.1", name: "api-bindaddr"
.}: string
desc: "The REST API bind address",
defaultValue: "127.0.0.1".some,
name: "api-bindaddr"
.}: Option[string]
apiPort* {.
desc: "The REST Api port",
@ -263,6 +265,13 @@ type
name: "block-mn"
.}: int
blockRetries* {.
desc: "Number of times to retry fetching a block before giving up",
defaultValue: DefaultBlockRetries,
defaultValueDesc: $DefaultBlockRetries,
name: "block-retries"
.}: int
cacheSize* {.
desc:
"The size of the block cache, 0 disables the cache - " &
@ -370,34 +379,43 @@ type
hidden
.}: uint16
maxPriorityFeePerGas* {.
desc:
"Sets the default maximum priority fee per gas for Ethereum EIP-1559 transactions, in wei, when not provided by the network.",
defaultValue: DefaultMaxPriorityFeePerGas,
defaultValueDesc: $DefaultMaxPriorityFeePerGas,
name: "max-priority-fee-per-gas",
hidden
.}: uint64
case persistenceCmd* {.defaultValue: noCmd, command.}: PersistenceCmd
of PersistenceCmd.prover:
circuitDir* {.
desc: "Directory where Codex will store proof circuit data",
defaultValue: DefaultCircuitDir,
defaultValueDesc: $DefaultCircuitDir,
desc: "Directory where Storage will store proof circuit data",
defaultValue: defaultDataDir() / "circuits",
defaultValueDesc: "data/circuits",
abbr: "cd",
name: "circuit-dir"
.}: OutDir
circomR1cs* {.
desc: "The r1cs file for the storage circuit",
defaultValue: $DefaultCircuitDir / "proof_main.r1cs",
defaultValueDesc: $DefaultCircuitDir & "/proof_main.r1cs",
defaultValue: defaultDataDir() / "circuits" / "proof_main.r1cs",
defaultValueDesc: "data/circuits/proof_main.r1cs",
name: "circom-r1cs"
.}: InputFile
circomWasm* {.
desc: "The wasm file for the storage circuit",
defaultValue: $DefaultCircuitDir / "proof_main.wasm",
defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.wasm",
defaultValue: defaultDataDir() / "circuits" / "proof_main.wasm",
defaultValueDesc: "data/circuits/proof_main.wasm",
name: "circom-wasm"
.}: InputFile
circomZkey* {.
desc: "The zkey file for the storage circuit",
defaultValue: $DefaultCircuitDir / "proof_main.zkey",
defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.zkey",
defaultValue: defaultDataDir() / "circuits" / "proof_main.zkey",
defaultValueDesc: "data/circuits/proof_main.zkey",
name: "circom-zkey"
.}: InputFile
@ -467,7 +485,7 @@ func prover*(self: CodexConf): bool =
self.persistence and self.persistenceCmd == PersistenceCmd.prover
proc getCodexVersion(): string =
let tag = strip(staticExec("git tag"))
let tag = strip(staticExec("git describe --tags --abbrev=0"))
if tag.isEmptyOrWhitespace:
return "untagged build"
return tag
@ -477,76 +495,100 @@ proc getCodexRevision(): string =
var res = strip(staticExec("git rev-parse --short HEAD"))
return res
proc getCodexContractsRevision(): string =
let res =
strip(staticExec("git rev-parse --short HEAD:vendor/logos-storage-contracts-eth"))
return res
proc getNimBanner(): string =
staticExec("nim --version | grep Version")
const
codexVersion* = getCodexVersion()
codexRevision* = getCodexRevision()
codexContractsRevision* = getCodexContractsRevision()
nimBanner* = getNimBanner()
codexFullVersion* =
"Codex version: " & codexVersion & "\p" & "Codex revision: " & codexRevision & "\p" &
nimBanner
"Storage version: " & codexVersion & "\p" & "Storage revision: " & codexRevision &
"\p" & "Storage contracts revision: " & codexContractsRevision & "\p" & nimBanner
proc parseCmdArg*(
T: typedesc[MultiAddress], input: string
): MultiAddress {.upraises: [ValueError].} =
): MultiAddress {.raises: [ValueError].} =
var ma: MultiAddress
try:
let res = MultiAddress.init(input)
if res.isOk:
ma = res.get()
else:
warn "Invalid MultiAddress", input = input, error = res.error()
fatal "Invalid MultiAddress", input = input, error = res.error()
quit QuitFailure
except LPError as exc:
warn "Invalid MultiAddress uri", uri = input, error = exc.msg
fatal "Invalid MultiAddress uri", uri = input, error = exc.msg
quit QuitFailure
ma
proc parseCmdArg*(T: type ThreadCount, input: string): T {.upraises: [ValueError].} =
let count = parseInt(input)
proc parse*(T: type ThreadCount, p: string): Result[ThreadCount, string] =
try:
let count = parseInt(p)
if count != 0 and count < 2:
warn "Invalid number of threads", input = input
quit QuitFailure
ThreadCount(count)
return err("Invalid number of threads: " & p)
return ok(ThreadCount(count))
except ValueError as e:
return err("Invalid number of threads: " & p & ", error=" & e.msg)
proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T =
proc parseCmdArg*(T: type ThreadCount, input: string): T =
let val = ThreadCount.parse(input)
if val.isErr:
fatal "Cannot parse the thread count.", input = input, error = val.error()
quit QuitFailure
return val.get()
proc parse*(T: type SignedPeerRecord, p: string): Result[SignedPeerRecord, string] =
var res: SignedPeerRecord
try:
if not res.fromURI(uri):
warn "Invalid SignedPeerRecord uri", uri = uri
quit QuitFailure
except LPError as exc:
warn "Invalid SignedPeerRecord uri", uri = uri, error = exc.msg
quit QuitFailure
except CatchableError as exc:
warn "Invalid SignedPeerRecord uri", uri = uri, error = exc.msg
quit QuitFailure
res
if not res.fromURI(p):
return err("The uri is not a valid SignedPeerRecord: " & p)
return ok(res)
except LPError, Base64Error:
let e = getCurrentException()
return err(e.msg)
func parseCmdArg*(T: type NatConfig, p: string): T {.raises: [ValueError].} =
proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T =
let res = SignedPeerRecord.parse(uri)
if res.isErr:
fatal "Cannot parse the signed peer.", error = res.error(), input = uri
quit QuitFailure
return res.get()
func parse*(T: type NatConfig, p: string): Result[NatConfig, string] =
case p.toLowerAscii
of "any":
NatConfig(hasExtIp: false, nat: NatStrategy.NatAny)
return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatAny))
of "none":
NatConfig(hasExtIp: false, nat: NatStrategy.NatNone)
return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatNone))
of "upnp":
NatConfig(hasExtIp: false, nat: NatStrategy.NatUpnp)
return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatUpnp))
of "pmp":
NatConfig(hasExtIp: false, nat: NatStrategy.NatPmp)
return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatPmp))
else:
if p.startsWith("extip:"):
try:
let ip = parseIpAddress(p[6 ..^ 1])
NatConfig(hasExtIp: true, extIp: ip)
return ok(NatConfig(hasExtIp: true, extIp: ip))
except ValueError:
let error = "Not a valid IP address: " & p[6 ..^ 1]
raise newException(ValueError, error)
return err(error)
else:
let error = "Not a valid NAT option: " & p
raise newException(ValueError, error)
return err("Not a valid NAT option: " & p)
proc parseCmdArg*(T: type NatConfig, p: string): T =
let res = NatConfig.parse(p)
if res.isErr:
fatal "Cannot parse the NAT config.", error = res.error(), input = p
quit QuitFailure
return res.get()
proc completeCmdArg*(T: type NatConfig, val: string): seq[string] =
return @[]
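The CLI parsers are now split into pure `parse` helpers that return a Result and thin `parseCmdArg` wrappers that log and quit on failure, which makes the parsing logic testable on its own. Illustrative use of the Result side (input strings are arbitrary examples):

```nim
# Illustrative only; not part of this diff.
doAssert NatConfig.parse("upnp").isOk
doAssert NatConfig.parse("extip:not-an-ip").isErr
doAssert ThreadCount.parse("1").isErr # rejected: thread count must be 0 or >= 2
doAssert ThreadCount.parse("4").isOk
```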
@ -554,25 +596,31 @@ proc completeCmdArg*(T: type NatConfig, val: string): seq[string] =
proc parseCmdArg*(T: type EthAddress, address: string): T =
EthAddress.init($address).get()
proc parseCmdArg*(T: type NBytes, val: string): T =
func parse*(T: type NBytes, p: string): Result[NBytes, string] =
var num = 0'i64
let count = parseSize(val, num, alwaysBin = true)
let count = parseSize(p, num, alwaysBin = true)
if count == 0:
warn "Invalid number of bytes", nbytes = val
return err("Invalid number of bytes: " & p)
return ok(NBytes(num))
proc parseCmdArg*(T: type NBytes, val: string): T =
let res = NBytes.parse(val)
if res.isErr:
fatal "Cannot parse NBytes.", error = res.error(), input = val
quit QuitFailure
NBytes(num)
return res.get()
proc parseCmdArg*(T: type Duration, val: string): T =
var dur: Duration
let count = parseDuration(val, dur)
if count == 0:
warn "Cannot parse duration", dur = dur
fatal "Cannot parse duration", dur = dur
quit QuitFailure
dur
proc readValue*(
r: var TomlReader, val: var EthAddress
) {.upraises: [SerializationError, IOError].} =
) {.raises: [SerializationError, IOError].} =
val = EthAddress.init(r.readValue(string)).get()
proc readValue*(r: var TomlReader, val: var SignedPeerRecord) =
@ -583,7 +631,7 @@ proc readValue*(r: var TomlReader, val: var SignedPeerRecord) =
try:
val = SignedPeerRecord.parseCmdArg(uri)
except LPError as err:
warn "Invalid SignedPeerRecord uri", uri = uri, error = err.msg
fatal "Invalid SignedPeerRecord uri", uri = uri, error = err.msg
quit QuitFailure
proc readValue*(r: var TomlReader, val: var MultiAddress) =
@ -595,12 +643,12 @@ proc readValue*(r: var TomlReader, val: var MultiAddress) =
if res.isOk:
val = res.get()
else:
warn "Invalid MultiAddress", input = input, error = res.error()
fatal "Invalid MultiAddress", input = input, error = res.error()
quit QuitFailure
proc readValue*(
r: var TomlReader, val: var NBytes
) {.upraises: [SerializationError, IOError].} =
) {.raises: [SerializationError, IOError].} =
var value = 0'i64
var str = r.readValue(string)
let count = parseSize(str, value, alwaysBin = true)
@ -611,7 +659,7 @@ proc readValue*(
proc readValue*(
r: var TomlReader, val: var ThreadCount
) {.upraises: [SerializationError, IOError].} =
) {.raises: [SerializationError, IOError].} =
var str = r.readValue(string)
try:
val = parseCmdArg(ThreadCount, str)
@ -620,7 +668,7 @@ proc readValue*(
proc readValue*(
r: var TomlReader, val: var Duration
) {.upraises: [SerializationError, IOError].} =
) {.raises: [SerializationError, IOError].} =
var str = r.readValue(string)
var dur: Duration
let count = parseDuration(str, dur)
@ -687,7 +735,7 @@ proc stripAnsi*(v: string): string =
res
proc updateLogLevel*(logLevel: string) {.upraises: [ValueError].} =
proc updateLogLevel*(logLevel: string) {.raises: [ValueError].} =
# Updates log levels (without clearing old ones)
let directives = logLevel.split(";")
try:
@ -756,7 +804,7 @@ proc setupLogging*(conf: CodexConf) =
of LogKind.None:
noOutput
when codex_enable_log_counter:
when storage_enable_log_counter:
var counter = 0.uint64
proc numberedWriter(logLevel: LogLevel, msg: LogOutputStr) =
inc(counter)
@ -767,15 +815,6 @@ proc setupLogging*(conf: CodexConf) =
else:
defaultChroniclesStream.outputs[0].writer = writer
try:
updateLogLevel(conf.logLevel)
except ValueError as err:
try:
stderr.write "Invalid value for --log-level. " & err.msg & "\n"
except IOError:
echo "Invalid value for --log-level. " & err.msg
quit QuitFailure
proc setupMetrics*(config: CodexConf) =
if config.metricsEnabled:
let metricsAddress = config.metricsAddress

View File

@ -0,0 +1,8 @@
const ContentIdsExts = [
multiCodec("codex-root"),
multiCodec("codex-manifest"),
multiCodec("codex-block"),
multiCodec("codex-slot-root"),
multiCodec("codex-proving-root"),
multiCodec("codex-slot-cell"),
]

View File

@ -1,13 +1,13 @@
Codex Contracts in Nim
Logos Storage Contracts in Nim
=======================
Nim API for the [Codex smart contracts][1].
Nim API for the [Logos Storage smart contracts][1].
Usage
-----
For a global overview of the steps involved in starting and fulfilling a
storage contract, see [Codex Contracts][1].
storage contract, see [Logos Storage Contracts][1].
Smart contract
--------------
@ -144,5 +144,5 @@ await storage
.markProofAsMissing(id, period)
```
[1]: https://github.com/status-im/codex-contracts-eth/
[2]: https://github.com/status-im/codex-research/blob/main/design/storage-proof-timing.md
[1]: https://github.com/logos-storage/logos-storage-contracts-eth/
[2]: https://github.com/logos-storage/logos-storage-research/blob/master/design/storage-proof-timing.md

View File

@ -1,3 +1,5 @@
{.push raises: [].}
import std/times
import pkg/ethers
import pkg/questionable
@ -5,6 +7,7 @@ import pkg/chronos
import pkg/stint
import ../clock
import ../conf
import ../utils/trackedfutures
export clock
@ -18,9 +21,12 @@ type OnChainClock* = ref object of Clock
blockNumber: UInt256
started: bool
newBlock: AsyncEvent
trackedFutures: TrackedFutures
proc new*(_: type OnChainClock, provider: Provider): OnChainClock =
OnChainClock(provider: provider, newBlock: newAsyncEvent())
OnChainClock(
provider: provider, newBlock: newAsyncEvent(), trackedFutures: TrackedFutures()
)
proc update(clock: OnChainClock, blck: Block) =
if number =? blck.number and number > clock.blockNumber:
@ -32,15 +38,12 @@ proc update(clock: OnChainClock, blck: Block) =
blockTime = blck.timestamp, blockNumber = number, offset = clock.offset
clock.newBlock.fire()
proc update(clock: OnChainClock) {.async.} =
proc update(clock: OnChainClock) {.async: (raises: []).} =
try:
if latest =? (await clock.provider.getBlock(BlockTag.latest)):
clock.update(latest)
except CancelledError as error:
raise error
except CatchableError as error:
debug "error updating clock: ", error = error.msg
discard
method start*(clock: OnChainClock) {.async.} =
if clock.started:
@ -52,7 +55,7 @@ method start*(clock: OnChainClock) {.async.} =
return
# ignore block parameter; hardhat may call this with pending blocks
asyncSpawn clock.update()
clock.trackedFutures.track(clock.update())
await clock.update()
@ -64,13 +67,16 @@ method stop*(clock: OnChainClock) {.async.} =
return
await clock.subscription.unsubscribe()
await clock.trackedFutures.cancelTracked()
clock.started = false
method now*(clock: OnChainClock): SecondsSince1970 =
doAssert clock.started, "clock should be started before calling now()"
return toUnix(getTime() + clock.offset)
method waitUntil*(clock: OnChainClock, time: SecondsSince1970) {.async.} =
method waitUntil*(
clock: OnChainClock, time: SecondsSince1970
) {.async: (raises: [CancelledError]).} =
while (let difference = time - clock.now(); difference > 0):
clock.newBlock.clear()
discard await clock.newBlock.wait().withTimeout(chronos.seconds(difference))

View File

@ -1,10 +1,11 @@
import pkg/contractabi
import pkg/ethers/fields
import pkg/ethers/contracts/fields
import pkg/questionable/results
export contractabi
const DefaultRequestCacheSize* = 128.uint16
const DefaultMaxPriorityFeePerGas* = 1_000_000_000.uint64
type
MarketplaceConfig* = object

View File

@ -9,7 +9,7 @@ import ./marketplace
type Deployment* = ref object
provider: Provider
config: CodexConf
marketplaceAddressOverride: ?Address
const knownAddresses = {
# Hardhat localhost network
@ -18,9 +18,12 @@ const knownAddresses = {
# Taiko Alpha-3 Testnet
"167005":
{"Marketplace": Address.init("0x948CF9291b77Bd7ad84781b9047129Addf1b894F")}.toTable,
# Codex Testnet - Feb 25 2025 07:24:19 AM (+00:00 UTC)
# Codex Testnet - Jun 19 2025 13:11:56 PM (+00:00 UTC)
"789987":
{"Marketplace": Address.init("0xfFaF679D5Cbfdd5Dbc9Be61C616ed115DFb597ed")}.toTable,
{"Marketplace": Address.init("0x5378a4EA5dA2a548ce22630A3AE74b052000C62D")}.toTable,
# Linea (Status)
"1660990954":
{"Marketplace": Address.init("0x34F606C65869277f236ce07aBe9af0B8c88F486B")}.toTable,
}.toTable
proc getKnownAddress(T: type, chainId: UInt256): ?Address =
@ -32,12 +35,16 @@ proc getKnownAddress(T: type, chainId: UInt256): ?Address =
return knownAddresses[id].getOrDefault($T, Address.none)
proc new*(_: type Deployment, provider: Provider, config: CodexConf): Deployment =
Deployment(provider: provider, config: config)
proc new*(
_: type Deployment,
provider: Provider,
marketplaceAddressOverride: ?Address = none Address,
): Deployment =
Deployment(provider: provider, marketplaceAddressOverride: marketplaceAddressOverride)
proc address*(deployment: Deployment, contract: type): Future[?Address] {.async.} =
when contract is Marketplace:
if address =? deployment.config.marketplaceAddress:
if address =? deployment.marketplaceAddressOverride:
return some address
let chainId = await deployment.provider.getChainId()

View File

@ -1,6 +1,6 @@
import std/strformat
import std/strutils
import pkg/ethers
import pkg/upraises
import pkg/questionable
import pkg/lrucache
import ../utils/exceptions
@ -22,6 +22,7 @@ type
rewardRecipient: ?Address
configuration: ?MarketplaceConfig
requestCache: LruCache[string, StorageRequest]
allowanceLock: AsyncLock
MarketSubscription = market.Subscription
EventSubscription = ethers.Subscription
@ -49,130 +50,195 @@ func new*(
proc raiseMarketError(message: string) {.raises: [MarketError].} =
raise newException(MarketError, message)
template convertEthersError(body) =
func prefixWith(suffix, prefix: string, separator = ": "): string =
if prefix.len > 0:
return &"{prefix}{separator}{suffix}"
else:
return suffix
template convertEthersError(msg: string = "", body) =
try:
body
except EthersError as error:
raiseMarketError(error.msgDetail)
raiseMarketError(error.msgDetail.prefixWith(msg))
proc config(market: OnChainMarket): Future[MarketplaceConfig] {.async.} =
proc config(
market: OnChainMarket
): Future[MarketplaceConfig] {.async: (raises: [CancelledError, MarketError]).} =
without resolvedConfig =? market.configuration:
let fetchedConfig = await market.contract.configuration()
market.configuration = some fetchedConfig
return fetchedConfig
if err =? (await market.loadConfig()).errorOption:
raiseMarketError(err.msg)
without config =? market.configuration:
raiseMarketError("Failed to access the config from the Marketplace contract")
return config
return resolvedConfig
proc approveFunds(market: OnChainMarket, amount: UInt256) {.async.} =
template withAllowanceLock*(market: OnChainMarket, body: untyped) =
if market.allowanceLock.isNil:
market.allowanceLock = newAsyncLock()
await market.allowanceLock.acquire()
try:
body
finally:
try:
market.allowanceLock.release()
except AsyncLockError as error:
raise newException(Defect, error.msg, error)
proc approveFunds(
market: OnChainMarket, amount: UInt256
) {.async: (raises: [CancelledError, MarketError]).} =
debug "Approving tokens", amount
convertEthersError:
convertEthersError("Failed to approve funds"):
let tokenAddress = await market.contract.token()
let token = Erc20Token.new(tokenAddress, market.signer)
discard await token.increaseAllowance(market.contract.address(), amount).confirm(1)
let owner = await market.signer.getAddress()
let spender = market.contract.address
market.withAllowanceLock:
let allowance = await token.allowance(owner, spender)
discard await token.approve(spender, allowance + amount).confirm(1)
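approveFunds now reads the current allowance and overwrites it with approve instead of calling increaseAllowance; because that read-modify-write is not atomic, it is serialized through withAllowanceLock. A sketch of applying the same pattern elsewhere (the helper name and parameters are illustrative):

```nim
# Illustrative sketch only; not part of this diff.
proc bumpAllowance(
    market: OnChainMarket,
    token: Erc20Token,
    owner, spender: Address,
    amount: UInt256,
) {.async.} =
  # Every allowance read-modify-write goes through the same lock so that
  # concurrent approvals do not clobber each other.
  market.withAllowanceLock:
    let current = await token.allowance(owner, spender)
    discard await token.approve(spender, current + amount).confirm(1)
```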
method getZkeyHash*(market: OnChainMarket): Future[?string] {.async.} =
method loadConfig*(
market: OnChainMarket
): Future[?!void] {.async: (raises: [CancelledError]).} =
try:
without config =? market.configuration:
let fetchedConfig = await market.contract.configuration()
market.configuration = some fetchedConfig
return success()
except EthersError as err:
return failure newException(
MarketError,
"Failed to fetch the config from the Marketplace contract: " & err.msg,
)
method getZkeyHash*(
market: OnChainMarket
): Future[?string] {.async: (raises: [CancelledError, MarketError]).} =
let config = await market.config()
return some config.proofs.zkeyHash
method getSigner*(market: OnChainMarket): Future[Address] {.async.} =
convertEthersError:
method getSigner*(
market: OnChainMarket
): Future[Address] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get signer address"):
return await market.signer.getAddress()
method periodicity*(market: OnChainMarket): Future[Periodicity] {.async.} =
convertEthersError:
method periodicity*(
market: OnChainMarket
): Future[Periodicity] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
let period = config.proofs.period
return Periodicity(seconds: period)
method proofTimeout*(market: OnChainMarket): Future[uint64] {.async.} =
convertEthersError:
method proofTimeout*(
market: OnChainMarket
): Future[uint64] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
return config.proofs.timeout
method repairRewardPercentage*(market: OnChainMarket): Future[uint8] {.async.} =
convertEthersError:
method repairRewardPercentage*(
market: OnChainMarket
): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
return config.collateral.repairRewardPercentage
method requestDurationLimit*(market: OnChainMarket): Future[uint64] {.async.} =
convertEthersError:
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
return config.requestDurationLimit
method proofDowntime*(market: OnChainMarket): Future[uint8] {.async.} =
convertEthersError:
method proofDowntime*(
market: OnChainMarket
): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
return config.proofs.downtime
method getPointer*(market: OnChainMarket, slotId: SlotId): Future[uint8] {.async.} =
convertEthersError:
convertEthersError("Failed to get slot pointer"):
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.getPointer(slotId, overrides)
method myRequests*(market: OnChainMarket): Future[seq[RequestId]] {.async.} =
convertEthersError:
convertEthersError("Failed to get my requests"):
return await market.contract.myRequests
method mySlots*(market: OnChainMarket): Future[seq[SlotId]] {.async.} =
convertEthersError:
convertEthersError("Failed to get my slots"):
let slots = await market.contract.mySlots()
debug "Fetched my slots", numSlots = len(slots)
return slots
method requestStorage(market: OnChainMarket, request: StorageRequest) {.async.} =
convertEthersError:
method requestStorage(
market: OnChainMarket, request: StorageRequest
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to request storage"):
debug "Requesting storage"
await market.approveFunds(request.totalPrice())
discard await market.contract.requestStorage(request).confirm(1)
method getRequest*(
market: OnChainMarket, id: RequestId
): Future[?StorageRequest] {.async.} =
): Future[?StorageRequest] {.async: (raises: [CancelledError]).} =
try:
let key = $id
if market.requestCache.contains(key):
if key in market.requestCache:
return some market.requestCache[key]
convertEthersError:
try:
let request = await market.contract.getRequest(id)
market.requestCache[key] = request
return some request
except Marketplace_UnknownRequest:
except Marketplace_UnknownRequest, KeyError:
warn "Cannot retrieve the request", error = getCurrentExceptionMsg()
return none StorageRequest
except EthersError as e:
error "Cannot retrieve the request", error = e.msg
return none StorageRequest
method requestState*(
market: OnChainMarket, requestId: RequestId
): Future[?RequestState] {.async.} =
convertEthersError:
convertEthersError("Failed to get request state"):
try:
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return some await market.contract.requestState(requestId, overrides)
except Marketplace_UnknownRequest:
return none RequestState
method slotState*(market: OnChainMarket, slotId: SlotId): Future[SlotState] {.async.} =
convertEthersError:
method slotState*(
market: OnChainMarket, slotId: SlotId
): Future[SlotState] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to fetch the slot state from the Marketplace contract"):
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.slotState(slotId, overrides)
method getRequestEnd*(
market: OnChainMarket, id: RequestId
): Future[SecondsSince1970] {.async.} =
convertEthersError:
convertEthersError("Failed to get request end"):
return await market.contract.requestEnd(id)
method requestExpiresAt*(
market: OnChainMarket, id: RequestId
): Future[SecondsSince1970] {.async.} =
convertEthersError:
convertEthersError("Failed to get request expiry"):
return await market.contract.requestExpiry(id)
method getHost(
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
): Future[?Address] {.async.} =
convertEthersError:
): Future[?Address] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get slot's host"):
let slotId = slotId(requestId, slotIndex)
let address = await market.contract.getHost(slotId)
if address != Address.default:
@ -182,12 +248,12 @@ method getHost(
method currentCollateral*(
market: OnChainMarket, slotId: SlotId
): Future[UInt256] {.async.} =
convertEthersError:
): Future[UInt256] {.async: (raises: [MarketError, CancelledError]).} =
convertEthersError("Failed to get slot's current collateral"):
return await market.contract.currentCollateral(slotId)
method getActiveSlot*(market: OnChainMarket, slotId: SlotId): Future[?Slot] {.async.} =
convertEthersError:
convertEthersError("Failed to get active slot"):
try:
return some await market.contract.getActiveSlot(slotId)
except Marketplace_SlotIsFree:
@ -199,42 +265,88 @@ method fillSlot(
slotIndex: uint64,
proof: Groth16Proof,
collateral: UInt256,
) {.async.} =
convertEthersError:
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to fill slot"):
logScope:
requestId
slotIndex
try:
await market.approveFunds(collateral)
trace "calling fillSlot on contract"
discard await market.contract.fillSlot(requestId, slotIndex, proof).confirm(1)
trace "fillSlot transaction completed"
method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} =
convertEthersError:
# Add 10% to gas estimate to deal with different evm code flow when we
# happen to be the last one to fill a slot in this request
trace "estimating gas for fillSlot"
let gas = await market.contract.estimateGas.fillSlot(requestId, slotIndex, proof)
let gasLimit = (gas * 110) div 100
let overrides = TransactionOverrides(gasLimit: some gasLimit)
trace "calling fillSlot on contract", estimatedGas = gas, gasLimit = gasLimit
discard await market.contract
.fillSlot(requestId, slotIndex, proof, overrides)
.confirm(1)
trace "fillSlot transaction completed"
except Marketplace_SlotNotFree as parent:
raise newException(
SlotStateMismatchError, "Failed to fill slot because the slot is not free",
parent,
)
method freeSlot*(
market: OnChainMarket, slotId: SlotId
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to free slot"):
try:
var freeSlot: Future[Confirmable]
if rewardRecipient =? market.rewardRecipient:
# If --reward-recipient is specified, use it as the reward recipient, and use
# the SP's address as the collateral recipient
let collateralRecipient = await market.getSigner()
# Add 200% to gas estimate to deal with different evm code flow when we
# happen to be the one to make the request fail
let gas = await market.contract.estimateGas.freeSlot(
slotId, rewardRecipient, collateralRecipient
)
let gasLimit = gas * 3
let overrides = TransactionOverrides(gasLimit: some gasLimit)
trace "calling freeSlot on contract", estimatedGas = gas, gasLimit = gasLimit
freeSlot = market.contract.freeSlot(
slotId,
rewardRecipient, # --reward-recipient
collateralRecipient,
) # SP's address
collateralRecipient, # SP's address
overrides,
)
else:
# Otherwise, use the SP's address as both the reward and collateral
# recipient (the contract will use msg.sender for both)
freeSlot = market.contract.freeSlot(slotId)
# Add 200% to gas estimate to deal with different evm code flow when we
# happen to be the one to make the request fail
let gas = await market.contract.estimateGas.freeSlot(slotId)
let gasLimit = gas * 3
let overrides = TransactionOverrides(gasLimit: some (gasLimit))
trace "calling freeSlot on contract", estimatedGas = gas, gasLimit = gasLimit
freeSlot = market.contract.freeSlot(slotId, overrides)
discard await freeSlot.confirm(1)
except Marketplace_SlotIsFree as parent:
raise newException(
SlotStateMismatchError, "Failed to free slot, slot is already free", parent
)
method withdrawFunds(market: OnChainMarket, requestId: RequestId) {.async.} =
convertEthersError:
method withdrawFunds(
market: OnChainMarket, requestId: RequestId
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to withdraw funds"):
discard await market.contract.withdrawFunds(requestId).confirm(1)
method isProofRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} =
convertEthersError:
convertEthersError("Failed to get proof requirement"):
try:
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.isProofRequired(id, overrides)
@ -242,7 +354,7 @@ method isProofRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async
return false
method willProofBeRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} =
convertEthersError:
convertEthersError("Failed to get future proof requirement"):
try:
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.willProofBeRequired(id, overrides)
@ -252,28 +364,42 @@ method willProofBeRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.a
method getChallenge*(
market: OnChainMarket, id: SlotId
): Future[ProofChallenge] {.async.} =
convertEthersError:
convertEthersError("Failed to get proof challenge"):
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.getChallenge(id, overrides)
method submitProof*(market: OnChainMarket, id: SlotId, proof: Groth16Proof) {.async.} =
convertEthersError:
method submitProof*(
market: OnChainMarket, id: SlotId, proof: Groth16Proof
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to submit proof"):
try:
discard await market.contract.submitProof(id, proof).confirm(1)
except Proofs_InvalidProof as parent:
raise newException(
ProofInvalidError, "Failed to submit proof because the proof is invalid", parent
)
method markProofAsMissing*(
market: OnChainMarket, id: SlotId, period: Period
) {.async.} =
convertEthersError:
discard await market.contract.markProofAsMissing(id, period).confirm(1)
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to mark proof as missing"):
# Add 50% to gas estimate to deal with different evm code flow when we
# happen to be the one to make the request fail
let gas = await market.contract.estimateGas.markProofAsMissing(id, period)
let gasLimit = (gas * 150) div 100
let overrides = TransactionOverrides(gasLimit: some gasLimit)
method canProofBeMarkedAsMissing*(
trace "calling markProofAsMissing on contract",
estimatedGas = gas, gasLimit = gasLimit
discard await market.contract.markProofAsMissing(id, period, overrides).confirm(1)
method canMarkProofAsMissing*(
market: OnChainMarket, id: SlotId, period: Period
): Future[bool] {.async.} =
let provider = market.contract.provider
let contractWithoutSigner = market.contract.connect(provider)
let overrides = CallOverrides(blockTag: some BlockTag.pending)
): Future[bool] {.async: (raises: [CancelledError]).} =
try:
discard await contractWithoutSigner.markProofAsMissing(id, period, overrides)
let overrides = CallOverrides(blockTag: some BlockTag.pending)
discard await market.contract.canMarkProofAsMissing(id, period, overrides)
return true
except EthersError as e:
trace "Proof cannot be marked as missing", msg = e.msg
@ -281,48 +407,56 @@ method canProofBeMarkedAsMissing*(
method reserveSlot*(
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
) {.async.} =
convertEthersError:
discard await market.contract
.reserveSlot(
requestId,
slotIndex,
# reserveSlot runs out of gas for unknown reason, but 100k gas covers it
TransactionOverrides(gasLimit: some 100000.u256),
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to reserve slot"):
try:
# Add 25% to gas estimate to deal with different evm code flow when we
# happen to be the last one that is allowed to reserve the slot
let gas = await market.contract.estimateGas.reserveSlot(requestId, slotIndex)
let gasLimit = (gas * 125) div 100
let overrides = TransactionOverrides(gasLimit: some gasLimit)
trace "calling reserveSlot on contract", estimatedGas = gas, gasLimit = gasLimit
discard
await market.contract.reserveSlot(requestId, slotIndex, overrides).confirm(1)
except SlotReservations_ReservationNotAllowed:
raise newException(
SlotReservationNotAllowedError,
"Failed to reserve slot because reservation is not allowed",
)
.confirm(1)
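The transaction calls above replace fixed or default gas limits with an estimate padded per call: 10% for fillSlot, 25% for reserveSlot, 50% for markProofAsMissing, and 3x (a 200% pad) for freeSlot, to absorb the more expensive code paths. The shared pattern, factored into a hypothetical helper as a sketch:

```nim
# Illustrative sketch only; not part of this diff.
proc paddedOverrides(estimate: UInt256, padPercent: int): TransactionOverrides =
  ## Pads a gas estimate by `padPercent` percent and wraps it as overrides.
  let gasLimit = (estimate * (100 + padPercent).u256) div 100.u256
  TransactionOverrides(gasLimit: some gasLimit)
```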
method canReserveSlot*(
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
): Future[bool] {.async.} =
convertEthersError:
convertEthersError("Unable to determine if slot can be reserved"):
return await market.contract.canReserveSlot(requestId, slotIndex)
method subscribeRequests*(
market: OnChainMarket, callback: OnRequest
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!StorageRequested) {.upraises: [].} =
proc onEvent(eventResult: ?!StorageRequested) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in Request subscription", msg = eventErr.msg
return
callback(event.requestId, event.ask, event.expiry)
convertEthersError:
convertEthersError("Failed to subscribe to StorageRequested events"):
let subscription = await market.contract.subscribe(StorageRequested, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeSlotFilled*(
market: OnChainMarket, callback: OnSlotFilled
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!SlotFilled) {.upraises: [].} =
proc onEvent(eventResult: ?!SlotFilled) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in SlotFilled subscription", msg = eventErr.msg
return
callback(event.requestId, event.slotIndex)
convertEthersError:
convertEthersError("Failed to subscribe to SlotFilled events"):
let subscription = await market.contract.subscribe(SlotFilled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
@ -336,27 +470,27 @@ method subscribeSlotFilled*(
if eventRequestId == requestId and eventSlotIndex == slotIndex:
callback(requestId, slotIndex)
convertEthersError:
convertEthersError("Failed to subscribe to SlotFilled events"):
return await market.subscribeSlotFilled(onSlotFilled)
method subscribeSlotFreed*(
market: OnChainMarket, callback: OnSlotFreed
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!SlotFreed) {.upraises: [].} =
proc onEvent(eventResult: ?!SlotFreed) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in SlotFreed subscription", msg = eventErr.msg
return
callback(event.requestId, event.slotIndex)
convertEthersError:
convertEthersError("Failed to subscribe to SlotFreed events"):
let subscription = await market.contract.subscribe(SlotFreed, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeSlotReservationsFull*(
market: OnChainMarket, callback: OnSlotReservationsFull
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!SlotReservationsFull) {.upraises: [].} =
proc onEvent(eventResult: ?!SlotReservationsFull) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in SlotReservationsFull subscription",
msg = eventErr.msg
@ -364,28 +498,28 @@ method subscribeSlotReservationsFull*(
callback(event.requestId, event.slotIndex)
convertEthersError:
convertEthersError("Failed to subscribe to SlotReservationsFull events"):
let subscription = await market.contract.subscribe(SlotReservationsFull, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeFulfillment(
market: OnChainMarket, callback: OnFulfillment
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFulfilled) {.upraises: [].} =
proc onEvent(eventResult: ?!RequestFulfilled) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestFulfillment subscription", msg = eventErr.msg
return
callback(event.requestId)
convertEthersError:
convertEthersError("Failed to subscribe to RequestFulfilled events"):
let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeFulfillment(
market: OnChainMarket, requestId: RequestId, callback: OnFulfillment
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFulfilled) {.upraises: [].} =
proc onEvent(eventResult: ?!RequestFulfilled) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestFulfillment subscription", msg = eventErr.msg
return
@ -393,28 +527,28 @@ method subscribeFulfillment(
if event.requestId == requestId:
callback(event.requestId)
convertEthersError:
convertEthersError("Failed to subscribe to RequestFulfilled events"):
let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeRequestCancelled*(
market: OnChainMarket, callback: OnRequestCancelled
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestCancelled) {.upraises: [].} =
proc onEvent(eventResult: ?!RequestCancelled) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestCancelled subscription", msg = eventErr.msg
return
callback(event.requestId)
convertEthersError:
convertEthersError("Failed to subscribe to RequestCancelled events"):
let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeRequestCancelled*(
market: OnChainMarket, requestId: RequestId, callback: OnRequestCancelled
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestCancelled) {.upraises: [].} =
proc onEvent(eventResult: ?!RequestCancelled) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestCancelled subscription", msg = eventErr.msg
return
@ -422,28 +556,28 @@ method subscribeRequestCancelled*(
if event.requestId == requestId:
callback(event.requestId)
convertEthersError:
convertEthersError("Failed to subscribe to RequestCancelled events"):
let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeRequestFailed*(
market: OnChainMarket, callback: OnRequestFailed
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFailed) {.upraises: [].} =
proc onEvent(eventResult: ?!RequestFailed) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestFailed subscription", msg = eventErr.msg
return
callback(event.requestId)
convertEthersError:
convertEthersError("Failed to subscribe to RequestFailed events"):
let subscription = await market.contract.subscribe(RequestFailed, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeRequestFailed*(
market: OnChainMarket, requestId: RequestId, callback: OnRequestFailed
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFailed) {.upraises: [].} =
proc onEvent(eventResult: ?!RequestFailed) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestFailed subscription", msg = eventErr.msg
return
@ -451,21 +585,21 @@ method subscribeRequestFailed*(
if event.requestId == requestId:
callback(event.requestId)
convertEthersError:
convertEthersError("Failed to subscribe to RequestFailed events"):
let subscription = await market.contract.subscribe(RequestFailed, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeProofSubmission*(
market: OnChainMarket, callback: OnProofSubmitted
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!ProofSubmitted) {.upraises: [].} =
proc onEvent(eventResult: ?!ProofSubmitted) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in ProofSubmitted subscription", msg = eventErr.msg
return
callback(event.id)
convertEthersError:
convertEthersError("Failed to subscribe to ProofSubmitted events"):
let subscription = await market.contract.subscribe(ProofSubmitted, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
@ -475,13 +609,13 @@ method unsubscribe*(subscription: OnChainMarketSubscription) {.async.} =
method queryPastSlotFilledEvents*(
market: OnChainMarket, fromBlock: BlockTag
): Future[seq[SlotFilled]] {.async.} =
convertEthersError:
convertEthersError("Failed to get past SlotFilled events from block"):
return await market.contract.queryFilter(SlotFilled, fromBlock, BlockTag.latest)
method queryPastSlotFilledEvents*(
market: OnChainMarket, blocksAgo: int
): Future[seq[SlotFilled]] {.async.} =
convertEthersError:
convertEthersError("Failed to get past SlotFilled events"):
let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo)
return await market.queryPastSlotFilledEvents(fromBlock)
@ -489,21 +623,58 @@ method queryPastSlotFilledEvents*(
method queryPastSlotFilledEvents*(
market: OnChainMarket, fromTime: SecondsSince1970
): Future[seq[SlotFilled]] {.async.} =
convertEthersError:
convertEthersError("Failed to get past SlotFilled events from time"):
let fromBlock = await market.contract.provider.blockNumberForEpoch(fromTime)
return await market.queryPastSlotFilledEvents(BlockTag.init(fromBlock))
method queryPastStorageRequestedEvents*(
market: OnChainMarket, fromBlock: BlockTag
): Future[seq[StorageRequested]] {.async.} =
convertEthersError:
convertEthersError("Failed to get past StorageRequested events from block"):
return
await market.contract.queryFilter(StorageRequested, fromBlock, BlockTag.latest)
method queryPastStorageRequestedEvents*(
market: OnChainMarket, blocksAgo: int
): Future[seq[StorageRequested]] {.async.} =
convertEthersError:
convertEthersError("Failed to get past StorageRequested events"):
let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo)
return await market.queryPastStorageRequestedEvents(fromBlock)
method slotCollateral*(
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
): Future[?!UInt256] {.async: (raises: [CancelledError]).} =
let slotid = slotId(requestId, slotIndex)
try:
let slotState = await market.slotState(slotid)
without request =? await market.getRequest(requestId):
return failure newException(
MarketError, "Failure calculating the slotCollateral, cannot get the request"
)
return market.slotCollateral(request.ask.collateralPerSlot, slotState)
except MarketError as error:
error "Error when trying to calculate the slotCollateral", error = error.msg
return failure error
method slotCollateral*(
market: OnChainMarket, collateralPerSlot: UInt256, slotState: SlotState
): ?!UInt256 {.raises: [].} =
if slotState == SlotState.Repair:
without repairRewardPercentage =?
market.configuration .? collateral .? repairRewardPercentage:
return failure newException(
MarketError,
"Failure calculating the slotCollateral, cannot get the reward percentage",
)
return success (
collateralPerSlot - (collateralPerSlot * repairRewardPercentage.u256).div(
100.u256
)
)
return success(collateralPerSlot)
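The Repair branch above discounts the required collateral by the configured repair reward percentage. A small illustrative check of that arithmetic (plain Nim with stint; the numbers are made up, not taken from the contracts):

import pkg/stint

let
  collateralPerSlot = 1000.u256
  repairRewardPercentage = 10'u8
  # mirrors the formula above: collateral - (collateral * percentage) / 100
  discounted =
    collateralPerSlot - (collateralPerSlot * repairRewardPercentage.u256) div 100.u256

assert discounted == 900.u256 # filling a Repair slot needs 10% less collateral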

View File

@ -51,8 +51,8 @@ type
Proofs_ProofNotMissing* = object of SolidityError
Proofs_ProofNotRequired* = object of SolidityError
Proofs_ProofAlreadyMarkedMissing* = object of SolidityError
Proofs_InvalidProbability* = object of SolidityError
Periods_InvalidSecondsPerPeriod* = object of SolidityError
SlotReservations_ReservationNotAllowed* = object of SolidityError
proc configuration*(marketplace: Marketplace): MarketplaceConfig {.contract, view.}
proc token*(marketplace: Marketplace): Address {.contract, view.}
@ -67,7 +67,9 @@ proc requestStorage*(
errors: [
Marketplace_InvalidClientAddress, Marketplace_RequestAlreadyExists,
Marketplace_InvalidExpiry, Marketplace_InsufficientSlots,
Marketplace_InvalidMaxSlotLoss,
Marketplace_InvalidMaxSlotLoss, Marketplace_InsufficientDuration,
Marketplace_InsufficientProofProbability, Marketplace_InsufficientCollateral,
Marketplace_InsufficientReward, Marketplace_InvalidCid,
]
.}
@ -176,6 +178,17 @@ proc markProofAsMissing*(
]
.}
proc canMarkProofAsMissing*(
marketplace: Marketplace, id: SlotId, period: uint64
): Confirmable {.
contract,
errors: [
Marketplace_SlotNotAcceptingProofs, Proofs_PeriodNotEnded,
Proofs_ValidationTimedOut, Proofs_ProofNotMissing, Proofs_ProofNotRequired,
Proofs_ProofAlreadyMarkedMissing,
]
.}
proc reserveSlot*(
marketplace: Marketplace, requestId: RequestId, slotIndex: uint64
): Confirmable {.contract.}

View File

@ -1,6 +1,6 @@
import pkg/stint
import pkg/contractabi
import pkg/ethers/fields
import pkg/ethers/contracts/fields
type
Groth16Proof* = object

View File

@ -2,14 +2,13 @@ import std/hashes
import std/sequtils
import std/typetraits
import pkg/contractabi
import pkg/nimcrypto
import pkg/ethers/fields
import pkg/nimcrypto/keccak
import pkg/ethers/contracts/fields
import pkg/questionable/results
import pkg/stew/byteutils
import pkg/libp2p/[cid, multicodec]
import ../logutils
import ../utils/json
import ../clock
from ../errors import mapFailure
export contractabi

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,14 +7,16 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/algorithm
import std/net
import std/sequtils
import pkg/chronos
import pkg/libp2p/[cid, multicodec, routing_record, signed_envelope]
import pkg/questionable
import pkg/questionable/results
import pkg/stew/shims/net
import pkg/contractabi/address as ca
import pkg/codexdht/discv5/[routing_table, protocol as discv5]
from pkg/nimcrypto import keccak256
@ -41,6 +43,7 @@ type Discovery* = ref object of RootObj
# record to advertice node connection information, this carry any
# address that the node can be connected on
dhtRecord*: ?SignedPeerRecord # record to advertice DHT connection information
isStarted: bool
proc toNodeId*(cid: Cid): NodeId =
## Cid to discovery id
@ -54,10 +57,14 @@ proc toNodeId*(host: ca.Address): NodeId =
readUintBE[256](keccak256.digest(host.toArray).data)
proc findPeer*(d: Discovery, peerId: PeerId): Future[?PeerRecord] {.async.} =
proc findPeer*(
d: Discovery, peerId: PeerId
): Future[?PeerRecord] {.async: (raises: [CancelledError]).} =
trace "protocol.resolve..."
## Find peer using the given Discovery object
##
try:
let node = await d.protocol.resolve(toNodeId(peerId))
return
@ -65,29 +72,53 @@ proc findPeer*(d: Discovery, peerId: PeerId): Future[?PeerRecord] {.async.} =
node.get().record.data.some
else:
PeerRecord.none
except CancelledError as exc:
warn "Error finding peer", peerId = peerId, exc = exc.msg
raise exc
except CatchableError as exc:
warn "Error finding peer", peerId = peerId, exc = exc.msg
method find*(d: Discovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async, base.} =
return PeerRecord.none
method find*(
d: Discovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]), base.} =
## Find block providers
##
without providers =? (await d.protocol.getProviders(cid.toNodeId())).mapFailure, error:
try:
without providers =? (await d.protocol.getProviders(cid.toNodeId())).mapFailure,
error:
warn "Error finding providers for block", cid, error = error.msg
return providers.filterIt(not (it.data.peerId == d.peerId))
except CancelledError as exc:
warn "Error finding providers for block", cid, exc = exc.msg
raise exc
except CatchableError as exc:
warn "Error finding providers for block", cid, exc = exc.msg
method provide*(d: Discovery, cid: Cid) {.async, base.} =
method provide*(d: Discovery, cid: Cid) {.async: (raises: [CancelledError]), base.} =
## Provide a block Cid
##
try:
let nodes = await d.protocol.addProvider(cid.toNodeId(), d.providerRecord.get)
if nodes.len <= 0:
warn "Couldn't provide to any nodes!"
except CancelledError as exc:
warn "Error providing block", cid, exc = exc.msg
raise exc
except CatchableError as exc:
warn "Error providing block", cid, exc = exc.msg
method find*(
d: Discovery, host: ca.Address
): Future[seq[SignedPeerRecord]] {.async, base.} =
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]), base.} =
## Find host providers
##
try:
trace "Finding providers for host", host = $host
without var providers =? (await d.protocol.getProviders(host.toNodeId())).mapFailure,
error:
@ -102,22 +133,46 @@ method find*(
system.cmp[uint64](a.data.seqNo, b.data.seqNo)
return providers
except CancelledError as exc:
warn "Error finding providers for host", host = $host, exc = exc.msg
raise exc
except CatchableError as exc:
warn "Error finding providers for host", host = $host, exc = exc.msg
method provide*(d: Discovery, host: ca.Address) {.async, base.} =
method provide*(
d: Discovery, host: ca.Address
) {.async: (raises: [CancelledError]), base.} =
## Provide hosts
##
try:
trace "Providing host", host = $host
let nodes = await d.protocol.addProvider(host.toNodeId(), d.providerRecord.get)
if nodes.len > 0:
trace "Provided to nodes", nodes = nodes.len
except CancelledError as exc:
warn "Error providing host", host = $host, exc = exc.msg
raise exc
except CatchableError as exc:
warn "Error providing host", host = $host, exc = exc.msg
method removeProvider*(d: Discovery, peerId: PeerId): Future[void] {.base, gcsafe.} =
method removeProvider*(
d: Discovery, peerId: PeerId
): Future[void] {.base, async: (raises: [CancelledError]).} =
## Remove provider from providers table
##
trace "Removing provider", peerId
d.protocol.removeProvidersLocal(peerId)
try:
await d.protocol.removeProvidersLocal(peerId)
except CancelledError as exc:
warn "Error removing provider", peerId = peerId, exc = exc.msg
raise exc
except CatchableError as exc:
warn "Error removing provider", peerId = peerId, exc = exc.msg
except Exception as exc: # Something in discv5 is raising Exception
warn "Error removing provider", peerId = peerId, exc = exc.msg
raiseAssert("Unexpected Exception in removeProvider")
proc updateAnnounceRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
## Update providers record
@ -125,7 +180,7 @@ proc updateAnnounceRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
d.announceAddrs = @addrs
trace "Updating announce record", addrs = d.announceAddrs
info "Updating announce record", addrs = d.announceAddrs
d.providerRecord = SignedPeerRecord
.init(d.key, PeerRecord.init(d.peerId, d.announceAddrs))
.expect("Should construct signed record").some
@ -137,7 +192,7 @@ proc updateDhtRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
## Update providers record
##
trace "Updating Dht record", addrs = addrs
info "Updating Dht record", addrs = addrs
d.dhtRecord = SignedPeerRecord
.init(d.key, PeerRecord.init(d.peerId, @addrs))
.expect("Should construct signed record").some
@ -145,12 +200,23 @@ proc updateDhtRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
if not d.protocol.isNil:
d.protocol.updateRecord(d.dhtRecord).expect("Should update SPR")
proc start*(d: Discovery) {.async.} =
proc start*(d: Discovery) {.async: (raises: []).} =
try:
d.protocol.open()
await d.protocol.start()
d.isStarted = true
except CatchableError as exc:
error "Error starting discovery", exc = exc.msg
proc stop*(d: Discovery) {.async.} =
await d.protocol.closeWait()
proc stop*(d: Discovery) {.async: (raises: []).} =
if not d.isStarted:
warn "Discovery not started, skipping stop"
return
try:
await noCancel d.protocol.closeWait()
except CatchableError as exc:
error "Error stopping discovery", exc = exc.msg
proc new*(
T: type Discovery,

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,10 +7,7 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import ../stores

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,10 +7,7 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import std/[sugar, atomics, sequtils]
@ -25,6 +22,7 @@ import ../logutils
import ../manifest
import ../merkletree
import ../stores
import ../clock
import ../blocktype as bt
import ../utils
import ../utils/asynciter
@ -120,19 +118,22 @@ func indexToPos(steps, idx, step: int): int {.inline.} =
(idx - step) div steps
proc getPendingBlocks(
self: Erasure, manifest: Manifest, indicies: seq[int]
self: Erasure, manifest: Manifest, indices: seq[int]
): AsyncIter[(?!bt.Block, int)] =
## Get pending blocks iterator
##
var pendingBlocks: seq[Future[(?!bt.Block, int)]] = @[]
var
proc attachIndex(
fut: Future[?!bt.Block], i: int
): Future[(?!bt.Block, int)] {.async.} =
## avoids closure capture issues
return (await fut, i)
for blockIndex in indices:
# request blocks from the store
pendingBlocks = indicies.map(
(i: int) =>
self.store.getBlock(BlockAddress.init(manifest.treeCid, i)).map(
(r: ?!bt.Block) => (r, i)
) # Get the data blocks (first K)
)
let fut = self.store.getBlock(BlockAddress.init(manifest.treeCid, blockIndex))
pendingBlocks.add(attachIndex(fut, blockIndex))
proc isFinished(): bool =
pendingBlocks.len == 0
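The attachIndex helper above exists because closures created inside a loop capture the loop variable's location rather than its value, so all of them can end up observing the final index. A minimal self-contained sketch (not from this module) of the same parameter-passing fix:

# Passing the index as a proc parameter snapshots it per call, which is what
# attachIndex does for each pending block future.
proc makeGetter(v: int): proc(): int =
  result = proc(): int =
    v

var getters: seq[proc(): int]
for i in 0 ..< 3:
  getters.add(makeGetter(i))

for g in getters:
  echo g() # prints 0, 1, 2 as intended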
@ -168,16 +169,16 @@ proc prepareEncodingData(
strategy = params.strategy.init(
firstIndex = 0, lastIndex = params.rounded - 1, iterations = params.steps
)
indicies = toSeq(strategy.getIndicies(step))
indices = toSeq(strategy.getIndices(step))
pendingBlocksIter =
self.getPendingBlocks(manifest, indicies.filterIt(it < manifest.blocksCount))
self.getPendingBlocks(manifest, indices.filterIt(it < manifest.blocksCount))
var resolved = 0
for fut in pendingBlocksIter:
let (blkOrErr, idx) = await fut
without blk =? blkOrErr, err:
warn "Failed retreiving a block", treeCid = manifest.treeCid, idx, msg = err.msg
continue
warn "Failed retrieving a block", treeCid = manifest.treeCid, idx, msg = err.msg
return failure(err)
let pos = indexToPos(params.steps, idx, step)
shallowCopy(data[pos], if blk.isEmpty: emptyBlock else: blk.data)
@ -185,7 +186,7 @@ proc prepareEncodingData(
resolved.inc()
for idx in indicies.filterIt(it >= manifest.blocksCount):
for idx in indices.filterIt(it >= manifest.blocksCount):
let pos = indexToPos(params.steps, idx, step)
trace "Padding with empty block", idx
shallowCopy(data[pos], emptyBlock)
@ -218,8 +219,8 @@ proc prepareDecodingData(
strategy = encoded.protectedStrategy.init(
firstIndex = 0, lastIndex = encoded.blocksCount - 1, iterations = encoded.steps
)
indicies = toSeq(strategy.getIndicies(step))
pendingBlocksIter = self.getPendingBlocks(encoded, indicies)
indices = toSeq(strategy.getIndices(step))
pendingBlocksIter = self.getPendingBlocks(encoded, indices)
var
dataPieces = 0
@ -233,7 +234,7 @@ proc prepareDecodingData(
let (blkOrErr, idx) = await fut
without blk =? blkOrErr, err:
trace "Failed retreiving a block", idx, treeCid = encoded.treeCid, msg = err.msg
trace "Failed retrieving a block", idx, treeCid = encoded.treeCid, msg = err.msg
continue
let pos = indexToPos(encoded.steps, idx, step)
@ -310,10 +311,10 @@ proc leopardEncodeTask(tp: Taskpool, task: ptr EncodeTask) {.gcsafe.} =
else:
task[].success.store(true)
proc encodeAsync*(
proc asyncEncode*(
self: Erasure,
blockSize, blocksLen, parityLen: int,
data: ref seq[seq[byte]],
blocks: ref seq[seq[byte]],
parity: ptr UncheckedArray[ptr UncheckedArray[byte]],
): Future[?!void] {.async: (raises: [CancelledError]).} =
without threadPtr =? ThreadSignalPtr.new():
@ -322,13 +323,10 @@ proc encodeAsync*(
defer:
threadPtr.close().expect("closing once works")
var blockData = createDoubleArray(blocksLen, blockSize)
for i in 0 ..< data[].len:
copyMem(blockData[i], addr data[i][0], blockSize)
var data = makeUncheckedArray(blocks)
defer:
freeDoubleArray(blockData, blocksLen)
dealloc(data)
## Create an ecode task with block data
var task = EncodeTask(
@ -336,33 +334,26 @@ proc encodeAsync*(
blockSize: blockSize,
blocksLen: blocksLen,
parityLen: parityLen,
blocks: blockData,
blocks: data,
parity: parity,
signal: threadPtr,
)
let t = addr task
doAssert self.taskPool.numThreads > 1,
"Must have at least one separate thread or signal will never be fired"
self.taskPool.spawn leopardEncodeTask(self.taskPool, t)
self.taskPool.spawn leopardEncodeTask(self.taskPool, addr task)
let threadFut = threadPtr.wait()
try:
await threadFut.join()
except CatchableError as exc:
try:
await threadFut
except AsyncError as asyncExc:
return failure(asyncExc.msg)
finally:
if exc of CancelledError:
raise (ref CancelledError) exc
if joinErr =? catch(await threadFut.join()).errorOption:
if err =? catch(await noCancel threadFut).errorOption:
return failure(err)
if joinErr of CancelledError:
raise (ref CancelledError) joinErr
else:
return failure(exc.msg)
return failure(joinErr)
if not t.success.load():
return failure("Leopard encoding failed")
if not task.success.load():
return failure("Leopard encoding task failed")
success()
@ -392,6 +383,8 @@ proc encodeData(
var
data = seq[seq[byte]].new() # number of blocks to encode
parity = createDoubleArray(params.ecM, manifest.blockSize.int)
defer:
freeDoubleArray(parity, params.ecM)
data[].setLen(params.ecK)
# TODO: this is a tight blocking loop so we sleep here to allow
@ -409,15 +402,13 @@ proc encodeData(
try:
if err =? (
await self.encodeAsync(
await self.asyncEncode(
manifest.blockSize.int, params.ecK, params.ecM, data, parity
)
).errorOption:
return failure(err)
except CancelledError as exc:
raise exc
finally:
freeDoubleArray(parity, params.ecM)
var idx = params.rounded + step
for j in 0 ..< params.ecM:
@ -429,8 +420,8 @@ proc encodeData(
trace "Adding parity block", cid = blk.cid, idx
cids[idx] = blk.cid
if isErr (await self.store.putBlock(blk)):
trace "Unable to store block!", cid = blk.cid
if error =? (await self.store.putBlock(blk)).errorOption:
warn "Unable to store block!", cid = blk.cid, msg = error.msg
return failure("Unable to store block!")
idx.inc(params.steps)
@ -489,6 +480,7 @@ proc leopardDecodeTask(tp: Taskpool, task: ptr DecodeTask) {.gcsafe.} =
task[].erasure.decoderProvider(task[].blockSize, task[].blocksLen, task[].parityLen)
defer:
decoder.release()
discard task[].signal.fireSync()
if (
let res = decoder.decode(
@ -506,9 +498,7 @@ proc leopardDecodeTask(tp: Taskpool, task: ptr DecodeTask) {.gcsafe.} =
else:
task[].success.store(true)
discard task[].signal.fireSync()
proc decodeAsync*(
proc asyncDecode*(
self: Erasure,
blockSize, blocksLen, parityLen: int,
blocks, parity: ref seq[seq[byte]],
@ -521,24 +511,12 @@ proc decodeAsync*(
threadPtr.close().expect("closing once works")
var
blocksData = createDoubleArray(blocksLen, blockSize)
parityData = createDoubleArray(parityLen, blockSize)
for i in 0 ..< blocks[].len:
if blocks[i].len > 0:
copyMem(blocksData[i], addr blocks[i][0], blockSize)
else:
blocksData[i] = nil
for i in 0 ..< parity[].len:
if parity[i].len > 0:
copyMem(parityData[i], addr parity[i][0], blockSize)
else:
parityData[i] = nil
blockData = makeUncheckedArray(blocks)
parityData = makeUncheckedArray(parity)
defer:
freeDoubleArray(blocksData, blocksLen)
freeDoubleArray(parityData, parityLen)
dealloc(blockData)
dealloc(parityData)
## Create an decode task with block data
var task = DecodeTask(
@ -547,44 +525,33 @@ proc decodeAsync*(
blocksLen: blocksLen,
parityLen: parityLen,
recoveredLen: blocksLen,
blocks: blocksData,
blocks: blockData,
parity: parityData,
recovered: recovered,
signal: threadPtr,
)
# Hold the task pointer until the signal is received
let t = addr task
doAssert self.taskPool.numThreads > 1,
"Must have at least one separate thread or signal will never be fired"
self.taskPool.spawn leopardDecodeTask(self.taskPool, t)
self.taskPool.spawn leopardDecodeTask(self.taskPool, addr task)
let threadFut = threadPtr.wait()
try:
await threadFut.join()
except CatchableError as exc:
try:
await threadFut
except AsyncError as asyncExc:
return failure(asyncExc.msg)
finally:
if exc of CancelledError:
raise (ref CancelledError) exc
if joinErr =? catch(await threadFut.join()).errorOption:
if err =? catch(await noCancel threadFut).errorOption:
return failure(err)
if joinErr of CancelledError:
raise (ref CancelledError) joinErr
else:
return failure(exc.msg)
return failure(joinErr)
if not t.success.load():
return failure("Leopard encoding failed")
if not task.success.load():
return failure("Leopard decoding task failed")
success()
proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
## Decode a protected manifest into it's original
## manifest
##
## `encoded` - the encoded (protected) manifest to
## be recovered
##
proc decodeInternal(
self: Erasure, encoded: Manifest
): Future[?!(ref seq[Cid], seq[Natural])] {.async.} =
logScope:
steps = encoded.steps
rounded_blocks = encoded.rounded
@ -608,6 +575,8 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
data = seq[seq[byte]].new()
parityData = seq[seq[byte]].new()
recovered = createDoubleArray(encoded.ecK, encoded.blockSize.int)
defer:
freeDoubleArray(recovered, encoded.ecK)
data[].setLen(encoded.ecK) # set len to K
parityData[].setLen(encoded.ecM) # set len to M
@ -627,15 +596,13 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
trace "Erasure decoding data"
try:
if err =? (
await self.decodeAsync(
await self.asyncDecode(
encoded.blockSize.int, encoded.ecK, encoded.ecM, data, parityData, recovered
)
).errorOption:
return failure(err)
except CancelledError as exc:
raise exc
finally:
freeDoubleArray(recovered, encoded.ecK)
for i in 0 ..< encoded.ecK:
let idx = i * encoded.steps + step
@ -649,10 +616,12 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
return failure(error)
trace "Recovered block", cid = blk.cid, index = i
if isErr (await self.store.putBlock(blk)):
trace "Unable to store block!", cid = blk.cid
if error =? (await self.store.putBlock(blk)).errorOption:
warn "Unable to store block!", cid = blk.cid, msg = error.msg
return failure("Unable to store block!")
self.store.completeBlock(BlockAddress.init(encoded.treeCid, idx), blk)
cids[idx] = blk.cid
recoveredIndices.add(idx)
except CancelledError as exc:
@ -664,6 +633,19 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
finally:
decoder.release()
return (cids, recoveredIndices).success
proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
## Decode a protected manifest into it's original
## manifest
##
## `encoded` - the encoded (protected) manifest to
## be recovered
##
without (cids, recoveredIndices) =? (await self.decodeInternal(encoded)), err:
return failure(err)
without tree =? CodexTree.init(cids[0 ..< encoded.originalBlocksCount]), err:
return failure(err)
@ -685,6 +667,44 @@ proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
return decoded.success
proc repair*(self: Erasure, encoded: Manifest): Future[?!void] {.async.} =
## Repair a protected manifest by reconstructing the full dataset
##
## `encoded` - the encoded (protected) manifest to
## be repaired
##
without (cids, _) =? (await self.decodeInternal(encoded)), err:
return failure(err)
without tree =? CodexTree.init(cids[0 ..< encoded.originalBlocksCount]), err:
return failure(err)
without treeCid =? tree.rootCid, err:
return failure(err)
if treeCid != encoded.originalTreeCid:
return failure(
"Original tree root differs from the tree root computed out of recovered data"
)
if err =? (await self.store.putAllProofs(tree)).errorOption:
return failure(err)
without repaired =? (
await self.encode(
Manifest.new(encoded), encoded.ecK, encoded.ecM, encoded.protectedStrategy
)
), err:
return failure(err)
if repaired.treeCid != encoded.treeCid:
return failure(
"Original tree root differs from the repaired tree root encoded out of recovered data"
)
return success()
proc start*(self: Erasure) {.async.} =
return

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,7 +7,11 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/options
import std/sugar
import std/sequtils
import pkg/results
import pkg/chronos
@ -19,6 +23,8 @@ type
CodexError* = object of CatchableError # base codex error
CodexResult*[T] = Result[T, ref CodexError]
FinishedFailed*[T] = tuple[success: seq[Future[T]], failure: seq[Future[T]]]
template mapFailure*[T, V, E](
exp: Result[T, V], exc: typedesc[E]
): Result[T, ref CatchableError] =
@ -40,35 +46,43 @@ func toFailure*[T](exp: Option[T]): Result[T, ref CatchableError] {.inline.} =
else:
T.failure("Option is None")
# allFuturesThrowing was moved to the tests in libp2p
proc allFuturesThrowing*[T](args: varargs[Future[T]]): Future[void] =
var futs: seq[Future[T]]
for fut in args:
futs &= fut
proc call() {.async.} =
var first: ref CatchableError = nil
futs = await allFinished(futs)
for fut in futs:
if fut.failed:
let err = fut.readError()
if err of Defect:
raise err
proc allFinishedFailed*[T](
futs: auto
): Future[FinishedFailed[T]] {.async: (raises: [CancelledError]).} =
## Check if all futures have finished or failed
##
## TODO: wip, not sure if we want this - at the minimum,
## we should probably avoid the async transform
var res: FinishedFailed[T] = (@[], @[])
await allFutures(futs)
for f in futs:
if f.failed:
res.failure.add f
else:
if err of CancelledError:
raise err
if isNil(first):
first = err
if not isNil(first):
raise first
res.success.add f
return call()
return res
proc allFutureResult*[T](fut: seq[Future[T]]): Future[?!void] {.async.} =
try:
await allFuturesThrowing(fut)
except CancelledError as exc:
raise exc
except CatchableError as exc:
return failure(exc.msg)
proc allFinishedValues*[T](
futs: auto
): Future[?!seq[T]] {.async: (raises: [CancelledError]).} =
## If all futures have finished, return corresponding values,
## otherwise return failure
##
return success()
# wait for all futures to be either completed, failed or canceled
await allFutures(futs)
let numOfFailed = futs.countIt(it.failed)
if numOfFailed > 0:
return failure "Some futures failed (" & $numOfFailed & "))"
# here, we know there are no failed futures in "futs"
# and we are only interested in those that completed successfully
let values = collect:
for b in futs:
if b.finished:
b.value
return success values
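A rough usage sketch for the helper above (the imports, the doubling proc and the demo wrapper are illustrative assumptions, not part of the diff):

import pkg/chronos
import pkg/questionable
import pkg/questionable/results

proc double(x: int): Future[int] {.async.} =
  return x * 2

proc demo() {.async.} =
  let futs = @[double(1), double(2), double(3)]
  # allFinishedValues waits for every future and fails if any of them failed
  without values =? await allFinishedValues[int](futs), err:
    echo "some futures failed: ", err.msg
    return
  echo values # @[2, 4, 6] once all futures complete

waitFor demo()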

View File

@ -24,13 +24,17 @@ type
IndexingError* = object of CodexError
IndexingWrongIndexError* = object of IndexingError
IndexingWrongIterationsError* = object of IndexingError
IndexingWrongGroupCountError* = object of IndexingError
IndexingWrongPadBlockCountError* = object of IndexingError
IndexingStrategy* = object
strategyType*: StrategyType
strategyType*: StrategyType # Indexing strategy algorithm
firstIndex*: int # Lowest index that can be returned
lastIndex*: int # Highest index that can be returned
iterations*: int # getIndices(iteration) will run from 0 ..< iterations
step*: int
iterations*: int # Number of iteration steps (0 ..< iterations)
step*: int # Step size between generated indices
groupCount*: int # Number of groups to partition indices into
padBlockCount*: int # Number of padding blocks to append per group
func checkIteration(
self: IndexingStrategy, iteration: int
@ -44,39 +48,47 @@ func getIter(first, last, step: int): Iter[int] =
{.cast(noSideEffect).}:
Iter[int].new(first, last, step)
func getLinearIndicies(
self: IndexingStrategy, iteration: int
): Iter[int] {.raises: [IndexingError].} =
self.checkIteration(iteration)
func getLinearIndices(self: IndexingStrategy, iteration: int): Iter[int] =
let
first = self.firstIndex + iteration * self.step
last = min(first + self.step - 1, self.lastIndex)
getIter(first, last, 1)
func getSteppedIndicies(
self: IndexingStrategy, iteration: int
): Iter[int] {.raises: [IndexingError].} =
self.checkIteration(iteration)
func getSteppedIndices(self: IndexingStrategy, iteration: int): Iter[int] =
let
first = self.firstIndex + iteration
last = self.lastIndex
getIter(first, last, self.iterations)
func getIndicies*(
self: IndexingStrategy, iteration: int
): Iter[int] {.raises: [IndexingError].} =
func getStrategyIndices(self: IndexingStrategy, iteration: int): Iter[int] =
case self.strategyType
of StrategyType.LinearStrategy:
self.getLinearIndicies(iteration)
self.getLinearIndices(iteration)
of StrategyType.SteppedStrategy:
self.getSteppedIndicies(iteration)
self.getSteppedIndices(iteration)
func getIndices*(
self: IndexingStrategy, iteration: int
): Iter[int] {.raises: [IndexingError].} =
self.checkIteration(iteration)
{.cast(noSideEffect).}:
Iter[int].new(
iterator (): int {.gcsafe.} =
for value in self.getStrategyIndices(iteration):
yield value
for i in 0 ..< self.padBlockCount:
yield self.lastIndex + (iteration + 1) + i * self.groupCount
)
func init*(
strategy: StrategyType, firstIndex, lastIndex, iterations: int
strategy: StrategyType,
firstIndex, lastIndex, iterations: int,
groupCount = 0,
padBlockCount = 0,
): IndexingStrategy {.raises: [IndexingError].} =
if firstIndex > lastIndex:
raise newException(
@ -91,10 +103,24 @@ func init*(
"iterations (" & $iterations & ") must be greater than zero.",
)
if padBlockCount < 0:
raise newException(
IndexingWrongPadBlockCountError,
"padBlockCount (" & $padBlockCount & ") must be equal or greater than zero.",
)
if padBlockCount > 0 and groupCount <= 0:
raise newException(
IndexingWrongGroupCountError,
"groupCount (" & $groupCount & ") must be greater than zero.",
)
IndexingStrategy(
strategyType: strategy,
firstIndex: firstIndex,
lastIndex: lastIndex,
iterations: iterations,
step: divUp((lastIndex - firstIndex + 1), iterations),
groupCount: groupCount,
padBlockCount: padBlockCount,
)
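A rough usage sketch of the padded index generation above (assuming this module is imported; the expected values follow directly from the formulas shown):

# 10 data indices (0..9) over 5 iterations => step = divUp(10, 5) = 2,
# with 2 padding indices appended per iteration from a group of 5.
let strategy = StrategyType.LinearStrategy.init(
  firstIndex = 0, lastIndex = 9, iterations = 5, groupCount = 5, padBlockCount = 2
)

for idx in strategy.getIndices(0):
  echo idx
# Expected per the formulas above: 0, 1, then 9 + 1 + 0*5 = 10 and 9 + 1 + 1*5 = 15.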

View File

@ -11,7 +11,7 @@
## 4. Remove usages of `nim-json-serialization` from the codebase
## 5. Remove need to declare `writeValue` for new types
## 6. Remove need to [avoid importing or exporting `toJson`, `%`, `%*` to prevent
## conflicts](https://github.com/codex-storage/nim-codex/pull/645#issuecomment-1838834467)
## conflicts](https://github.com/logos-storage/logos-storage-nim/pull/645#issuecomment-1838834467)
##
## When declaring a new type, one should consider importing the `codex/logutils`
## module, and specifying `formatIt`. If textlines log output and json log output

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,11 +9,9 @@
# This module implements serialization and deserialization of Manifest
import pkg/upraises
import times
push:
{.upraises: [].}
{.push raises: [].}
import std/tables
import std/sequtils

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,10 +9,7 @@
# This module defines all operations on Manifest
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import pkg/libp2p/protobuf/minprotobuf
import pkg/libp2p/[cid, multihash, multicodec]

View File

@ -1,5 +1,4 @@
import pkg/chronos
import pkg/upraises
import pkg/questionable
import pkg/ethers/erc20
import ./contracts/requests
@ -18,17 +17,20 @@ export periods
type
Market* = ref object of RootObj
MarketError* = object of CodexError
SlotStateMismatchError* = object of MarketError
SlotReservationNotAllowedError* = object of MarketError
ProofInvalidError* = object of MarketError
Subscription* = ref object of RootObj
OnRequest* =
proc(id: RequestId, ask: StorageAsk, expiry: uint64) {.gcsafe, upraises: [].}
OnFulfillment* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
OnSlotFilled* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].}
OnSlotFreed* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].}
proc(id: RequestId, ask: StorageAsk, expiry: uint64) {.gcsafe, raises: [].}
OnFulfillment* = proc(requestId: RequestId) {.gcsafe, raises: [].}
OnSlotFilled* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].}
OnSlotFreed* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].}
OnSlotReservationsFull* =
proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, upraises: [].}
OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
OnProofSubmitted* = proc(id: SlotId) {.gcsafe, upraises: [].}
proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].}
OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, raises: [].}
OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, raises: [].}
OnProofSubmitted* = proc(id: SlotId) {.gcsafe, raises: [].}
ProofChallenge* = array[32, byte]
# Marketplace events -- located here due to the Market abstraction
@ -62,25 +64,42 @@ type
ProofSubmitted* = object of MarketplaceEvent
id*: SlotId
method getZkeyHash*(market: Market): Future[?string] {.base, async.} =
method loadConfig*(
market: Market
): Future[?!void] {.base, async: (raises: [CancelledError]).} =
raiseAssert("not implemented")
method getSigner*(market: Market): Future[Address] {.base, async.} =
method getZkeyHash*(
market: Market
): Future[?string] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method periodicity*(market: Market): Future[Periodicity] {.base, async.} =
method getSigner*(
market: Market
): Future[Address] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method proofTimeout*(market: Market): Future[uint64] {.base, async.} =
method periodicity*(
market: Market
): Future[Periodicity] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method repairRewardPercentage*(market: Market): Future[uint8] {.base, async.} =
method proofTimeout*(
market: Market
): Future[uint64] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method repairRewardPercentage*(
market: Market
): Future[uint8] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method requestDurationLimit*(market: Market): Future[uint64] {.base, async.} =
raiseAssert("not implemented")
method proofDowntime*(market: Market): Future[uint8] {.base, async.} =
method proofDowntime*(
market: Market
): Future[uint8] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method getPointer*(market: Market, slotId: SlotId): Future[uint8] {.base, async.} =
@ -91,7 +110,9 @@ proc inDowntime*(market: Market, slotId: SlotId): Future[bool] {.async.} =
let pntr = await market.getPointer(slotId)
return pntr < downtime
method requestStorage*(market: Market, request: StorageRequest) {.base, async.} =
method requestStorage*(
market: Market, request: StorageRequest
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method myRequests*(market: Market): Future[seq[RequestId]] {.base, async.} =
@ -102,7 +123,7 @@ method mySlots*(market: Market): Future[seq[SlotId]] {.base, async.} =
method getRequest*(
market: Market, id: RequestId
): Future[?StorageRequest] {.base, async.} =
): Future[?StorageRequest] {.base, async: (raises: [CancelledError]).} =
raiseAssert("not implemented")
method requestState*(
@ -110,7 +131,9 @@ method requestState*(
): Future[?RequestState] {.base, async.} =
raiseAssert("not implemented")
method slotState*(market: Market, slotId: SlotId): Future[SlotState] {.base, async.} =
method slotState*(
market: Market, slotId: SlotId
): Future[SlotState] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method getRequestEnd*(
@ -125,12 +148,12 @@ method requestExpiresAt*(
method getHost*(
market: Market, requestId: RequestId, slotIndex: uint64
): Future[?Address] {.base, async.} =
): Future[?Address] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method currentCollateral*(
market: Market, slotId: SlotId
): Future[UInt256] {.base, async.} =
): Future[UInt256] {.base, async: (raises: [MarketError, CancelledError]).} =
raiseAssert("not implemented")
method getActiveSlot*(market: Market, slotId: SlotId): Future[?Slot] {.base, async.} =
@ -142,13 +165,17 @@ method fillSlot*(
slotIndex: uint64,
proof: Groth16Proof,
collateral: UInt256,
) {.base, async.} =
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method freeSlot*(market: Market, slotId: SlotId) {.base, async.} =
method freeSlot*(
market: Market, slotId: SlotId
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method withdrawFunds*(market: Market, requestId: RequestId) {.base, async.} =
method withdrawFunds*(
market: Market, requestId: RequestId
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method subscribeRequests*(
@ -167,20 +194,24 @@ method getChallenge*(
): Future[ProofChallenge] {.base, async.} =
raiseAssert("not implemented")
method submitProof*(market: Market, id: SlotId, proof: Groth16Proof) {.base, async.} =
method submitProof*(
market: Market, id: SlotId, proof: Groth16Proof
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method markProofAsMissing*(market: Market, id: SlotId, period: Period) {.base, async.} =
raiseAssert("not implemented")
method canProofBeMarkedAsMissing*(
method markProofAsMissing*(
market: Market, id: SlotId, period: Period
): Future[bool] {.base, async.} =
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method canMarkProofAsMissing*(
market: Market, id: SlotId, period: Period
): Future[bool] {.base, async: (raises: [CancelledError]).} =
raiseAssert("not implemented")
method reserveSlot*(
market: Market, requestId: RequestId, slotIndex: uint64
) {.base, async.} =
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method canReserveSlot*(
@ -243,7 +274,7 @@ method subscribeProofSubmission*(
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method unsubscribe*(subscription: Subscription) {.base, async, upraises: [].} =
method unsubscribe*(subscription: Subscription) {.base, async.} =
raiseAssert("not implemented")
method queryPastSlotFilledEvents*(
@ -270,3 +301,13 @@ method queryPastStorageRequestedEvents*(
market: Market, blocksAgo: int
): Future[seq[StorageRequested]] {.base, async.} =
raiseAssert("not implemented")
method slotCollateral*(
market: Market, requestId: RequestId, slotIndex: uint64
): Future[?!UInt256] {.base, async: (raises: [CancelledError]).} =
raiseAssert("not implemented")
method slotCollateral*(
market: Market, collateralPerSlot: UInt256, slotState: SlotState
): ?!UInt256 {.base, gcsafe, raises: [].} =
raiseAssert("not implemented")

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,10 +7,7 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import pkg/libp2p
import pkg/questionable
@ -27,11 +24,11 @@ const MaxMerkleTreeSize = 100.MiBs.uint
const MaxMerkleProofSize = 1.MiBs.uint
proc encode*(self: CodexTree): seq[byte] =
var pb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
var pb = initProtoBuffer()
pb.write(1, self.mcodec.uint64)
pb.write(2, self.leavesCount.uint64)
for node in self.nodes:
var nodesPb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
var nodesPb = initProtoBuffer()
nodesPb.write(1, node)
nodesPb.finish()
pb.write(3, nodesPb)
@ -40,7 +37,7 @@ proc encode*(self: CodexTree): seq[byte] =
pb.buffer
proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree =
var pb = initProtoBuffer(data, maxSize = MaxMerkleTreeSize)
var pb = initProtoBuffer(data)
var mcodecCode: uint64
var leavesCount: uint64
discard ?pb.getField(1, mcodecCode).mapFailure
@ -63,13 +60,13 @@ proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree =
CodexTree.fromNodes(mcodec, nodes, leavesCount.int)
proc encode*(self: CodexProof): seq[byte] =
var pb = initProtoBuffer(maxSize = MaxMerkleProofSize)
var pb = initProtoBuffer()
pb.write(1, self.mcodec.uint64)
pb.write(2, self.index.uint64)
pb.write(3, self.nleaves.uint64)
for node in self.path:
var nodesPb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
var nodesPb = initProtoBuffer()
nodesPb.write(1, node)
nodesPb.finish()
pb.write(4, nodesPb)
@ -78,7 +75,7 @@ proc encode*(self: CodexProof): seq[byte] =
pb.buffer
proc decode*(_: type CodexProof, data: seq[byte]): ?!CodexProof =
var pb = initProtoBuffer(data, maxSize = MaxMerkleProofSize)
var pb = initProtoBuffer(data)
var mcodecCode: uint64
var index: uint64
var nleaves: uint64

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -15,7 +15,7 @@ import std/sequtils
import pkg/questionable
import pkg/questionable/results
import pkg/libp2p/[cid, multicodec, multihash]
import pkg/constantine/hashes
import ../../utils
import ../../rng
import ../../errors
@ -47,28 +47,6 @@ type
CodexProof* = ref object of ByteProof
mcodec*: MultiCodec
# CodeHashes is not exported from libp2p
# So we need to recreate it instead of
proc initMultiHashCodeTable(): Table[MultiCodec, MHash] {.compileTime.} =
for item in HashesList:
result[item.mcodec] = item
const CodeHashes = initMultiHashCodeTable()
func mhash*(mcodec: MultiCodec): ?!MHash =
let mhash = CodeHashes.getOrDefault(mcodec)
if isNil(mhash.coder):
return failure "Invalid multihash codec"
success mhash
func digestSize*(self: (CodexTree or CodexProof)): int =
## Number of leaves
##
self.mhash.size
func getProof*(self: CodexTree, index: int): ?!CodexProof =
var proof = CodexProof(mcodec: self.mcodec)
@ -128,13 +106,12 @@ proc `$`*(self: CodexProof): string =
"CodexProof(" & " nleaves: " & $self.nleaves & ", index: " & $self.index & ", path: " &
$self.path.mapIt(byteutils.toHex(it)) & ", mcodec: " & $self.mcodec & " )"
func compress*(x, y: openArray[byte], key: ByteTreeKey, mhash: MHash): ?!ByteHash =
func compress*(x, y: openArray[byte], key: ByteTreeKey, codec: MultiCodec): ?!ByteHash =
## Compress two hashes
##
var digest = newSeq[byte](mhash.size)
mhash.coder(@x & @y & @[key.byte], digest)
success digest
let input = @x & @y & @[key.byte]
let digest = ?MultiHash.digest(codec, input).mapFailure
success digest.digestBytes
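The rewritten compress delegates hashing to MultiHash.digest instead of the removed local coder table. An illustrative call against it (the 32-byte buffers, the use of default(ByteTreeKey) and the digest length are assumptions for the sketch, not part of the diff):

let
  left = newSeq[byte](32)
  right = newSeq[byte](32)
# default(ByteTreeKey) stands in for whatever key the tree layer dictates
without parent =? compress(left, right, default(ByteTreeKey), Sha256HashCodec), err:
  raiseAssert "sha2-256 digest should not fail: " & err.msg
assert parent.len == 32 # sha2-256 produces a 32-byte parent node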
func init*(
_: type CodexTree, mcodec: MultiCodec = Sha256HashCodec, leaves: openArray[ByteHash]
@ -143,12 +120,12 @@ func init*(
return failure "Empty leaves"
let
mhash = ?mcodec.mhash()
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} =
compress(x, y, key, mhash)
Zero: ByteHash = newSeq[byte](mhash.size)
compress(x, y, key, mcodec)
digestSize = ?mcodec.digestSize.mapFailure
Zero: ByteHash = newSeq[byte](digestSize)
if mhash.size != leaves[0].len:
if digestSize != leaves[0].len:
return failure "Invalid hash length"
var self = CodexTree(mcodec: mcodec, compress: compressor, zero: Zero)
@ -186,12 +163,12 @@ proc fromNodes*(
return failure "Empty nodes"
let
mhash = ?mcodec.mhash()
Zero = newSeq[byte](mhash.size)
digestSize = ?mcodec.digestSize.mapFailure
Zero = newSeq[byte](digestSize)
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} =
compress(x, y, key, mhash)
compress(x, y, key, mcodec)
if mhash.size != nodes[0].len:
if digestSize != nodes[0].len:
return failure "Invalid hash length"
var
@ -224,10 +201,10 @@ func init*(
return failure "Empty nodes"
let
mhash = ?mcodec.mhash()
Zero = newSeq[byte](mhash.size)
digestSize = ?mcodec.digestSize.mapFailure
Zero = newSeq[byte](digestSize)
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!seq[byte] {.noSideEffect.} =
compress(x, y, key, mhash)
compress(x, y, key, mcodec)
success CodexProof(
compress: compressor,

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

codex/multicodec_exts.nim (new file, 11 lines)
View File

@ -0,0 +1,11 @@
const CodecExts = [
("poseidon2-alt_bn_128-sponge-r2", 0xCD10), # bn128 rate 2 sponge
("poseidon2-alt_bn_128-merkle-2kb", 0xCD11), # bn128 2kb compress & merkleize
("poseidon2-alt_bn_128-keyed-compress", 0xCD12), # bn128 keyed compress]
("codex-manifest", 0xCD01),
("codex-block", 0xCD02),
("codex-root", 0xCD03),
("codex-slot-root", 0xCD04),
("codex-proving-root", 0xCD05),
("codex-slot-cell", 0xCD06),
]
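The table above only pairs codec names with their code points; a hypothetical helper (not part of the diff) shows how an entry would be looked up:

# Hypothetical lookup over the CodecExts table above.
proc codecIdByName(name: string): int =
  for (codecName, code) in CodecExts:
    if codecName == name:
      return code
  return -1

assert codecIdByName("codex-manifest") == 0xCD01
assert codecIdByName("unknown") == -1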

codex/multihash_exts.nim (new file, 40 lines)
View File

@ -0,0 +1,40 @@
import blscurve/bls_public_exports
import pkg/constantine/hashes
import poseidon2
proc sha2_256hash_constantine(data: openArray[byte], output: var openArray[byte]) =
# Using Constantine's SHA256 instead of mhash for optimal performance on 32-byte merkle node hashing
# See: https://github.com/logos-storage/logos-storage-nim/issues/1162
if len(output) > 0:
let digest = hashes.sha256.hash(data)
copyMem(addr output[0], addr digest[0], 32)
proc poseidon2_sponge_rate2(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = poseidon2.Sponge.digest(data).toBytes()
copyMem(addr output[0], addr digest[0], uint(len(output)))
proc poseidon2_merkle_2kb_sponge(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = poseidon2.SpongeMerkle.digest(data, 2048).toBytes()
copyMem(addr output[0], addr digest[0], uint(len(output)))
const Sha2256MultiHash* = MHash(
mcodec: multiCodec("sha2-256"),
size: sha256.sizeDigest,
coder: sha2_256hash_constantine,
)
const HashExts = [
# override sha2-256 hash function
Sha2256MultiHash,
MHash(
mcodec: multiCodec("poseidon2-alt_bn_128-sponge-r2"),
size: 32,
coder: poseidon2_sponge_rate2,
),
MHash(
mcodec: multiCodec("poseidon2-alt_bn_128-merkle-2kb"),
size: 32,
coder: poseidon2_merkle_2kb_sponge,
),
]
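A quick illustrative call against the sha2-256 coder defined above (the payload and the non-zero check are assumptions for the sketch):

# The coder contract: write the digest into a caller-provided buffer.
let payload = @[byte 1, 2, 3]
var output = newSeq[byte](Sha2256MultiHash.size)
sha2_256hash_constantine(payload, output)
assert output != newSeq[byte](Sha2256MultiHash.size) # digest of [1, 2, 3] is not all zeroes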

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -9,11 +9,11 @@
{.push raises: [].}
import
std/[options, os, strutils, times, net],
stew/shims/net as stewNet,
stew/[objects, results],
std/[options, os, strutils, times, net, atomics],
stew/[objects],
nat_traversal/[miniupnpc, natpmp],
json_serialization/std/net
json_serialization/std/net,
results
import pkg/chronos
import pkg/chronicles
@ -28,14 +28,29 @@ const
PORT_MAPPING_INTERVAL = 20 * 60 # seconds
NATPMP_LIFETIME = 60 * 60 # in seconds, must be longer than PORT_MAPPING_INTERVAL
var
upnp {.threadvar.}: Miniupnp
npmp {.threadvar.}: NatPmp
strategy = NatStrategy.NatNone
type PortMappings* = object
internalTcpPort: Port
externalTcpPort: Port
internalUdpPort: Port
externalUdpPort: Port
description: string
type PortMappingArgs =
tuple[strategy: NatStrategy, tcpPort, udpPort: Port, description: string]
type NatConfig* = object
case hasExtIp*: bool
of true: extIp*: IpAddress
of false: nat*: NatStrategy
var
upnp {.threadvar.}: Miniupnp
npmp {.threadvar.}: NatPmp
strategy = NatStrategy.NatNone
natClosed: Atomic[bool]
extIp: Option[IpAddress]
activeMappings: seq[PortMappings]
natThreads: seq[Thread[PortMappingArgs]] = @[]
logScope:
topics = "nat"
@ -107,7 +122,7 @@ proc getExternalIP*(natStrategy: NatStrategy, quiet = false): Option[IpAddress]
else:
try:
externalIP = parseIpAddress($(nires.value))
strategy = NatPmp
strategy = NatStrategy.NatPmp
return some(externalIP)
except ValueError as e:
error "parseIpAddress() exception", err = e.msg
@ -153,7 +168,7 @@ proc getPublicRoutePrefSrcOrExternalIP*(
return some(extIp.get)
proc doPortMapping(
tcpPort, udpPort: Port, description: string
strategy: NatStrategy, tcpPort, udpPort: Port, description: string
): Option[(Port, Port)] {.gcsafe.} =
var
extTcpPort: Port
@ -213,15 +228,10 @@ proc doPortMapping(
extUdpPort = extPort
return some((extTcpPort, extUdpPort))
type PortMappingArgs = tuple[tcpPort, udpPort: Port, description: string]
var
natThread: Thread[PortMappingArgs]
natCloseChan: Channel[bool]
proc repeatPortMapping(args: PortMappingArgs) {.thread, raises: [ValueError].} =
ignoreSignalsInThread()
let
(tcpPort, udpPort, description) = args
(strategy, tcpPort, udpPort, description) = args
interval = initDuration(seconds = PORT_MAPPING_INTERVAL)
sleepDuration = 1_000 # in ms, also the maximum delay after pressing Ctrl-C
@ -233,30 +243,23 @@ proc repeatPortMapping(args: PortMappingArgs) {.thread, raises: [ValueError].} =
# even though we don't need the external IP's value.
let ipres = getExternalIP(strategy, quiet = true)
if ipres.isSome:
while true:
while natClosed.load() == false:
let
# we're being silly here with this channel polling because we can't
# select on Nim channels like on Go ones
let (dataAvailable, _) =
try:
natCloseChan.tryRecv()
except Exception:
(false, false)
if dataAvailable:
return
else:
let currTime = now()
currTime = now()
if currTime >= (lastUpdate + interval):
discard doPortMapping(tcpPort, udpPort, description)
discard doPortMapping(strategy, tcpPort, udpPort, description)
lastUpdate = currTime
sleep(sleepDuration)
proc stopNatThread() {.noconv.} =
proc stopNatThreads() {.noconv.} =
# stop the thread
debug "Stopping NAT port mapping renewal threads"
try:
natCloseChan.send(true)
natThread.joinThread()
natCloseChan.close()
natClosed.store(true)
joinThreads(natThreads)
except Exception as exc:
warn "Failed to stop NAT port mapping renewal thread", exc = exc.msg
@ -268,12 +271,14 @@ proc stopNatThread() {.noconv.} =
# In Windows, a new thread is created for the signal handler, so we need to
# initialise our threadvars again.
let ipres = getExternalIP(strategy, quiet = true)
if ipres.isSome:
if strategy == NatStrategy.NatUpnp:
for entry in activeMappings:
for t in [
(externalTcpPort, internalTcpPort, UPNPProtocol.TCP),
(externalUdpPort, internalUdpPort, UPNPProtocol.UDP),
(entry.externalTcpPort, entry.internalTcpPort, UPNPProtocol.TCP),
(entry.externalUdpPort, entry.internalUdpPort, UPNPProtocol.UDP),
]:
let
(eport, iport, protocol) = t
@ -284,9 +289,10 @@ proc stopNatThread() {.noconv.} =
debug "UPnP: deleted port mapping",
externalPort = eport, internalPort = iport, protocol = protocol
elif strategy == NatStrategy.NatPmp:
for entry in activeMappings:
for t in [
(externalTcpPort, internalTcpPort, NatPmpProtocol.TCP),
(externalUdpPort, internalUdpPort, NatPmpProtocol.UDP),
(entry.externalTcpPort, entry.internalTcpPort, NatPmpProtocol.TCP),
(entry.externalUdpPort, entry.internalUdpPort, NatPmpProtocol.UDP),
]:
let
(eport, iport, protocol) = t
@ -299,23 +305,34 @@ proc stopNatThread() {.noconv.} =
debug "NAT-PMP: deleted port mapping",
externalPort = eport, internalPort = iport, protocol = protocol
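The channel polling is replaced above by a shared Atomic[bool] plus joinThreads over all renewal threads. A generic, self-contained sketch of that shutdown pattern (not specific to this module):

import std/[atomics, os]

var stopRequested: Atomic[bool]

proc renewalLoop() {.thread.} =
  # Worker threads poll the flag instead of receiving on a channel.
  while not stopRequested.load():
    sleep(100) # periodic work would go here

var workers: seq[Thread[void]]
workers.add(Thread[void]())
createThread(workers[^1], renewalLoop)

# On shutdown: flip the flag once, then wait for every worker to exit.
stopRequested.store(true)
joinThreads(workers)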
proc redirectPorts*(tcpPort, udpPort: Port, description: string): Option[(Port, Port)] =
result = doPortMapping(tcpPort, udpPort, description)
proc redirectPorts*(
strategy: NatStrategy, tcpPort, udpPort: Port, description: string
): Option[(Port, Port)] =
result = doPortMapping(strategy, tcpPort, udpPort, description)
if result.isSome:
(externalTcpPort, externalUdpPort) = result.get()
let (externalTcpPort, externalUdpPort) = result.get()
# needed by NAT-PMP on port mapping deletion
internalTcpPort = tcpPort
internalUdpPort = udpPort
# Port mapping works. Let's launch a thread that repeats it, in case the
# NAT-PMP lease expires or the router is rebooted and forgets all about
# these mappings.
natCloseChan.open()
activeMappings.add(
PortMappings(
internalTcpPort: tcpPort,
externalTcpPort: externalTcpPort,
internalUdpPort: udpPort,
externalUdpPort: externalUdpPort,
description: description,
)
)
try:
natThread.createThread(
repeatPortMapping, (externalTcpPort, externalUdpPort, description)
natThreads.add(Thread[PortMappingArgs]())
natThreads[^1].createThread(
repeatPortMapping, (strategy, externalTcpPort, externalUdpPort, description)
)
# atexit() in disguise
addQuitProc(stopNatThread)
if natThreads.len == 1:
# we should register the thread termination function only once
addQuitProc(stopNatThreads)
except Exception as exc:
warn "Failed to create NAT port mapping renewal thread", exc = exc.msg
@ -326,12 +343,15 @@ proc setupNat*(
## If any of this fails, we don't return any IP address but do return the
## original ports as best effort.
## TODO: Allow for tcp or udp port mapping to be optional.
let extIp = getExternalIP(natStrategy)
if extIp.isNone:
extIp = getExternalIP(natStrategy)
if extIp.isSome:
let ip = extIp.get
let extPorts = (
{.gcsafe.}:
redirectPorts(tcpPort = tcpPort, udpPort = udpPort, description = clientId)
redirectPorts(
strategy, tcpPort = tcpPort, udpPort = udpPort, description = clientId
)
)
if extPorts.isSome:
let (extTcpPort, extUdpPort) = extPorts.get()
@ -343,11 +363,6 @@ proc setupNat*(
warn "UPnP/NAT-PMP not available"
(ip: none(IpAddress), tcpPort: some(tcpPort), udpPort: some(udpPort))
type NatConfig* = object
case hasExtIp*: bool
of true: extIp*: IpAddress
of false: nat*: NatStrategy
proc setupAddress*(
natConfig: NatConfig, bindIp: IpAddress, tcpPort, udpPort: Port, clientId: string
): tuple[ip: Option[IpAddress], tcpPort, udpPort: Option[Port]] {.gcsafe.} =

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -44,7 +44,7 @@ import ./indexingstrategy
import ./utils
import ./errors
import ./logutils
import ./utils/asynciter
import ./utils/safeasynciter
import ./utils/trackedfutures
export logutils
@ -52,7 +52,10 @@ export logutils
logScope:
topics = "codex node"
const DefaultFetchBatch = 10
const
  DefaultFetchBatch = 1024
  MaxOnBatchBlocks = 128
  BatchRefillThreshold = 0.75 # refill once 75% of the window completes
type
Contracts* =
@ -78,7 +81,9 @@ type
CodexNodeRef* = ref CodexNode
OnManifest* = proc(cid: Cid, manifest: Manifest): void {.gcsafe, raises: [].}
BatchProc* = proc(blocks: seq[bt.Block]): Future[?!void] {.gcsafe, raises: [].}
BatchProc* =
proc(blocks: seq[bt.Block]): Future[?!void] {.async: (raises: [CancelledError]).}
OnBlockStoredProc = proc(chunk: seq[byte]): void {.gcsafe, raises: [].}
func switch*(self: CodexNodeRef): Switch =
return self.switch
@ -109,7 +114,9 @@ proc storeManifest*(
success blk
proc fetchManifest*(self: CodexNodeRef, cid: Cid): Future[?!Manifest] {.async.} =
proc fetchManifest*(
self: CodexNodeRef, cid: Cid
): Future[?!Manifest] {.async: (raises: [CancelledError]).} =
## Fetch and decode a manifest block
##
@ -144,7 +151,7 @@ proc connect*(
proc updateExpiry*(
self: CodexNodeRef, manifestCid: Cid, expiry: SecondsSince1970
): Future[?!void] {.async.} =
): Future[?!void] {.async: (raises: [CancelledError]).} =
without manifest =? await self.fetchManifest(manifestCid), error:
trace "Unable to fetch manifest for cid", manifestCid
return failure(error)
@ -153,7 +160,11 @@ proc updateExpiry*(
let ensuringFutures = Iter[int].new(0 ..< manifest.blocksCount).mapIt(
self.networkStore.localStore.ensureExpiry(manifest.treeCid, it, expiry)
)
await allFuturesThrowing(ensuringFutures)
let res = await allFinishedFailed[?!void](ensuringFutures)
if res.failure.len > 0:
trace "Some blocks failed to update expiry", len = res.failure.len
return failure("Some blocks failed to update expiry (" & $res.failure.len & " )")
except CancelledError as exc:
raise exc
except CatchableError as exc:
@ -168,7 +179,7 @@ proc fetchBatched*(
batchSize = DefaultFetchBatch,
onBatch: BatchProc = nil,
fetchLocal = true,
): Future[?!void] {.async, gcsafe.} =
): Future[?!void] {.async: (raises: [CancelledError]), gcsafe.} =
## Fetch blocks in batches of `batchSize`
##
@ -178,22 +189,61 @@ proc fetchBatched*(
# (i: int) => self.networkStore.getBlock(BlockAddress.init(cid, i))
# )
while not iter.finished:
let blocks = collect:
# Sliding window: maintain batchSize blocks in-flight
let
refillThreshold = int(float(batchSize) * BatchRefillThreshold)
refillSize = max(refillThreshold, 1)
maxCallbackBlocks = min(batchSize, MaxOnBatchBlocks)
var
blockData: seq[bt.Block]
failedBlocks = 0
successfulBlocks = 0
completedInWindow = 0
var addresses = newSeqOfCap[BlockAddress](batchSize)
for i in 0 ..< batchSize:
if not iter.finished:
let address = BlockAddress.init(cid, iter.next())
if not (await address in self.networkStore) or fetchLocal:
self.networkStore.getBlock(address)
if fetchLocal or not (await address in self.networkStore):
addresses.add(address)
if blocksErr =? (await allFutureResult(blocks)).errorOption:
return failure(blocksErr)
var blockResults = await self.networkStore.getBlocks(addresses)
if not onBatch.isNil and
batchErr =? (await onBatch(blocks.mapIt(it.read.get))).errorOption:
while not blockResults.finished:
without blk =? await blockResults.next(), err:
inc(failedBlocks)
continue
inc(successfulBlocks)
inc(completedInWindow)
if not onBatch.isNil:
blockData.add(blk)
if blockData.len >= maxCallbackBlocks:
if batchErr =? (await onBatch(blockData)).errorOption:
return failure(batchErr)
blockData = @[]
await sleepAsync(1.millis)
if completedInWindow >= refillThreshold and not iter.finished:
var refillAddresses = newSeqOfCap[BlockAddress](refillSize)
for i in 0 ..< refillSize:
if not iter.finished:
let address = BlockAddress.init(cid, iter.next())
if fetchLocal or not (await address in self.networkStore):
refillAddresses.add(address)
if refillAddresses.len > 0:
blockResults =
chain(blockResults, await self.networkStore.getBlocks(refillAddresses))
completedInWindow = 0
if failedBlocks > 0:
return failure("Some blocks failed (Result) to fetch (" & $failedBlocks & ")")
if not onBatch.isNil and blockData.len > 0:
if batchErr =? (await onBatch(blockData)).errorOption:
return failure(batchErr)
success()
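With the constants above (DefaultFetchBatch = 1024, MaxOnBatchBlocks = 128, BatchRefillThreshold = 0.75), the window asks for more addresses once roughly three quarters of the in-flight requests have completed, and onBatch never receives more than 128 blocks at a time. A stand-alone sketch of just that arithmetic, with block arrivals simulated by a plain loop:
const
  batchSize = 1024          # DefaultFetchBatch
  maxOnBatchBlocks = 128    # MaxOnBatchBlocks
  refillFraction = 0.75     # BatchRefillThreshold

let
  refillThreshold = int(float(batchSize) * refillFraction) # 768
  refillSize = max(refillThreshold, 1)
  maxCallbackBlocks = min(batchSize, maxOnBatchBlocks)      # 128

var completedInWindow = 0
for _ in 1 .. 3000:                 # stands in for blocks arriving from the store
  inc completedInWindow
  if completedInWindow >= refillThreshold:
    # here the real code queues up to refillSize new addresses
    completedInWindow = 0

echo (refillThreshold, refillSize, maxCallbackBlocks)       # (768, 768, 128)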
@ -203,7 +253,7 @@ proc fetchBatched*(
batchSize = DefaultFetchBatch,
onBatch: BatchProc = nil,
fetchLocal = true,
): Future[?!void] =
): Future[?!void] {.async: (raw: true, raises: [CancelledError]).} =
## Fetch manifest in batches of `batchSize`
##
@ -213,7 +263,31 @@ proc fetchBatched*(
let iter = Iter[int].new(0 ..< manifest.blocksCount)
self.fetchBatched(manifest.treeCid, iter, batchSize, onBatch, fetchLocal)
proc streamSingleBlock(self: CodexNodeRef, cid: Cid): Future[?!LPStream] {.async.} =
proc fetchDatasetAsync*(
self: CodexNodeRef, manifest: Manifest, fetchLocal = true
): Future[void] {.async: (raises: []).} =
## Asynchronously fetch a dataset in the background.
## This task will be tracked and cleaned up on node shutdown.
##
try:
if err =? (
await self.fetchBatched(
manifest = manifest, batchSize = DefaultFetchBatch, fetchLocal = fetchLocal
)
).errorOption:
error "Unable to fetch blocks", err = err.msg
except CancelledError as exc:
trace "Cancelled fetching blocks", exc = exc.msg
proc fetchDatasetAsyncTask*(self: CodexNodeRef, manifest: Manifest) =
## Start fetching a dataset in the background.
## The task will be tracked and cleaned up on node shutdown.
##
self.trackedFutures.track(self.fetchDatasetAsync(manifest, fetchLocal = false))
proc streamSingleBlock(
self: CodexNodeRef, cid: Cid
): Future[?!LPStream] {.async: (raises: [CancelledError]).} =
## Streams the contents of a single block.
##
trace "Streaming single block", cid = cid
@ -223,36 +297,31 @@ proc streamSingleBlock(self: CodexNodeRef, cid: Cid): Future[?!LPStream] {.async
without blk =? (await self.networkStore.getBlock(BlockAddress.init(cid))), err:
return failure(err)
proc streamOneBlock(): Future[void] {.async.} =
proc streamOneBlock(): Future[void] {.async: (raises: []).} =
try:
await stream.pushData(blk.data)
except CatchableError as exc:
trace "Unable to send block", cid, exc = exc.msg
discard
finally:
defer:
await stream.pushEof()
await stream.pushData(blk.data)
except CancelledError as exc:
trace "Streaming block cancelled", cid, exc = exc.msg
except LPStreamError as exc:
trace "Unable to send block", cid, exc = exc.msg
self.trackedFutures.track(streamOneBlock())
LPStream(stream).success
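The streamOneBlock helper above swaps the old try/finally for `defer: await stream.pushEof()`, so EOF is pushed on the success path, on cancellation, and on stream errors alike. A plain-Nim sketch of the same guarantee, using file I/O instead of LPStream:
proc writeOnce(path, data: string) =
  let f = open(path, fmWrite)
  defer: f.close()   # runs whether write() succeeds or raises
  f.write(data)

writeOnce("/tmp/defer-demo.txt", "hello")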
proc streamEntireDataset(
self: CodexNodeRef,
manifest: Manifest,
manifestCid: Cid,
prefetchBatch = DefaultFetchBatch,
): Future[?!LPStream] {.async.} =
self: CodexNodeRef, manifest: Manifest, manifestCid: Cid
): Future[?!LPStream] {.async: (raises: [CancelledError]).} =
## Streams the contents of the entire dataset described by the manifest.
## Background jobs (erasure decoding and prefetching) will be cancelled when
## the stream is closed.
##
trace "Retrieving blocks from manifest", manifestCid
let stream = LPStream(StoreStream.new(self.networkStore, manifest, pad = false))
var jobs: seq[Future[void]]
let stream = LPStream(StoreStream.new(self.networkStore, manifest, pad = false))
if manifest.protected:
# Retrieve, decode and save to the local store all EC groups
proc erasureJob(): Future[void] {.async.} =
proc erasureJob(): Future[void] {.async: (raises: []).} =
try:
# Spawn an erasure decoding job
let erasure = Erasure.new(
@ -260,40 +329,32 @@ proc streamEntireDataset(
)
without _ =? (await erasure.decode(manifest)), error:
error "Unable to erasure decode manifest", manifestCid, exc = error.msg
except CancelledError:
trace "Erasure job cancelled", manifestCid
except CatchableError as exc:
trace "Error erasure decoding manifest", manifestCid, exc = exc.msg
jobs.add(erasureJob())
proc prefetch(): Future[void] {.async.} =
try:
if err =?
(await self.fetchBatched(manifest, prefetchBatch, fetchLocal = false)).errorOption:
error "Unable to fetch blocks", err = err.msg
except CancelledError:
trace "Prefetch job cancelled"
except CatchableError as exc:
error "Error fetching blocks", exc = exc.msg
jobs.add(prefetch())
jobs.add(self.fetchDatasetAsync(manifest, fetchLocal = false))
# Monitor stream completion and cancel background jobs when done
proc monitorStream() {.async.} =
proc monitorStream() {.async: (raises: []).} =
try:
await stream.join()
except CancelledError as exc:
warn "Stream cancelled", exc = exc.msg
finally:
await allFutures(jobs.mapIt(it.cancelAndWait))
await noCancel allFutures(jobs.mapIt(it.cancelAndWait))
self.trackedFutures.track(monitorStream())
# Retrieve all blocks of the dataset sequentially from the local store or network
trace "Creating store stream for manifest", manifestCid
stream.success
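streamEntireDataset now collects its background work (erasure decoding plus the shared fetchDatasetAsync prefetch) in a single `jobs` sequence and tears it down with `noCancel`, so the cleanup itself cannot be interrupted. A minimal chronos sketch of that shutdown pattern, assuming pkg/chronos and a placeholder backgroundJob:
import std/sequtils
import pkg/chronos

proc backgroundJob() {.async.} =
  await sleepAsync(10.seconds)   # stands in for erasure decode / prefetch

proc demo() {.async.} =
  let jobs = @[backgroundJob(), backgroundJob()]
  await sleepAsync(100.millis)   # stands in for `await stream.join()`
  # cancel every job and wait for it, shielding the wait from cancellation
  await noCancel allFutures(jobs.mapIt(it.cancelAndWait()))

waitFor demo()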
proc retrieve*(
self: CodexNodeRef, cid: Cid, local: bool = true
): Future[?!LPStream] {.async.} =
): Future[?!LPStream] {.async: (raises: [CancelledError]).} =
## Retrieve by Cid a single block or an entire dataset described by manifest
##
@ -373,6 +434,7 @@ proc store*(
filename: ?string = string.none,
mimetype: ?string = string.none,
blockSize = DefaultBlockSize,
onBlockStored: OnBlockStoredProc = nil,
): Future[?!Cid] {.async.} =
## Save stream contents as dataset with given blockSize
## to nodes's BlockStore, and return Cid of its manifest
@ -402,6 +464,9 @@ proc store*(
if err =? (await self.networkStore.putBlock(blk)).errorOption:
error "Unable to store block", cid = blk.cid, err = err.msg
return failure(&"Unable to store block {blk.cid}")
if not onBlockStored.isNil:
onBlockStored(chunk)
except CancelledError as exc:
raise exc
except CatchableError as exc:
@ -449,11 +514,11 @@ proc store*(
return manifestBlk.cid.success
proc iterateManifests*(self: CodexNodeRef, onManifest: OnManifest) {.async.} =
without cids =? await self.networkStore.listBlocks(BlockType.Manifest):
without cidsIter =? await self.networkStore.listBlocks(BlockType.Manifest):
warn "Failed to listBlocks"
return
for c in cids:
for c in cidsIter:
if cid =? await c:
without blk =? await self.networkStore.getBlock(cid):
warn "Failed to get manifest block by cid", cid
@ -591,8 +656,13 @@ proc requestStorage*(
success purchase.id
proc onStore(
self: CodexNodeRef, request: StorageRequest, slotIdx: uint64, blocksCb: BlocksCb
): Future[?!void] {.async.} =
self: CodexNodeRef,
request: StorageRequest,
expiry: SecondsSince1970,
slotIdx: uint64,
blocksCb: BlocksCb,
isRepairing: bool = false,
): Future[?!void] {.async: (raises: [CancelledError]).} =
## store data in local storage
##
@ -613,19 +683,22 @@ proc onStore(
trace "Unable to create slots builder", err = err.msg
return failure(err)
let expiry = request.expiry
if slotIdx > manifest.slotRoots.high.uint64:
trace "Slot index not in manifest", slotIdx
return failure(newException(CodexError, "Slot index not in manifest"))
proc updateExpiry(blocks: seq[bt.Block]): Future[?!void] {.async.} =
proc updateExpiry(
blocks: seq[bt.Block]
): Future[?!void] {.async: (raises: [CancelledError]).} =
trace "Updating expiry for blocks", blocks = blocks.len
let ensureExpiryFutures =
blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry.toSecondsSince1970))
if updateExpiryErr =? (await allFutureResult(ensureExpiryFutures)).errorOption:
return failure(updateExpiryErr)
blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry))
let res = await allFinishedFailed[?!void](ensureExpiryFutures)
if res.failure.len > 0:
trace "Some blocks failed to update expiry", len = res.failure.len
return failure("Some blocks failed to update expiry (" & $res.failure.len & " )")
if not blocksCb.isNil and err =? (await blocksCb(blocks)).errorOption:
trace "Unable to process blocks", err = err.msg
@ -633,18 +706,33 @@ proc onStore(
return success()
if slotIdx > int.high.uint64:
error "Cannot cast slot index to int", slotIndex = slotIdx
return
if isRepairing:
trace "start repairing slot", slotIdx
try:
let erasure = Erasure.new(
self.networkStore, leoEncoderProvider, leoDecoderProvider, self.taskpool
)
if err =? (await erasure.repair(manifest)).errorOption:
error "Unable to erasure decode repairing manifest",
cid = manifest.treeCid, exc = err.msg
return failure(err)
except CatchableError as exc:
error "Error erasure decoding repairing manifest",
cid = manifest.treeCid, exc = exc.msg
return failure(exc.msg)
else:
without indexer =?
manifest.verifiableStrategy.init(0, manifest.blocksCount - 1, manifest.numSlots).catch,
err:
trace "Unable to create indexing strategy from protected manifest", err = err.msg
return failure(err)
if slotIdx > int.high.uint64:
error "Cannot cast slot index to int", slotIndex = slotIdx
return
without blksIter =? indexer.getIndicies(slotIdx.int).catch, err:
trace "Unable to get indicies from strategy", err = err.msg
without blksIter =? indexer.getIndices(slotIdx.int).catch, err:
trace "Unable to get indices from strategy", err = err.msg
return failure(err)
if err =? (
@ -657,8 +745,6 @@ proc onStore(
trace "Unable to build slot", err = err.msg
return failure(err)
trace "Slot successfully retrieved and reconstructed"
if cid =? slotRoot.toSlotCid() and cid != manifest.slotRoots[slotIdx]:
trace "Slot root mismatch",
manifest = manifest.slotRoots[slotIdx.int], recovered = slotRoot.toSlotCid()
@ -670,7 +756,7 @@ proc onStore(
proc onProve(
self: CodexNodeRef, slot: Slot, challenge: ProofChallenge
): Future[?!Groth16Proof] {.async.} =
): Future[?!Groth16Proof] {.async: (raises: [CancelledError]).} =
## Generates a proof for a given slot and challenge
##
@ -726,7 +812,7 @@ proc onProve(
proc onExpiryUpdate(
self: CodexNodeRef, rootCid: Cid, expiry: SecondsSince1970
): Future[?!void] {.async.} =
): Future[?!void] {.async: (raises: [CancelledError]).} =
return await self.updateExpiry(rootCid, expiry)
proc onClear(self: CodexNodeRef, request: StorageRequest, slotIndex: uint64) =
@ -745,13 +831,17 @@ proc start*(self: CodexNodeRef) {.async.} =
if hostContracts =? self.contracts.host:
hostContracts.sales.onStore = proc(
request: StorageRequest, slot: uint64, onBatch: BatchProc
): Future[?!void] =
self.onStore(request, slot, onBatch)
request: StorageRequest,
expiry: SecondsSince1970,
slot: uint64,
onBatch: BatchProc,
isRepairing: bool = false,
): Future[?!void] {.async: (raw: true, raises: [CancelledError]).} =
self.onStore(request, expiry, slot, onBatch, isRepairing)
hostContracts.sales.onExpiryUpdate = proc(
rootCid: Cid, expiry: SecondsSince1970
): Future[?!void] =
): Future[?!void] {.async: (raw: true, raises: [CancelledError]).} =
self.onExpiryUpdate(rootCid, expiry)
hostContracts.sales.onClear = proc(request: StorageRequest, slotIndex: uint64) =
@ -760,7 +850,7 @@ proc start*(self: CodexNodeRef) {.async.} =
hostContracts.sales.onProve = proc(
slot: Slot, challenge: ProofChallenge
): Future[?!Groth16Proof] =
): Future[?!Groth16Proof] {.async: (raw: true, raises: [CancelledError]).} =
# TODO: generate proof
self.onProve(slot, challenge)
@ -791,14 +881,11 @@ proc start*(self: CodexNodeRef) {.async.} =
self.contracts.validator = ValidatorInteractions.none
self.networkId = self.switch.peerInfo.peerId
notice "Started codex node", id = self.networkId, addrs = self.switch.peerInfo.addrs
notice "Started Storage node", id = self.networkId, addrs = self.switch.peerInfo.addrs
proc stop*(self: CodexNodeRef) {.async.} =
trace "Stopping node"
if not self.taskpool.isNil:
self.taskpool.shutdown()
await self.trackedFutures.cancelTracked()
if not self.engine.isNil:
@ -819,6 +906,7 @@ proc stop*(self: CodexNodeRef) {.async.} =
if not self.clock.isNil:
await self.clock.stop()
proc close*(self: CodexNodeRef) {.async.} =
if not self.networkStore.isNil:
await self.networkStore.close
@ -845,3 +933,10 @@ proc new*(
contracts: contracts,
trackedFutures: TrackedFutures(),
)
proc hasLocalBlock*(
self: CodexNodeRef, cid: Cid
): Future[bool] {.async: (raises: [CancelledError]).} =
## Returns true if the given Cid is present in the local store
return await (cid in self.networkStore.localStore)

View File

@ -30,12 +30,12 @@ method run*(
requestId = purchase.requestId
proc wait() {.async.} =
let done = newFuture[void]()
let done = newAsyncEvent()
proc callback(_: RequestId) =
done.complete()
done.fire()
let subscription = await market.subscribeFulfillment(request.id, callback)
await done
await done.wait()
await subscription.unsubscribe()
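Switching from a bare `newFuture[void]()` to `newAsyncEvent()` makes the fulfillment signal idempotent: firing an already-set event is harmless, while completing an already-completed future is an error. A small chronos sketch of the wait/fire handshake (assumes pkg/chronos; the callback here is hypothetical):
import pkg/chronos

proc demo() {.async.} =
  let done = newAsyncEvent()

  proc callback() =
    done.fire()     # safe to call more than once

  callback()        # simulate the subscription delivering the event twice
  callback()

  await done.wait() # returns immediately: the event is already set

waitFor demo()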
proc withTimeout(future: Future[void]) {.async.} =

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,10 +7,7 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import std/sequtils
import std/mimetypes
@ -65,30 +62,43 @@ proc formatManifestBlocks(node: CodexNodeRef): Future[JsonNode] {.async.} =
return %RestContentList.init(content)
proc isPending(resp: HttpResponseRef): bool =
## Checks that an HttpResponseRef object is still pending; i.e.,
## that no body has yet been sent. This helps us guard against calling
## sendBody(resp: HttpResponseRef, ...) twice, which is illegal.
return resp.getResponseState() == HttpResponseState.Empty
proc retrieveCid(
node: CodexNodeRef, cid: Cid, local: bool = true, resp: HttpResponseRef
): Future[RestApiResponse] {.async.} =
): Future[void] {.async: (raises: [CancelledError, HttpWriteError]).} =
## Download a file from the node in a streaming
## manner
##
var stream: LPStream
var lpStream: LPStream
var bytes = 0
try:
without stream =? (await node.retrieve(cid, local)), error:
if error of BlockNotFoundError:
resp.status = Http404
return await resp.sendBody("")
await resp.sendBody(
"The requested CID could not be retrieved (" & error.msg & ")."
)
return
else:
resp.status = Http500
return await resp.sendBody(error.msg)
await resp.sendBody(error.msg)
return
lpStream = stream
# It is ok to fetch again the manifest because it will hit the cache
without manifest =? (await node.fetchManifest(cid)), err:
error "Failed to fetch manifest", err = err.msg
resp.status = Http404
return await resp.sendBody(err.msg)
await resp.sendBody(err.msg)
return
if manifest.mimetype.isSome:
resp.setHeader("Content-Type", manifest.mimetype.get())
@ -103,7 +113,14 @@ proc retrieveCid(
else:
resp.setHeader("Content-Disposition", "attachment")
await resp.prepareChunked()
# For erasure-coded datasets, we need to return the _original_ length; i.e.,
# the length of the non-erasure-coded dataset, as that's what we will be
# returning to the client.
let contentLength =
if manifest.protected: manifest.originalDatasetSize else: manifest.datasetSize
resp.setHeader("Content-Length", $(contentLength.int))
await resp.prepare(HttpResponseStreamType.Plain)
while not stream.atEof:
var
@ -116,17 +133,20 @@ proc retrieveCid(
bytes += buff.len
await resp.sendChunk(addr buff[0], buff.len)
await resp.send(addr buff[0], buff.len)
await resp.finish()
codex_api_downloads.inc()
except CatchableError as exc:
except CancelledError as exc:
raise exc
except LPStreamError as exc:
warn "Error streaming blocks", exc = exc.msg
resp.status = Http500
return await resp.sendBody("")
if resp.isPending():
await resp.sendBody(exc.msg)
finally:
info "Sent bytes", cid = cid, bytes
if not stream.isNil:
await stream.close()
if not lpStream.isNil:
await lpStream.close()
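Because protected (erasure-coded) manifests carry parity data, the response above advertises the original upload size rather than the padded dataset size. A tiny illustration of that header choice; the field names mirror the manifest accessors used above, the byte counts are invented:
type ManifestInfo = object       # stand-in for the real Manifest
  protected: bool
  datasetSize: uint64            # size including erasure-coding overhead
  originalDatasetSize: uint64    # size of the user's original upload

proc contentLength(m: ManifestInfo): uint64 =
  if m.protected: m.originalDatasetSize else: m.datasetSize

let m = ManifestInfo(protected: true, datasetSize: 150_000, originalDatasetSize: 100_000)
assert contentLength(m) == 100_000  # the client only ever receives the original bytes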
proc buildCorsHeaders(
httpMethod: string, allowedOrigin: Option[string]
@ -160,7 +180,7 @@ proc getFilenameFromContentDisposition(contentDisposition: string): ?string =
proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRouter) =
let allowedOrigin = router.allowedOrigin # prevents capture inside of api definition
router.api(MethodOptions, "/api/codex/v1/data") do(
router.api(MethodOptions, "/api/storage/v1/data") do(
resp: HttpResponseRef
) -> RestApiResponse:
if corsOrigin =? allowedOrigin:
@ -172,7 +192,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
resp.status = Http204
await resp.sendBody("")
router.rawApi(MethodPost, "/api/codex/v1/data") do() -> RestApiResponse:
router.rawApi(MethodPost, "/api/storage/v1/data") do() -> RestApiResponse:
## Upload a file in a streaming manner
##
@ -234,11 +254,11 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
finally:
await reader.closeWait()
router.api(MethodGet, "/api/codex/v1/data") do() -> RestApiResponse:
router.api(MethodGet, "/api/storage/v1/data") do() -> RestApiResponse:
let json = await formatManifestBlocks(node)
return RestApiResponse.response($json, contentType = "application/json")
router.api(MethodOptions, "/api/codex/v1/data/{cid}") do(
router.api(MethodOptions, "/api/storage/v1/data/{cid}") do(
cid: Cid, resp: HttpResponseRef
) -> RestApiResponse:
if corsOrigin =? allowedOrigin:
@ -247,7 +267,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
resp.status = Http204
await resp.sendBody("")
router.api(MethodGet, "/api/codex/v1/data/{cid}") do(
router.api(MethodGet, "/api/storage/v1/data/{cid}") do(
cid: Cid, resp: HttpResponseRef
) -> RestApiResponse:
var headers = buildCorsHeaders("GET", allowedOrigin)
@ -263,11 +283,11 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
await node.retrieveCid(cid.get(), local = true, resp = resp)
router.api(MethodDelete, "/api/codex/v1/data/{cid}") do(
router.api(MethodDelete, "/api/storage/v1/data/{cid}") do(
cid: Cid, resp: HttpResponseRef
) -> RestApiResponse:
## Deletes either a single block or an entire dataset
## from the local node. Does nothing and returns 200
## from the local node. Does nothing and returns 204
## if the dataset is not locally available.
##
var headers = buildCorsHeaders("DELETE", allowedOrigin)
@ -284,7 +304,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
resp.status = Http204
await resp.sendBody("")
router.api(MethodPost, "/api/codex/v1/data/{cid}/network") do(
router.api(MethodPost, "/api/storage/v1/data/{cid}/network") do(
cid: Cid, resp: HttpResponseRef
) -> RestApiResponse:
## Download a file from the network to the local node
@ -299,20 +319,13 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
error "Failed to fetch manifest", err = err.msg
return RestApiResponse.error(Http404, err.msg, headers = headers)
proc fetchDatasetAsync(): Future[void] {.async.} =
try:
if err =? (await node.fetchBatched(manifest)).errorOption:
error "Unable to fetch dataset", cid = cid.get(), err = err.msg
except CatchableError as exc:
error "CatchableError when fetching dataset", cid = cid.get(), exc = exc.msg
discard
asyncSpawn fetchDatasetAsync()
# Start fetching the dataset in the background
node.fetchDatasetAsyncTask(manifest)
let json = %formatManifest(cid.get(), manifest)
return RestApiResponse.response($json, contentType = "application/json")
router.api(MethodGet, "/api/codex/v1/data/{cid}/network/stream") do(
router.api(MethodGet, "/api/storage/v1/data/{cid}/network/stream") do(
cid: Cid, resp: HttpResponseRef
) -> RestApiResponse:
## Download a file from the network in a streaming
@ -328,9 +341,10 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
resp.setCorsHeaders("GET", corsOrigin)
resp.setHeader("Access-Control-Headers", "X-Requested-With")
resp.setHeader("Access-Control-Expose-Headers", "Content-Disposition")
await node.retrieveCid(cid.get(), local = false, resp = resp)
router.api(MethodGet, "/api/codex/v1/data/{cid}/network/manifest") do(
router.api(MethodGet, "/api/storage/v1/data/{cid}/network/manifest") do(
cid: Cid, resp: HttpResponseRef
) -> RestApiResponse:
## Download only the manifest.
@ -348,7 +362,23 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
let json = %formatManifest(cid.get(), manifest)
return RestApiResponse.response($json, contentType = "application/json")
router.api(MethodGet, "/api/codex/v1/space") do() -> RestApiResponse:
router.api(MethodGet, "/api/storage/v1/data/{cid}/exists") do(
cid: Cid, resp: HttpResponseRef
) -> RestApiResponse:
## Only tests if the given CID is available in the local store
##
var headers = buildCorsHeaders("GET", allowedOrigin)
if cid.isErr:
return RestApiResponse.error(Http400, $cid.error(), headers = headers)
let cid = cid.get()
let hasCid = await node.hasLocalBlock(cid)
let json = %*{$cid: hasCid}
return RestApiResponse.response($json, contentType = "application/json")
router.api(MethodGet, "/api/storage/v1/space") do() -> RestApiResponse:
let json =
%RestRepoStore(
totalBlocks: repoStore.totalBlocks,
@ -361,7 +391,7 @@ proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRoute
proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
let allowedOrigin = router.allowedOrigin
router.api(MethodGet, "/api/codex/v1/sales/slots") do() -> RestApiResponse:
router.api(MethodGet, "/api/storage/v1/sales/slots") do() -> RestApiResponse:
var headers = buildCorsHeaders("GET", allowedOrigin)
## Returns active slots for the host
@ -379,7 +409,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
router.api(MethodGet, "/api/codex/v1/sales/slots/{slotId}") do(
router.api(MethodGet, "/api/storage/v1/sales/slots/{slotId}") do(
slotId: SlotId
) -> RestApiResponse:
## Returns active slot with id {slotId} for the host. Returns 404 if the
@ -409,7 +439,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
restAgent.toJson, contentType = "application/json", headers = headers
)
router.api(MethodGet, "/api/codex/v1/sales/availability") do() -> RestApiResponse:
router.api(MethodGet, "/api/storage/v1/sales/availability") do() -> RestApiResponse:
## Returns storage that is for sale
var headers = buildCorsHeaders("GET", allowedOrigin)
@ -431,7 +461,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
router.rawApi(MethodPost, "/api/codex/v1/sales/availability") do() -> RestApiResponse:
router.rawApi(MethodPost, "/api/storage/v1/sales/availability") do() -> RestApiResponse:
## Add available storage to sell.
## Every time Availability's offer finishes, its capacity is
## returned to the availability.
@ -460,7 +490,24 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
if restAv.totalSize == 0:
return RestApiResponse.error(
Http400, "Total size must be larger then zero", headers = headers
Http422, "Total size must be larger then zero", headers = headers
)
if restAv.duration == 0:
return RestApiResponse.error(
Http422, "duration must be larger then zero", headers = headers
)
if restAv.minPricePerBytePerSecond == 0:
return RestApiResponse.error(
Http422,
"minPricePerBytePerSecond must be larger then zero",
headers = headers,
)
if restAv.totalCollateral == 0:
return RestApiResponse.error(
Http422, "totalCollateral must be larger then zero", headers = headers
)
if not reservations.hasAvailable(restAv.totalSize):
@ -469,10 +516,19 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
without availability =? (
await reservations.createAvailability(
restAv.totalSize, restAv.duration, restAv.minPricePerBytePerSecond,
restAv.totalSize,
restAv.duration,
restAv.minPricePerBytePerSecond,
restAv.totalCollateral,
enabled = restAv.enabled |? true,
until = restAv.until |? 0,
)
), error:
if error of CancelledError:
raise error
if error of UntilOutOfBoundsError:
return RestApiResponse.error(Http422, error.msg)
return RestApiResponse.error(Http500, error.msg, headers = headers)
return RestApiResponse.response(
@ -485,7 +541,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
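For reference, a hypothetical request body for the availability endpoint above that passes the new 422 checks, including the optional `enabled` and `until` fields. Field names follow the RestAvailability object later in this diff; the values, and the exact wire encoding of the UInt256 fields, are assumptions:
import std/json

let body = %*{
  "totalSize": 1073741824,          # 1 GiB offered, must be > 0
  "duration": 2592000,              # 30 days in seconds, must be > 0
  "minPricePerBytePerSecond": 1,    # must be > 0
  "totalCollateral": 1000000,       # must be > 0
  "enabled": true,                  # accept new slots immediately
  "until": 0                        # 0 = no end-date restriction
}
echo body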
router.api(MethodOptions, "/api/codex/v1/sales/availability/{id}") do(
router.api(MethodOptions, "/api/storage/v1/sales/availability/{id}") do(
id: AvailabilityId, resp: HttpResponseRef
) -> RestApiResponse:
if corsOrigin =? allowedOrigin:
@ -494,7 +550,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
resp.status = Http204
await resp.sendBody("")
router.rawApi(MethodPatch, "/api/codex/v1/sales/availability/{id}") do(
router.rawApi(MethodPatch, "/api/storage/v1/sales/availability/{id}") do(
id: AvailabilityId
) -> RestApiResponse:
## Updates Availability.
@ -509,6 +565,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
## tokens) to be matched against the request's pricePerBytePerSecond
## totalCollateral - total collateral (in amount of
## tokens) that can be distributed among matching requests
try:
without contracts =? node.contracts.host:
return RestApiResponse.error(Http503, "Persistence is not enabled")
@ -533,17 +590,23 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
return RestApiResponse.error(Http500, error.msg)
if isSome restAv.freeSize:
return RestApiResponse.error(Http400, "Updating freeSize is not allowed")
return RestApiResponse.error(Http422, "Updating freeSize is not allowed")
if size =? restAv.totalSize:
if size == 0:
return RestApiResponse.error(Http422, "Total size must be larger then zero")
# we don't allow lowering the totalSize below the currently utilized size
if size < (availability.totalSize - availability.freeSize):
return RestApiResponse.error(
Http400,
Http422,
"New totalSize must be larger then current totalSize - freeSize, which is currently: " &
$(availability.totalSize - availability.freeSize),
)
if not reservations.hasAvailable(size):
return RestApiResponse.error(Http422, "Not enough storage quota")
availability.freeSize += size - availability.totalSize
availability.totalSize = size
@ -556,15 +619,26 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
if totalCollateral =? restAv.totalCollateral:
availability.totalCollateral = totalCollateral
if until =? restAv.until:
availability.until = until
if enabled =? restAv.enabled:
availability.enabled = enabled
if err =? (await reservations.update(availability)).errorOption:
if err of CancelledError:
raise err
if err of UntilOutOfBoundsError:
return RestApiResponse.error(Http422, err.msg)
else:
return RestApiResponse.error(Http500, err.msg)
return RestApiResponse.response(Http200)
return RestApiResponse.response(Http204)
except CatchableError as exc:
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500)
router.rawApi(MethodGet, "/api/codex/v1/sales/availability/{id}/reservations") do(
router.rawApi(MethodGet, "/api/storage/v1/sales/availability/{id}/reservations") do(
id: AvailabilityId
) -> RestApiResponse:
## Gets Availability's reservations.
@ -608,7 +682,7 @@ proc initSalesApi(node: CodexNodeRef, router: var RestRouter) =
proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
let allowedOrigin = router.allowedOrigin
router.rawApi(MethodPost, "/api/codex/v1/storage/request/{cid}") do(
router.rawApi(MethodPost, "/api/storage/v1/storage/request/{cid}") do(
cid: Cid
) -> RestApiResponse:
var headers = buildCorsHeaders("POST", allowedOrigin)
@ -637,10 +711,36 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
without params =? StorageRequestParams.fromJson(body), error:
return RestApiResponse.error(Http400, error.msg, headers = headers)
let expiry = params.expiry
if expiry <= 0 or expiry >= params.duration:
return RestApiResponse.error(
Http422,
"Expiry must be greater than zero and less than the request's duration",
headers = headers,
)
if params.proofProbability <= 0:
return RestApiResponse.error(
Http422, "Proof probability must be greater than zero", headers = headers
)
if params.collateralPerByte <= 0:
return RestApiResponse.error(
Http422, "Collateral per byte must be greater than zero", headers = headers
)
if params.pricePerBytePerSecond <= 0:
return RestApiResponse.error(
Http422,
"Price per byte per second must be greater than zero",
headers = headers,
)
let requestDurationLimit = await contracts.purchasing.market.requestDurationLimit
if params.duration > requestDurationLimit:
return RestApiResponse.error(
Http400,
Http422,
"Duration exceeds limit of " & $requestDurationLimit & " seconds",
headers = headers,
)
@ -650,13 +750,13 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
if tolerance == 0:
return RestApiResponse.error(
Http400, "Tolerance needs to be bigger then zero", headers = headers
Http422, "Tolerance needs to be bigger then zero", headers = headers
)
# prevent underflow
if tolerance > nodes:
return RestApiResponse.error(
Http400,
Http422,
"Invalid parameters: `tolerance` cannot be greater than `nodes`",
headers = headers,
)
@ -667,21 +767,11 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
# ensure leopard constraints of 1 < K ≥ M
if ecK <= 1 or ecK < ecM:
return RestApiResponse.error(
Http400,
Http422,
"Invalid parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`",
headers = headers,
)
without expiry =? params.expiry:
return RestApiResponse.error(Http400, "Expiry required", headers = headers)
if expiry <= 0 or expiry >= params.duration:
return RestApiResponse.error(
Http400,
"Expiry needs value bigger then zero and smaller then the request's duration",
headers = headers,
)
without purchaseId =?
await node.requestStorage(
cid, params.duration, params.proofProbability, nodes, tolerance,
@ -689,7 +779,7 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
), error:
if error of InsufficientBlocksError:
return RestApiResponse.error(
Http400,
Http422,
"Dataset too small for erasure parameters, need at least " &
$(ref InsufficientBlocksError)(error).minSize.int & " bytes",
headers = headers,
@ -702,7 +792,7 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
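Similarly, a hypothetical body for the storage request endpoint above that satisfies the new 422 validations. Field names match StorageRequestParams later in this diff; values and units are assumptions:
import std/json

let request = %*{
  "duration": 2592000,            # must not exceed requestDurationLimit
  "expiry": 86400,                # 0 < expiry < duration
  "proofProbability": 5,          # > 0
  "pricePerBytePerSecond": 1,     # > 0
  "collateralPerByte": 1,         # > 0
  "nodes": 5,                     # tolerance must not exceed nodes
  "tolerance": 2                  # > 0, and 1 < (nodes - tolerance) >= tolerance
}
echo request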
router.api(MethodGet, "/api/codex/v1/storage/purchases/{id}") do(
router.api(MethodGet, "/api/storage/v1/storage/purchases/{id}") do(
id: PurchaseId
) -> RestApiResponse:
var headers = buildCorsHeaders("GET", allowedOrigin)
@ -734,7 +824,7 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
router.api(MethodGet, "/api/codex/v1/storage/purchases") do() -> RestApiResponse:
router.api(MethodGet, "/api/storage/v1/storage/purchases") do() -> RestApiResponse:
var headers = buildCorsHeaders("GET", allowedOrigin)
try:
@ -756,7 +846,7 @@ proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
## various node management api's
##
router.api(MethodGet, "/api/codex/v1/spr") do() -> RestApiResponse:
router.api(MethodGet, "/api/storage/v1/spr") do() -> RestApiResponse:
## Returns node SPR in requested format, json or text.
##
var headers = buildCorsHeaders("GET", allowedOrigin)
@ -779,7 +869,7 @@ proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
router.api(MethodGet, "/api/codex/v1/peerid") do() -> RestApiResponse:
router.api(MethodGet, "/api/storage/v1/peerid") do() -> RestApiResponse:
## Returns node's peerId in requested format, json or text.
##
var headers = buildCorsHeaders("GET", allowedOrigin)
@ -798,7 +888,7 @@ proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
router.api(MethodGet, "/api/codex/v1/connect/{peerId}") do(
router.api(MethodGet, "/api/storage/v1/connect/{peerId}") do(
peerId: PeerId, addrs: seq[MultiAddress]
) -> RestApiResponse:
## Connect to a peer
@ -836,7 +926,7 @@ proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
let allowedOrigin = router.allowedOrigin
router.api(MethodGet, "/api/codex/v1/debug/info") do() -> RestApiResponse:
router.api(MethodGet, "/api/storage/v1/debug/info") do() -> RestApiResponse:
## Print rudimentary node information
##
var headers = buildCorsHeaders("GET", allowedOrigin)
@ -856,7 +946,11 @@ proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
"",
"announceAddresses": node.discovery.announceAddrs,
"table": table,
"codex": {"version": $codexVersion, "revision": $codexRevision},
"storage": {
"version": $codexVersion,
"revision": $codexRevision,
"contracts": $codexContractsRevision,
},
}
# return pretty json for human readability
@ -867,7 +961,7 @@ proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
router.api(MethodPost, "/api/codex/v1/debug/chronicles/loglevel") do(
router.api(MethodPost, "/api/storage/v1/debug/chronicles/loglevel") do(
level: Option[string]
) -> RestApiResponse:
## Set log level at run time
@ -893,8 +987,8 @@ proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) =
trace "Excepting processing request", exc = exc.msg
return RestApiResponse.error(Http500, headers = headers)
when codex_enable_api_debug_peers:
router.api(MethodGet, "/api/codex/v1/debug/peer/{peerId}") do(
when storage_enable_api_debug_peers:
router.api(MethodGet, "/api/storage/v1/debug/peer/{peerId}") do(
peerId: PeerId
) -> RestApiResponse:
var headers = buildCorsHeaders("GET", allowedOrigin)

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -17,7 +17,7 @@ type
proofProbability* {.serialize.}: UInt256
pricePerBytePerSecond* {.serialize.}: UInt256
collateralPerByte* {.serialize.}: UInt256
expiry* {.serialize.}: ?uint64
expiry* {.serialize.}: uint64
nodes* {.serialize.}: ?uint
tolerance* {.serialize.}: ?uint
@ -33,6 +33,8 @@ type
minPricePerBytePerSecond* {.serialize.}: UInt256
totalCollateral* {.serialize.}: UInt256
freeSize* {.serialize.}: ?uint64
enabled* {.serialize.}: ?bool
until* {.serialize.}: ?SecondsSince1970
RestSalesAgent* = object
state* {.serialize.}: string

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,10 +7,7 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import pkg/libp2p/crypto/crypto
import pkg/bearssl/rand

View File

@ -22,7 +22,7 @@ import ./utils/exceptions
## Sales holds a list of available storage that it may sell.
##
## When storage is requested on the market that matches availability, the Sales
## object will instruct the Codex node to persist the requested data. Once the
## object will instruct the Logos Storage node to persist the requested data. Once the
## data has been persisted, it uploads a proof of storage to the market in an
## attempt to win a storage contract.
##
@ -105,19 +105,15 @@ proc new*(
subscriptions: @[],
)
proc remove(sales: Sales, agent: SalesAgent) {.async.} =
proc remove(sales: Sales, agent: SalesAgent) {.async: (raises: []).} =
await agent.stop()
if sales.running:
sales.agents.keepItIf(it != agent)
proc cleanUp(
sales: Sales,
agent: SalesAgent,
returnBytes: bool,
reprocessSlot: bool,
returnedCollateral: ?UInt256,
processing: Future[void],
) {.async.} =
sales: Sales, agent: SalesAgent, reprocessSlot: bool, returnedCollateral: ?UInt256
) {.async: (raises: []).} =
let data = agent.data
logScope:
@ -132,9 +128,9 @@ proc cleanUp(
# if reservation for the SalesAgent was not created, then it means
# that the cleanUp was called before the sales process really started, so
# there are not really any bytes to be returned
if returnBytes and request =? data.request and reservation =? data.reservation:
if request =? data.request and reservation =? data.reservation:
if returnErr =? (
await sales.context.reservations.returnBytesToAvailability(
await noCancel sales.context.reservations.returnBytesToAvailability(
reservation.availabilityId, reservation.id, request.ask.slotSize
)
).errorOption:
@ -144,60 +140,58 @@ proc cleanUp(
# delete reservation and return reservation bytes back to the availability
if reservation =? data.reservation and
deleteErr =? (
await sales.context.reservations.deleteReservation(
await noCancel sales.context.reservations.deleteReservation(
reservation.id, reservation.availabilityId, returnedCollateral
)
).errorOption:
error "failure deleting reservation", error = deleteErr.msg
if data.slotIndex > uint16.high.uint64:
error "Cannot cast slot index to uint16", slotIndex = data.slotIndex
return
# Re-add items back into the queue to prevent small availabilities from
# draining the queue. Seen items will be ordered last.
if reprocessSlot and request =? data.request:
if reprocessSlot and request =? data.request and var item =? agent.data.slotQueueItem:
let queue = sales.context.slotQueue
var seenItem = SlotQueueItem.init(
data.requestId, data.slotIndex.uint16, data.ask, request.expiry, seen = true
)
item.seen = true
trace "pushing ignored item to queue, marked as seen"
if err =? queue.push(seenItem).errorOption:
if err =? queue.push(item).errorOption:
error "failed to readd slot to queue", errorType = $(type err), error = err.msg
await sales.remove(agent)
let fut = sales.remove(agent)
sales.trackedFutures.track(fut)
# signal back to the slot queue to cycle a worker
if not processing.isNil and not processing.finished():
processing.complete()
proc filled(
sales: Sales, request: StorageRequest, slotIndex: uint64, processing: Future[void]
) =
proc filled(sales: Sales, request: StorageRequest, slotIndex: uint64) =
if onSale =? sales.context.onSale:
onSale(request, slotIndex)
# signal back to the slot queue to cycle a worker
if not processing.isNil and not processing.finished():
processing.complete()
proc processSlot(sales: Sales, item: SlotQueueItem, done: Future[void]) =
proc processSlot(
sales: Sales, item: SlotQueueItem
) {.async: (raises: [CancelledError]).} =
debug "Processing slot from queue", requestId = item.requestId, slot = item.slotIndex
let agent =
newSalesAgent(sales.context, item.requestId, item.slotIndex, none StorageRequest)
let agent = newSalesAgent(
sales.context, item.requestId, item.slotIndex, none StorageRequest, some item
)
let completed = newAsyncEvent()
agent.onCleanUp = proc(
returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none
) {.async.} =
await sales.cleanUp(agent, returnBytes, reprocessSlot, returnedCollateral, done)
reprocessSlot = false, returnedCollateral = UInt256.none
) {.async: (raises: []).} =
trace "slot cleanup"
await sales.cleanUp(agent, reprocessSlot, returnedCollateral)
completed.fire()
agent.onFilled = some proc(request: StorageRequest, slotIndex: uint64) =
sales.filled(request, slotIndex, done)
trace "slot filled"
sales.filled(request, slotIndex)
completed.fire()
agent.start(SalePreparing())
sales.agents.add agent
trace "waiting for slot processing to complete"
await completed.wait()
trace "slot processing completed"
proc deleteInactiveReservations(sales: Sales, activeSlots: seq[Slot]) {.async.} =
let reservations = sales.context.reservations
without reservs =? await reservations.all(Reservation):
@ -256,12 +250,9 @@ proc load*(sales: Sales) {.async.} =
newSalesAgent(sales.context, slot.request.id, slot.slotIndex, some slot.request)
agent.onCleanUp = proc(
returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none
) {.async.} =
# since workers are not being dispatched, this future has not been created
# by a worker. Create a dummy one here so we can call sales.cleanUp
let done: Future[void] = nil
await sales.cleanUp(agent, returnBytes, reprocessSlot, returnedCollateral, done)
reprocessSlot = false, returnedCollateral = UInt256.none
) {.async: (raises: []).} =
await sales.cleanUp(agent, reprocessSlot, returnedCollateral)
# There is no need to assign agent.onFilled as slots loaded from `mySlots`
# are inherently already filled and so assigning agent.onFilled would be
@ -270,7 +261,9 @@ proc load*(sales: Sales) {.async.} =
agent.start(SaleUnknown())
sales.agents.add agent
proc onAvailabilityAdded(sales: Sales, availability: Availability) {.async.} =
proc OnAvailabilitySaved(
sales: Sales, availability: Availability
) {.async: (raises: []).} =
## When availabilities are modified or added, the queue should be unpaused if
## it was paused and any slots in the queue should have their `seen` flag
## cleared.
@ -283,7 +276,7 @@ proc onAvailabilityAdded(sales: Sales, availability: Availability) {.async.} =
proc onStorageRequested(
sales: Sales, requestId: RequestId, ask: StorageAsk, expiry: uint64
) =
) {.raises: [].} =
logScope:
topics = "marketplace sales onStorageRequested"
requestId
@ -294,7 +287,14 @@ proc onStorageRequested(
trace "storage requested, adding slots to queue"
without items =? SlotQueueItem.init(requestId, ask, expiry).catch, err:
let market = sales.context.market
without collateral =? market.slotCollateral(ask.collateralPerSlot, SlotState.Free),
err:
error "Request failure, unable to calculate collateral", error = err.msg
return
without items =? SlotQueueItem.init(requestId, ask, expiry, collateral).catch, err:
if err of SlotsOutOfRangeError:
warn "Too many slots, cannot add to queue"
else:
@ -324,39 +324,54 @@ proc onSlotFreed(sales: Sales, requestId: RequestId, slotIndex: uint64) =
let market = context.market
let queue = context.slotQueue
try:
without request =? (await market.getRequest(requestId)), err:
error "unknown request in contract", error = err.msgDetail
return
# Take the repairing state into consideration to calculate the collateral.
# This is particularly needed because it will affect the priority in the queue
# and we want to give the user the ability to tweak the parameters.
# Adding the repairing state directly in the queue priority calculation
# would not allow this flexibility.
without collateral =?
market.slotCollateral(request.ask.collateralPerSlot, SlotState.Repair), err:
error "Failed to add freed slot to queue: unable to calculate collateral",
error = err.msg
return
if slotIndex > uint16.high.uint64:
error "Cannot cast slot index to uint16, value = ", slotIndex
return
# first attempt to populate request using existing metadata in queue
without var found =? queue.populateItem(requestId, slotIndex.uint16):
trace "no existing request metadata, getting request info from contract"
# if there's no existing slot for that request, retrieve the request
# from the contract.
try:
without request =? await market.getRequest(requestId):
error "unknown request in contract"
without slotQueueItem =?
SlotQueueItem.init(request, slotIndex.uint16, collateral = collateral).catch,
err:
warn "Too many slots, cannot add to queue", error = err.msgDetail
return
found = SlotQueueItem.init(request, slotIndex.uint16)
except CancelledError:
discard # do not propagate as addSlotToQueue was asyncSpawned
except CatchableError as e:
error "failed to get request from contract and add slots to queue",
error = e.msgDetail
if err =? queue.push(found).errorOption:
error "failed to push slot items to queue", error = err.msgDetail
if err =? queue.push(slotQueueItem).errorOption:
if err of SlotQueueItemExistsError:
error "Failed to push item to queue because it already exists",
error = err.msgDetail
elif err of QueueNotRunningError:
warn "Failed to push item to queue because queue is not running",
error = err.msgDetail
except CancelledError as e:
trace "sales.addSlotToQueue was cancelled"
# We could get rid of this by adding the storage ask in the SlotFreed event,
# so we would not need to call getRequest to get the collateralPerSlot.
let fut = addSlotToQueue()
sales.trackedFutures.track(fut)
asyncSpawn fut
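The freed-slot path above computes the collateral with SlotState.Repair before queueing, so the repair case can be priced differently from a free slot and land differently in the queue ordering. A schematic sketch of that idea; the discount here is purely hypothetical, the real pricing lives in the market/contracts layer:
type SlotState = enum Free, Repair   # stand-in for the marketplace enum

proc slotCollateral(collateralPerSlot: int, state: SlotState): int =
  # hypothetical pricing rule, for illustration only
  if state == Repair: collateralPerSlot div 2 else: collateralPerSlot

let freeCollateral = slotCollateral(1000, Free)     # 1000
let repairCollateral = slotCollateral(1000, Repair) # 500
# a different collateral changes how the slot queue orders this item
assert repairCollateral < freeCollateral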
proc subscribeRequested(sales: Sales) {.async.} =
let context = sales.context
let market = context.market
proc onStorageRequested(requestId: RequestId, ask: StorageAsk, expiry: uint64) =
proc onStorageRequested(
requestId: RequestId, ask: StorageAsk, expiry: uint64
) {.raises: [].} =
sales.onStorageRequested(requestId, ask, expiry)
try:
@ -488,16 +503,20 @@ proc startSlotQueue(sales: Sales) =
let slotQueue = sales.context.slotQueue
let reservations = sales.context.reservations
slotQueue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} =
slotQueue.onProcessSlot = proc(item: SlotQueueItem) {.async: (raises: []).} =
trace "processing slot queue item", reqId = item.requestId, slotIdx = item.slotIndex
sales.processSlot(item, done)
try:
await sales.processSlot(item)
except CancelledError:
discard
slotQueue.start()
proc onAvailabilityAdded(availability: Availability) {.async.} =
await sales.onAvailabilityAdded(availability)
proc OnAvailabilitySaved(availability: Availability) {.async: (raises: []).} =
if availability.enabled:
await sales.OnAvailabilitySaved(availability)
reservations.onAvailabilityAdded = onAvailabilityAdded
reservations.OnAvailabilitySaved = OnAvailabilitySaved
proc subscribe(sales: Sales) {.async.} =
await sales.subscribeRequested()

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -27,17 +27,15 @@
## | UInt256 | totalRemainingCollateral | |
## +---------------------------------------------------+
import pkg/upraises
push:
{.upraises: [].}
{.push raises: [], gcsafe.}
import std/sequtils
import std/sugar
import std/typetraits
import std/sequtils
import std/times
import pkg/chronos
import pkg/datastore
import pkg/nimcrypto
import pkg/questionable
import pkg/questionable/results
import pkg/stint
@ -54,8 +52,10 @@ import ../units
export requests
export logutils
from nimcrypto import randomBytes
logScope:
topics = "sales reservations"
topics = "marketplace sales reservations"
type
AvailabilityId* = distinct array[32, byte]
@ -70,6 +70,12 @@ type
minPricePerBytePerSecond* {.serialize.}: UInt256
totalCollateral {.serialize.}: UInt256
totalRemainingCollateral* {.serialize.}: UInt256
# If set to false, the availability will not accept new slots.
# If enabled, it will not impact any existing slots that are already being hosted.
enabled* {.serialize.}: bool
# Specifies the latest timestamp after which the availability will no longer host any slots.
# If set to 0, there will be no restrictions.
until* {.serialize.}: SecondsSince1970
Reservation* = ref object
id* {.serialize.}: ReservationId
@ -77,17 +83,18 @@ type
size* {.serialize.}: uint64
requestId* {.serialize.}: RequestId
slotIndex* {.serialize.}: uint64
validUntil* {.serialize.}: SecondsSince1970
Reservations* = ref object of RootObj
availabilityLock: AsyncLock
# Lock for protecting assertions of availability's sizes when searching for matching availability
repo: RepoStore
onAvailabilityAdded: ?OnAvailabilityAdded
OnAvailabilitySaved: ?OnAvailabilitySaved
GetNext* = proc(): Future[?seq[byte]] {.upraises: [], gcsafe, closure.}
IterDispose* = proc(): Future[?!void] {.gcsafe, closure.}
OnAvailabilityAdded* =
proc(availability: Availability): Future[void] {.upraises: [], gcsafe.}
GetNext* = proc(): Future[?seq[byte]] {.async: (raises: [CancelledError]), closure.}
IterDispose* = proc(): Future[?!void] {.async: (raises: [CancelledError]), closure.}
OnAvailabilitySaved* =
proc(availability: Availability): Future[void] {.async: (raises: []).}
StorableIter* = ref object
finished*: bool
next*: GetNext
@ -102,13 +109,20 @@ type
SerializationError* = object of ReservationsError
UpdateFailedError* = object of ReservationsError
BytesOutOfBoundsError* = object of ReservationsError
UntilOutOfBoundsError* = object of ReservationsError
const
SalesKey = (CodexMetaKey / "sales").tryGet # TODO: move to sales module
ReservationsKey = (SalesKey / "reservations").tryGet
proc hash*(x: AvailabilityId): Hash {.borrow.}
proc all*(self: Reservations, T: type SomeStorableObject): Future[?!seq[T]] {.async.}
proc all*(
self: Reservations, T: type SomeStorableObject
): Future[?!seq[T]] {.async: (raises: [CancelledError]).}
proc all*(
self: Reservations, T: type SomeStorableObject, availabilityId: AvailabilityId
): Future[?!seq[T]] {.async: (raises: [CancelledError]).}
template withLock(lock, body) =
try:
@ -128,6 +142,8 @@ proc init*(
duration: uint64,
minPricePerBytePerSecond: UInt256,
totalCollateral: UInt256,
enabled: bool,
until: SecondsSince1970,
): Availability =
var id: array[32, byte]
doAssert randomBytes(id) == 32
@ -139,6 +155,8 @@ proc init*(
minPricePerBytePerSecond: minPricePerBytePerSecond,
totalCollateral: totalCollateral,
totalRemainingCollateral: totalCollateral,
enabled: enabled,
until: until,
)
func totalCollateral*(self: Availability): UInt256 {.inline.} =
@ -154,6 +172,7 @@ proc init*(
size: uint64,
requestId: RequestId,
slotIndex: uint64,
validUntil: SecondsSince1970,
): Reservation =
var id: array[32, byte]
doAssert randomBytes(id) == 32
@ -163,6 +182,7 @@ proc init*(
size: size,
requestId: requestId,
slotIndex: slotIndex,
validUntil: validUntil,
)
func toArray(id: SomeStorableId): array[32, byte] =
@ -189,10 +209,10 @@ logutils.formatIt(LogFormat.textLines, SomeStorableId):
logutils.formatIt(LogFormat.json, SomeStorableId):
it.to0xHexLog
proc `onAvailabilityAdded=`*(
self: Reservations, onAvailabilityAdded: OnAvailabilityAdded
proc `OnAvailabilitySaved=`*(
self: Reservations, OnAvailabilitySaved: OnAvailabilitySaved
) =
self.onAvailabilityAdded = some onAvailabilityAdded
self.OnAvailabilitySaved = some OnAvailabilitySaved
func key*(id: AvailabilityId): ?!Key =
## sales / reservations / <availabilityId>
@ -206,6 +226,11 @@ func key*(availability: Availability): ?!Key =
return availability.id.key
func maxCollateralPerByte*(availability: Availability): UInt256 =
# If freeSize happens to be zero, by convention the maxCollateralPerByte
# should be equal to totalRemainingCollateral.
if availability.freeSize == 0.uint64:
return availability.totalRemainingCollateral
return availability.totalRemainingCollateral div availability.freeSize.stuint(256)
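The guard above avoids dividing by zero: when no free bytes remain, the per-byte maximum simply defaults to the whole remaining collateral. A worked example with made-up numbers (pkg/stint for UInt256, as in this module):
import pkg/stint

proc maxCollateralPerByte(totalRemainingCollateral: UInt256, freeSize: uint64): UInt256 =
  if freeSize == 0'u64:
    return totalRemainingCollateral
  totalRemainingCollateral div freeSize.stuint(256)

assert maxCollateralPerByte(1000.u256, 250'u64) == 4.u256   # 1000 div 250
assert maxCollateralPerByte(1000.u256, 0'u64) == 1000.u256  # convention for a full availability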
func key*(reservation: Reservation): ?!Key =
@ -217,11 +242,19 @@ func available*(self: Reservations): uint =
func hasAvailable*(self: Reservations, bytes: uint): bool =
self.repo.available(bytes.NBytes)
proc exists*(self: Reservations, key: Key): Future[bool] {.async.} =
proc exists*(
self: Reservations, key: Key
): Future[bool] {.async: (raises: [CancelledError]).} =
let exists = await self.repo.metaDs.ds.contains(key)
return exists
proc getImpl(self: Reservations, key: Key): Future[?!seq[byte]] {.async.} =
iterator items(self: StorableIter): auto =
while not self.finished:
yield self.next()
proc getImpl(
self: Reservations, key: Key
): Future[?!seq[byte]] {.async: (raises: [CancelledError]).} =
if not await self.exists(key):
let err =
newException(NotExistsError, "object with key " & $key & " does not exist")
@ -234,7 +267,7 @@ proc getImpl(self: Reservations, key: Key): Future[?!seq[byte]] {.async.} =
proc get*(
self: Reservations, key: Key, T: type SomeStorableObject
): Future[?!T] {.async.} =
): Future[?!T] {.async: (raises: [CancelledError]).} =
without serialized =? await self.getImpl(key), error:
return failure(error)
@ -243,7 +276,9 @@ proc get*(
return success obj
proc updateImpl(self: Reservations, obj: SomeStorableObject): Future[?!void] {.async.} =
proc updateImpl(
self: Reservations, obj: SomeStorableObject
): Future[?!void] {.async: (raises: [CancelledError]).} =
trace "updating " & $(obj.type), id = obj.id
without key =? obj.key, error:
@ -256,10 +291,15 @@ proc updateImpl(self: Reservations, obj: SomeStorableObject): Future[?!void] {.a
proc updateAvailability(
self: Reservations, obj: Availability
): Future[?!void] {.async.} =
): Future[?!void] {.async: (raises: [CancelledError]).} =
logScope:
availabilityId = obj.id
if obj.until < 0:
let error =
newException(UntilOutOfBoundsError, "Cannot set until to a negative value")
return failure(error)
without key =? obj.key, error:
return failure(error)
@ -268,22 +308,26 @@ proc updateAvailability(
trace "Creating new Availability"
let res = await self.updateImpl(obj)
# inform subscribers that Availability has been added
if onAvailabilityAdded =? self.onAvailabilityAdded:
# when chronos v4 is implemented, and OnAvailabilityAdded is annotated
# with async:(raises:[]), we can remove this try/catch as we know, with
# certainty, that nothing will be raised
try:
await onAvailabilityAdded(obj)
except CancelledError as e:
raise e
except CatchableError as e:
# we don't have any insight into types of exceptions that
# `onAvailabilityAdded` can raise because it is caller-defined
warn "Unknown error during 'onAvailabilityAdded' callback", error = e.msg
if OnAvailabilitySaved =? self.OnAvailabilitySaved:
await OnAvailabilitySaved(obj)
return res
else:
return failure(err)
if obj.until > 0:
without allReservations =? await self.all(Reservation, obj.id), error:
error.msg = "Error updating reservation: " & error.msg
return failure(error)
let requestEnds = allReservations.mapIt(it.validUntil)
if requestEnds.len > 0 and requestEnds.max > obj.until:
let error = newException(
UntilOutOfBoundsError,
"Until parameter must be greater than or equal to the end of the longest currently hosted slot",
)
return failure(error)
# The sizing of the availability changed, so we need to adjust the repo reservation accordingly
if oldAvailability.totalSize != obj.totalSize:
trace "totalSize changed, updating repo reservation"
@ -300,32 +344,34 @@ proc updateAvailability(
let res = await self.updateImpl(obj)
if oldAvailability.freeSize < obj.freeSize: # availability added
if oldAvailability.freeSize < obj.freeSize or oldAvailability.duration < obj.duration or
oldAvailability.minPricePerBytePerSecond < obj.minPricePerBytePerSecond or
oldAvailability.totalRemainingCollateral < obj.totalRemainingCollateral:
# availability updated
# inform subscribers that Availability has been modified (with increased
# size)
if onAvailabilityAdded =? self.onAvailabilityAdded:
# when chronos v4 is implemented, and OnAvailabilityAdded is annotated
# with async:(raises:[]), we can remove this try/catch as we know, with
# certainty, that nothing will be raised
try:
await onAvailabilityAdded(obj)
except CancelledError as e:
raise e
except CatchableError as e:
# we don't have any insight into types of exceptions that
# `onAvailabilityAdded` can raise because it is caller-defined
warn "Unknown error during 'onAvailabilityAdded' callback", error = e.msg
if OnAvailabilitySaved =? self.OnAvailabilitySaved:
await OnAvailabilitySaved(obj)
return res
proc update*(self: Reservations, obj: Reservation): Future[?!void] {.async.} =
proc update*(
self: Reservations, obj: Reservation
): Future[?!void] {.async: (raises: [CancelledError]).} =
return await self.updateImpl(obj)
proc update*(self: Reservations, obj: Availability): Future[?!void] {.async.} =
proc update*(
self: Reservations, obj: Availability
): Future[?!void] {.async: (raises: [CancelledError]).} =
try:
withLock(self.availabilityLock):
return await self.updateAvailability(obj)
except AsyncLockError as e:
error "Lock error when trying to update the availability", err = e.msg
return failure(e)
proc delete(self: Reservations, key: Key): Future[?!void] {.async.} =
proc delete(
self: Reservations, key: Key
): Future[?!void] {.async: (raises: [CancelledError]).} =
trace "deleting object", key
if not await self.exists(key):
@ -341,15 +387,17 @@ proc deleteReservation*(
reservationId: ReservationId,
availabilityId: AvailabilityId,
returnedCollateral: ?UInt256 = UInt256.none,
): Future[?!void] {.async.} =
): Future[?!void] {.async: (raises: [CancelledError]).} =
logScope:
reservationId
availabilityId
trace "deleting reservation"
without key =? key(reservationId, availabilityId), error:
return failure(error)
try:
withLock(self.availabilityLock):
without reservation =? (await self.get(key, Reservation)), error:
if error of NotExistsError:
@ -357,16 +405,15 @@ proc deleteReservation*(
else:
return failure(error)
if reservation.size > 0.uint64:
trace "returning remaining reservation bytes to availability",
size = reservation.size
without availabilityKey =? availabilityId.key, error:
return failure(error)
without var availability =? await self.get(availabilityKey, Availability), error:
return failure(error)
if reservation.size > 0.uint64:
trace "returning remaining reservation bytes to availability",
size = reservation.size
availability.freeSize += reservation.size
if collateral =? returnedCollateral:
@ -379,6 +426,9 @@ proc deleteReservation*(
return failure(err.toErr(DeleteFailedError))
return success()
except AsyncLockError as e:
error "Lock error when trying to delete the availability", err = e.msg
return failure(e)
# TODO: add support for deleting availabilities
# To delete, must not have any active sales.
@ -389,12 +439,20 @@ proc createAvailability*(
duration: uint64,
minPricePerBytePerSecond: UInt256,
totalCollateral: UInt256,
): Future[?!Availability] {.async.} =
enabled: bool,
until: SecondsSince1970,
): Future[?!Availability] {.async: (raises: [CancelledError]).} =
trace "creating availability",
size, duration, minPricePerBytePerSecond, totalCollateral
size, duration, minPricePerBytePerSecond, totalCollateral, enabled, until
let availability =
Availability.init(size, size, duration, minPricePerBytePerSecond, totalCollateral)
if until < 0:
let error =
newException(UntilOutOfBoundsError, "Cannot set until to a negative value")
return failure(error)
let availability = Availability.init(
size, size, duration, minPricePerBytePerSecond, totalCollateral, enabled, until
)
let bytes = availability.freeSize
if reserveErr =? (await self.repo.reserve(bytes.NBytes)).errorOption:
@ -418,7 +476,9 @@ method createReservation*(
requestId: RequestId,
slotIndex: uint64,
collateralPerByte: UInt256,
): Future[?!Reservation] {.async, base.} =
validUntil: SecondsSince1970,
): Future[?!Reservation] {.async: (raises: [CancelledError]), base.} =
try:
withLock(self.availabilityLock):
without availabilityKey =? availabilityId.key, error:
return failure(error)
@ -434,9 +494,11 @@ method createReservation*(
)
return failure(error)
trace "Creating reservation", availabilityId, slotSize, requestId, slotIndex
trace "Creating reservation",
availabilityId, slotSize, requestId, slotIndex, validUntil = validUntil
let reservation = Reservation.init(availabilityId, slotSize, requestId, slotIndex)
let reservation =
Reservation.init(availabilityId, slotSize, requestId, slotIndex, validUntil)
if createResErr =? (await self.update(reservation)).errorOption:
return failure(createResErr)
@ -446,10 +508,10 @@ method createReservation*(
availability.freeSize -= slotSize
# adjust the remaining totalRemainingCollateral
availability.totalRemainingCollateral -= slotSize.stuint(256) * collateralPerByte
availability.totalRemainingCollateral -= slotSize.u256 * collateralPerByte
# update availability with reduced size
trace "Updating availability with reduced size"
trace "Updating availability with reduced size", freeSize = availability.freeSize
if updateErr =? (await self.updateAvailability(availability)).errorOption:
trace "Updating availability failed, rolling back reservation creation"
@ -466,17 +528,20 @@ method createReservation*(
trace "Reservation successfully created"
return success(reservation)
except AsyncLockError as e:
error "Lock error when trying to create the reservation", err = e.msg
return failure(e)
proc returnBytesToAvailability*(
self: Reservations,
availabilityId: AvailabilityId,
reservationId: ReservationId,
bytes: uint64,
): Future[?!void] {.async.} =
): Future[?!void] {.async: (raises: [CancelledError]).} =
logScope:
reservationId
availabilityId
try:
withLock(self.availabilityLock):
without key =? key(reservationId, availabilityId), error:
return failure(error)
@ -519,13 +584,16 @@ proc returnBytesToAvailability*(
return failure(updateErr)
return success()
except AsyncLockError as e:
error "Lock error when returning bytes to the availability", err = e.msg
return failure(e)
proc release*(
self: Reservations,
reservationId: ReservationId,
availabilityId: AvailabilityId,
bytes: uint,
): Future[?!void] {.async.} =
): Future[?!void] {.async: (raises: [CancelledError]).} =
logScope:
topics = "release"
bytes
@ -563,13 +631,9 @@ proc release*(
return success()
iterator items(self: StorableIter): Future[?seq[byte]] =
while not self.finished:
yield self.next()
proc storables(
self: Reservations, T: type SomeStorableObject, queryKey: Key = ReservationsKey
): Future[?!StorableIter] {.async.} =
): Future[?!StorableIter] {.async: (raises: [CancelledError]).} =
var iter = StorableIter()
let query = Query.init(queryKey)
when T is Availability:
@ -587,7 +651,7 @@ proc storables(
return failure(error)
# /sales/reservations
proc next(): Future[?seq[byte]] {.async.} =
proc next(): Future[?seq[byte]] {.async: (raises: [CancelledError]).} =
await idleAsync()
iter.finished = results.finished
if not results.finished and res =? (await results.next()) and res.data.len > 0 and
@ -596,7 +660,7 @@ proc storables(
return none seq[byte]
proc dispose(): Future[?!void] {.async.} =
proc dispose(): Future[?!void] {.async: (raises: [CancelledError]).} =
return await results.dispose()
iter.next = next
@ -605,13 +669,14 @@ proc storables(
proc allImpl(
self: Reservations, T: type SomeStorableObject, queryKey: Key = ReservationsKey
): Future[?!seq[T]] {.async.} =
): Future[?!seq[T]] {.async: (raises: [CancelledError]).} =
var ret: seq[T] = @[]
without storables =? (await self.storables(T, queryKey)), error:
return failure(error)
for storable in storables.items:
try:
without bytes =? (await storable):
continue
@ -621,16 +686,23 @@ proc allImpl(
continue
ret.add obj
except CancelledError as err:
raise err
except CatchableError as err:
error "Error when retrieving storable", error = err.msg
continue
return success(ret)
proc all*(self: Reservations, T: type SomeStorableObject): Future[?!seq[T]] {.async.} =
proc all*(
self: Reservations, T: type SomeStorableObject
): Future[?!seq[T]] {.async: (raises: [CancelledError]).} =
return await self.allImpl(T)
proc all*(
self: Reservations, T: type SomeStorableObject, availabilityId: AvailabilityId
): Future[?!seq[T]] {.async.} =
without key =? (ReservationsKey / $availabilityId):
): Future[?!seq[T]] {.async: (raises: [CancelledError]).} =
without key =? key(availabilityId):
return failure("no key")
return await self.allImpl(T, key)
@ -639,18 +711,22 @@ proc findAvailability*(
self: Reservations,
size, duration: uint64,
pricePerBytePerSecond, collateralPerByte: UInt256,
): Future[?Availability] {.async.} =
validUntil: SecondsSince1970,
): Future[?Availability] {.async: (raises: [CancelledError]).} =
without storables =? (await self.storables(Availability)), e:
error "failed to get all storables", error = e.msg
return none Availability
for item in storables.items:
if bytes =? (await item) and availability =? Availability.fromJson(bytes):
if size <= availability.freeSize and duration <= availability.duration and
if availability.enabled and size <= availability.freeSize and
duration <= availability.duration and
collateralPerByte <= availability.maxCollateralPerByte and
pricePerBytePerSecond >= availability.minPricePerBytePerSecond:
pricePerBytePerSecond >= availability.minPricePerBytePerSecond and
(availability.until == 0 or availability.until >= validUntil):
trace "availability matched",
id = availability.id,
enabled = availability.enabled,
size,
availFreeSize = availability.freeSize,
duration,
@ -658,7 +734,8 @@ proc findAvailability*(
pricePerBytePerSecond,
availMinPricePerBytePerSecond = availability.minPricePerBytePerSecond,
collateralPerByte,
availMaxCollateralPerByte = availability.maxCollateralPerByte
availMaxCollateralPerByte = availability.maxCollateralPerByte,
until = availability.until
# TODO: As soon as we're on ARC-ORC, we can use destructors
# to automatically dispose our iterators when they fall out of scope.
@ -670,6 +747,7 @@ proc findAvailability*(
trace "availability did not match",
id = availability.id,
enabled = availability.enabled,
size,
availFreeSize = availability.freeSize,
duration,
@ -677,4 +755,5 @@ proc findAvailability*(
pricePerBytePerSecond,
availMinPricePerBytePerSecond = availability.minPricePerBytePerSecond,
collateralPerByte,
availMaxCollateralPerByte = availability.maxCollateralPerByte
availMaxCollateralPerByte = availability.maxCollateralPerByte,
until = availability.until
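Summarizing the matching rule that findAvailability now applies, here is a hedged, self-contained sketch of the predicate; plain integers stand in for UInt256 and SecondsSince1970, and only the field names mirror the diff above:

# Illustrative sketch of the availability-matching predicate applied above.
type AvailSketch = object
  enabled: bool
  freeSize, duration: uint64
  minPricePerBytePerSecond, maxCollateralPerByte: uint64
  until: int64 # 0 means "no deadline configured"

func matches(a: AvailSketch, size, duration: uint64,
    pricePerBytePerSecond, collateralPerByte: uint64,
    validUntil: int64): bool =
  a.enabled and
    size <= a.freeSize and
    duration <= a.duration and
    collateralPerByte <= a.maxCollateralPerByte and
    pricePerBytePerSecond >= a.minPricePerBytePerSecond and
    (a.until == 0 or a.until >= validUntil)

when isMainModule:
  let a = AvailSketch(enabled: true, freeSize: 100, duration: 3600,
    minPricePerBytePerSecond: 1, maxCollateralPerByte: 10, until: 0)
  doAssert a.matches(50, 1800, 2, 5, 12_345) # until == 0 imposes no deadline
  doAssert not a.matches(50, 1800, 2, 50, 12_345) # collateral per byte too high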

View File

@ -2,7 +2,6 @@ import pkg/chronos
import pkg/questionable
import pkg/questionable/results
import pkg/stint
import pkg/upraises
import ../contracts/requests
import ../errors
import ../logutils
@ -11,6 +10,7 @@ import ./statemachine
import ./salescontext
import ./salesdata
import ./reservations
import ./slotqueue
export reservations
@ -26,10 +26,10 @@ type
onCleanUp*: OnCleanUp
onFilled*: ?OnFilled
OnCleanUp* = proc(
returnBytes = false, reprocessSlot = false, returnedCollateral = UInt256.none
): Future[void] {.gcsafe, upraises: [].}
OnFilled* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, upraises: [].}
OnCleanUp* = proc(reprocessSlot = false, returnedCollateral = UInt256.none) {.
async: (raises: [])
.}
OnFilled* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, raises: [].}
SalesAgentError = object of CodexError
AllSlotsFilledError* = object of SalesAgentError
@ -42,10 +42,16 @@ proc newSalesAgent*(
requestId: RequestId,
slotIndex: uint64,
request: ?StorageRequest,
slotQueueItem = SlotQueueItem.none,
): SalesAgent =
var agent = SalesAgent.new()
agent.context = context
agent.data = SalesData(requestId: requestId, slotIndex: slotIndex, request: request)
agent.data = SalesData(
requestId: requestId,
slotIndex: slotIndex,
request: request,
slotQueueItem: slotQueueItem,
)
return agent
proc retrieveRequest*(agent: SalesAgent) {.async.} =
@ -103,18 +109,15 @@ proc subscribeCancellation(agent: SalesAgent) {.async.} =
error "Error while waiting for expiry to lapse", error = e.msgDetail
data.cancelled = onCancelled()
asyncSpawn data.cancelled
method onFulfilled*(
agent: SalesAgent, requestId: RequestId
) {.base, gcsafe, upraises: [].} =
) {.base, gcsafe, raises: [].} =
let cancelled = agent.data.cancelled
if agent.data.requestId == requestId and not cancelled.isNil and not cancelled.finished:
cancelled.cancelSoon()
method onFailed*(
agent: SalesAgent, requestId: RequestId
) {.base, gcsafe, upraises: [].} =
method onFailed*(agent: SalesAgent, requestId: RequestId) {.base, gcsafe, raises: [].} =
without request =? agent.data.request:
return
if agent.data.requestId == requestId:
@ -122,7 +125,7 @@ method onFailed*(
method onSlotFilled*(
agent: SalesAgent, requestId: RequestId, slotIndex: uint64
) {.base, gcsafe, upraises: [].} =
) {.base, gcsafe, raises: [].} =
if agent.data.requestId == requestId and agent.data.slotIndex == slotIndex:
agent.schedule(slotFilledEvent(requestId, slotIndex))
@ -133,7 +136,7 @@ proc subscribe*(agent: SalesAgent) {.async.} =
await agent.subscribeCancellation()
agent.subscribed = true
proc unsubscribe*(agent: SalesAgent) {.async.} =
proc unsubscribe*(agent: SalesAgent) {.async: (raises: []).} =
if not agent.subscribed:
return
@ -144,6 +147,6 @@ proc unsubscribe*(agent: SalesAgent) {.async.} =
agent.subscribed = false
proc stop*(agent: SalesAgent) {.async.} =
proc stop*(agent: SalesAgent) {.async: (raises: []).} =
await Machine(agent).stop()
await agent.unsubscribe()

View File

@ -1,6 +1,5 @@
import pkg/questionable
import pkg/questionable/results
import pkg/upraises
import pkg/libp2p/cid
import ../market
@ -24,15 +23,20 @@ type
slotQueue*: SlotQueue
simulateProofFailures*: int
BlocksCb* = proc(blocks: seq[bt.Block]): Future[?!void] {.gcsafe, raises: [].}
BlocksCb* =
proc(blocks: seq[bt.Block]): Future[?!void] {.async: (raises: [CancelledError]).}
OnStore* = proc(
request: StorageRequest, slot: uint64, blocksCb: BlocksCb
): Future[?!void] {.gcsafe, upraises: [].}
request: StorageRequest,
expiry: SecondsSince1970,
slot: uint64,
blocksCb: BlocksCb,
isRepairing: bool,
): Future[?!void] {.async: (raises: [CancelledError]).}
OnProve* = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.
gcsafe, upraises: []
async: (raises: [CancelledError])
.}
OnExpiryUpdate* = proc(rootCid: Cid, expiry: SecondsSince1970): Future[?!void] {.
gcsafe, upraises: []
async: (raises: [CancelledError])
.}
OnClear* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, upraises: [].}
OnSale* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, upraises: [].}
OnClear* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, raises: [].}
OnSale* = proc(request: StorageRequest, slotIndex: uint64) {.gcsafe, raises: [].}

View File

@ -2,6 +2,7 @@ import pkg/chronos
import ../contracts/requests
import ../market
import ./reservations
import ./slotqueue
type SalesData* = ref object
requestId*: RequestId
@ -10,3 +11,4 @@ type SalesData* = ref object
slotIndex*: uint64
cancelled*: Future[void]
reservation*: ?Reservation
slotQueueItem*: ?SlotQueueItem

View File

@ -3,9 +3,7 @@ import std/tables
import pkg/chronos
import pkg/questionable
import pkg/questionable/results
import pkg/upraises
import ../errors
import ../clock
import ../logutils
import ../rng
import ../utils
@ -17,25 +15,21 @@ logScope:
topics = "marketplace slotqueue"
type
OnProcessSlot* =
proc(item: SlotQueueItem, done: Future[void]): Future[void] {.gcsafe, upraises: [].}
OnProcessSlot* = proc(item: SlotQueueItem): Future[void] {.async: (raises: []).}
# Non-ref obj copies value when assigned, preventing accidental modification
# of values which could cause an incorrect order (eg
# ``slotQueue[1].collateral = 1`` would cause ``collateral`` to be updated,
# but the heap invariant would no longer be honoured. When non-ref, the
# compiler can ensure that statement will fail).
SlotQueueWorker = object
doneProcessing*: Future[void]
SlotQueueItem* = object
requestId: RequestId
slotIndex: uint16
slotSize: uint64
duration: uint64
pricePerBytePerSecond: UInt256
collateralPerByte: UInt256
expiry: uint64
collateral: UInt256 # Collateral computed
expiry: ?uint64
seen: bool
# don't need to -1 to prevent overflow when adding 1 (to always allow push)
@ -47,7 +41,6 @@ type
onProcessSlot: ?OnProcessSlot
queue: AsyncHeapQueue[SlotQueueItem]
running: bool
workers: AsyncQueue[SlotQueueWorker]
trackedFutures: TrackedFutures
unpaused: AsyncEvent
@ -76,9 +69,6 @@ proc profitability(item: SlotQueueItem): UInt256 =
slotSize: item.slotSize,
).pricePerSlot
proc collateralPerSlot(item: SlotQueueItem): UInt256 =
StorageAsk(collateralPerByte: item.collateralPerByte, slotSize: item.slotSize).collateralPerSlot
proc `<`*(a, b: SlotQueueItem): bool =
# for A to have a higher priority than B (in a min queue), A must be less than
# B.
@ -95,11 +85,12 @@ proc `<`*(a, b: SlotQueueItem): bool =
scoreA.addIf(a.profitability > b.profitability, 3)
scoreB.addIf(a.profitability < b.profitability, 3)
scoreA.addIf(a.collateralPerSlot < b.collateralPerSlot, 2)
scoreB.addIf(a.collateralPerSlot > b.collateralPerSlot, 2)
scoreA.addIf(a.collateral < b.collateral, 2)
scoreB.addIf(a.collateral > b.collateral, 2)
scoreA.addIf(a.expiry > b.expiry, 1)
scoreB.addIf(a.expiry < b.expiry, 1)
if expiryA =? a.expiry and expiryB =? b.expiry:
scoreA.addIf(expiryA > expiryB, 1)
scoreB.addIf(expiryA < expiryB, 1)
return scoreA > scoreB
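A self-contained sketch of the priority scoring above (profitability weighted 3, collateral 2, expiry 1, with expiry compared only when both items carry one); plain uint64 stands in for UInt256 and any handling of the `seen` flag elsewhere in the proc is omitted:

import std/options

type ItemSketch = object
  profitability, collateral: uint64
  expiry: Option[uint64]

template addIf(score: var int, cond: bool, weight: int) =
  if cond: score += weight

func higherPriority(a, b: ItemSketch): bool =
  # mirrors the weights above: profitability 3, collateral 2, expiry 1
  var scoreA = 0
  var scoreB = 0
  scoreA.addIf(a.profitability > b.profitability, 3)
  scoreB.addIf(a.profitability < b.profitability, 3)
  scoreA.addIf(a.collateral < b.collateral, 2) # lower collateral is preferred
  scoreB.addIf(a.collateral > b.collateral, 2)
  if a.expiry.isSome and b.expiry.isSome:
    scoreA.addIf(a.expiry.get > b.expiry.get, 1) # later expiry is preferred
    scoreB.addIf(a.expiry.get < b.expiry.get, 1)
  scoreA > scoreB

when isMainModule:
  let cheap = ItemSketch(profitability: 10, collateral: 1, expiry: some 100'u64)
  let pricey = ItemSketch(profitability: 10, collateral: 5, expiry: some 100'u64)
  doAssert higherPriority(cheap, pricey) # the lower collateral wins the tie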
@ -128,15 +119,13 @@ proc new*(
# avoid instantiating `workers` in constructor to avoid side effects in
# `newAsyncQueue` procedure
proc init(_: type SlotQueueWorker): SlotQueueWorker =
SlotQueueWorker(doneProcessing: newFuture[void]("slotqueue.worker.processing"))
proc init*(
_: type SlotQueueItem,
requestId: RequestId,
slotIndex: uint16,
ask: StorageAsk,
expiry: uint64,
expiry: ?uint64,
collateral: UInt256,
seen = false,
): SlotQueueItem =
SlotQueueItem(
@ -145,25 +134,43 @@ proc init*(
slotSize: ask.slotSize,
duration: ask.duration,
pricePerBytePerSecond: ask.pricePerBytePerSecond,
collateralPerByte: ask.collateralPerByte,
collateral: collateral,
expiry: expiry,
seen: seen,
)
proc init*(
_: type SlotQueueItem, request: StorageRequest, slotIndex: uint16
_: type SlotQueueItem,
requestId: RequestId,
slotIndex: uint16,
ask: StorageAsk,
expiry: uint64,
collateral: UInt256,
seen = false,
): SlotQueueItem =
SlotQueueItem.init(request.id, slotIndex, request.ask, request.expiry)
SlotQueueItem.init(requestId, slotIndex, ask, some expiry, collateral, seen)
proc init*(
_: type SlotQueueItem, requestId: RequestId, ask: StorageAsk, expiry: uint64
): seq[SlotQueueItem] =
_: type SlotQueueItem,
request: StorageRequest,
slotIndex: uint16,
collateral: UInt256,
): SlotQueueItem =
SlotQueueItem.init(request.id, slotIndex, request.ask, request.expiry, collateral)
proc init*(
_: type SlotQueueItem,
requestId: RequestId,
ask: StorageAsk,
expiry: ?uint64,
collateral: UInt256,
): seq[SlotQueueItem] {.raises: [SlotsOutOfRangeError].} =
if not ask.slots.inRange:
raise newException(SlotsOutOfRangeError, "Too many slots")
var i = 0'u16
proc initSlotQueueItem(): SlotQueueItem =
let item = SlotQueueItem.init(requestId, i, ask, expiry)
let item = SlotQueueItem.init(requestId, i, ask, expiry, collateral)
inc i
return item
@ -171,8 +178,19 @@ proc init*(
Rng.instance.shuffle(items)
return items
proc init*(_: type SlotQueueItem, request: StorageRequest): seq[SlotQueueItem] =
return SlotQueueItem.init(request.id, request.ask, request.expiry)
proc init*(
_: type SlotQueueItem,
requestId: RequestId,
ask: StorageAsk,
expiry: uint64,
collateral: UInt256,
): seq[SlotQueueItem] {.raises: [SlotsOutOfRangeError].} =
SlotQueueItem.init(requestId, ask, some expiry, collateral)
proc init*(
_: type SlotQueueItem, request: StorageRequest, collateral: UInt256
): seq[SlotQueueItem] =
return SlotQueueItem.init(request.id, request.ask, uint64.none, collateral)
proc inRange*(val: SomeUnsignedInt): bool =
val.uint16 in SlotQueueSize.low .. SlotQueueSize.high
@ -198,6 +216,9 @@ proc collateralPerByte*(self: SlotQueueItem): UInt256 =
proc seen*(self: SlotQueueItem): bool =
self.seen
proc `seen=`*(self: var SlotQueueItem, seen: bool) =
self.seen = seen
proc running*(self: SlotQueue): bool =
self.running
@ -216,13 +237,6 @@ proc `$`*(self: SlotQueue): string =
proc `onProcessSlot=`*(self: SlotQueue, onProcessSlot: OnProcessSlot) =
self.onProcessSlot = some onProcessSlot
proc activeWorkers*(self: SlotQueue): int =
if not self.running:
return 0
# active = capacity - available
self.maxWorkers - self.workers.len
proc contains*(self: SlotQueue, item: SlotQueueItem): bool =
self.queue.contains(item)
@ -234,25 +248,7 @@ proc unpause*(self: SlotQueue) =
# set unpaused flag to true -- unblocks coroutines waiting on unpaused.wait()
self.unpaused.fire()
proc populateItem*(
self: SlotQueue, requestId: RequestId, slotIndex: uint16
): ?SlotQueueItem =
trace "populate item, items in queue", len = self.queue.len
for item in self.queue.items:
trace "populate item search", itemRequestId = item.requestId, requestId
if item.requestId == requestId:
return some SlotQueueItem(
requestId: requestId,
slotIndex: slotIndex,
slotSize: item.slotSize,
duration: item.duration,
pricePerBytePerSecond: item.pricePerBytePerSecond,
collateralPerByte: item.collateralPerByte,
expiry: item.expiry,
)
return none SlotQueueItem
proc push*(self: SlotQueue, item: SlotQueueItem): ?!void =
proc push*(self: SlotQueue, item: SlotQueueItem): ?!void {.raises: [].} =
logScope:
requestId = item.requestId
slotIndex = item.slotIndex
@ -324,52 +320,6 @@ proc delete*(self: SlotQueue, requestId: RequestId) =
proc `[]`*(self: SlotQueue, i: Natural): SlotQueueItem =
self.queue[i]
proc addWorker(self: SlotQueue): ?!void =
if not self.running:
let err = newException(QueueNotRunningError, "queue must be running")
return failure(err)
trace "adding new worker to worker queue"
let worker = SlotQueueWorker.init()
try:
self.trackedFutures.track(worker.doneProcessing)
self.workers.addLastNoWait(worker)
except AsyncQueueFullError:
return failure("failed to add worker, worker queue full")
return success()
proc dispatch(
self: SlotQueue, worker: SlotQueueWorker, item: SlotQueueItem
) {.async: (raises: []).} =
logScope:
requestId = item.requestId
slotIndex = item.slotIndex
if not self.running:
warn "Could not dispatch worker because queue is not running"
return
if onProcessSlot =? self.onProcessSlot:
try:
self.trackedFutures.track(worker.doneProcessing)
await onProcessSlot(item, worker.doneProcessing)
await worker.doneProcessing
if err =? self.addWorker().errorOption:
raise err # catch below
except QueueNotRunningError as e:
info "could not re-add worker to worker queue, queue not running", error = e.msg
except CancelledError:
# do not bubble exception up as it is called with `asyncSpawn` which would
# convert the exception into a `FutureDefect`
discard
except CatchableError as e:
# we don't have any insight into types of errors that `onProcessSlot` can
# throw because it is caller-defined
warn "Unknown error processing slot in worker", error = e.msg
proc clearSeenFlags*(self: SlotQueue) =
# Enumerate all items in the queue, overwriting each item with `seen = false`.
# To avoid issues with new queue items being pushed to the queue while all
@ -387,7 +337,8 @@ proc clearSeenFlags*(self: SlotQueue) =
trace "all 'seen' flags cleared"
proc run(self: SlotQueue) {.async: (raises: []).} =
proc runWorker(self: SlotQueue) {.async: (raises: []).} =
trace "slot queue worker loop started"
while self.running:
try:
if self.paused:
@ -396,8 +347,6 @@ proc run(self: SlotQueue) {.async: (raises: []).} =
# block until unpaused is true/fired, ie wait for queue to be unpaused
await self.unpaused.wait()
let worker =
await self.workers.popFirst() # if workers saturated, wait here for new workers
let item = await self.queue.pop() # if queue empty, wait here for new items
logScope:
@ -420,24 +369,19 @@ proc run(self: SlotQueue) {.async: (raises: []).} =
# immediately (with priority over other items) once unpaused
trace "readding seen item back into the queue"
discard self.push(item) # on error, drop the item and continue
worker.doneProcessing.complete()
if err =? self.addWorker().errorOption:
error "error adding new worker", error = err.msg
await sleepAsync(1.millis) # poll
continue
trace "processing item"
without onProcessSlot =? self.onProcessSlot:
raiseAssert "slot queue onProcessSlot not set"
let fut = self.dispatch(worker, item)
self.trackedFutures.track(fut)
asyncSpawn fut
await sleepAsync(1.millis) # poll
await onProcessSlot(item)
except CancelledError:
trace "slot queue cancelled"
trace "slot queue worker cancelled"
break
except CatchableError as e: # raised from self.queue.pop() or self.workers.pop()
warn "slot queue error encountered during processing", error = e.msg
except CatchableError as e: # raised from self.queue.pop()
warn "slot queue worker error encountered during processing", error = e.msg
trace "slot queue worker loop stopped"
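A deliberately simplified, synchronous sketch of the single-worker control flow that runWorker now implements in place of the removed worker pool; a std Deque stands in for chronos' AsyncHeapQueue, pausing is reduced to a flag, and error handling is dropped:

import std/deques

type
  ItemSketch = object
    id: int
    seen: bool
  QueueSketch = object
    items: Deque[ItemSketch]
    running, paused: bool

proc runWorker(q: var QueueSketch, process: proc (item: ItemSketch)) =
  while q.running and q.items.len > 0:
    if q.paused:
      break # the real loop awaits the `unpaused` event instead of exiting
    let item = q.items.popFirst()
    if item.seen:
      # a seen item means the queue has already been cycled once:
      # pause and put the item back, as the real worker does
      q.paused = true
      q.items.addFirst(item)
      continue
    process(item)

when isMainModule:
  var q = QueueSketch(items: initDeque[ItemSketch](), running: true)
  q.items.addLast ItemSketch(id: 1)
  q.items.addLast ItemSketch(id: 2, seen: true)
  runWorker(q, proc (item: ItemSketch) = echo "processed ", item.id)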
proc start*(self: SlotQueue) =
if self.running:
@ -447,18 +391,11 @@ proc start*(self: SlotQueue) =
self.running = true
# must be called in `start` to avoid side effects in `new`
self.workers = newAsyncQueue[SlotQueueWorker](self.maxWorkers)
# Add initial workers to the `AsyncHeapQueue`. Once a worker has completed its
# task, a new worker will be pushed to the queue
for i in 0 ..< self.maxWorkers:
if err =? self.addWorker().errorOption:
error "start: error adding new worker", error = err.msg
let fut = self.run()
self.trackedFutures.track(fut)
asyncSpawn fut
let worker = self.runWorker()
self.trackedFutures.track(worker)
proc stop*(self: SlotQueue) {.async.} =
if not self.running:

View File

@ -1,5 +1,4 @@
import pkg/questionable
import pkg/upraises
import ../errors
import ../utils/asyncstatemachine
import ../market
@ -12,21 +11,21 @@ export asyncstatemachine
type
SaleState* = ref object of State
SaleError* = ref object of CodexError
SaleError* = object of CodexError
method onCancelled*(
state: SaleState, request: StorageRequest
): ?State {.base, upraises: [].} =
): ?State {.base, raises: [].} =
discard
method onFailed*(
state: SaleState, request: StorageRequest
): ?State {.base, upraises: [].} =
): ?State {.base, raises: [].} =
discard
method onSlotFilled*(
state: SaleState, requestId: RequestId, slotIndex: uint64
): ?State {.base, upraises: [].} =
): ?State {.base, raises: [].} =
discard
proc cancelledEvent*(request: StorageRequest): Event =

View File

@ -12,6 +12,14 @@ type SaleCancelled* = ref object of SaleState
method `$`*(state: SaleCancelled): string =
"SaleCancelled"
proc slotIsFilledByMe(
market: Market, requestId: RequestId, slotIndex: uint64
): Future[bool] {.async: (raises: [CancelledError, MarketError]).} =
let host = await market.getHost(requestId, slotIndex)
let me = await market.getSigner()
return host == me.some
method run*(
state: SaleCancelled, machine: Machine
): Future[?State] {.async: (raises: []).} =
@ -23,21 +31,27 @@ method run*(
raiseAssert "no sale request"
try:
let slot = Slot(request: request, slotIndex: data.slotIndex)
var returnedCollateral = UInt256.none
if await slotIsFilledByMe(market, data.requestId, data.slotIndex):
debug "Collecting collateral and partial payout",
requestId = data.requestId, slotIndex = data.slotIndex
let slot = Slot(request: request, slotIndex: data.slotIndex)
let currentCollateral = await market.currentCollateral(slot.id)
try:
await market.freeSlot(slot.id)
except SlotStateMismatchError as e:
warn "Failed to free slot because slot is already free", error = e.msg
returnedCollateral = currentCollateral.some
if onClear =? agent.context.onClear and request =? data.request:
onClear(request, data.slotIndex)
if onCleanUp =? agent.onCleanUp:
await onCleanUp(
returnBytes = true,
reprocessSlot = false,
returnedCollateral = some currentCollateral,
)
await onCleanUp(reprocessSlot = false, returnedCollateral = returnedCollateral)
warn "Sale cancelled due to timeout",
requestId = data.requestId, slotIndex = data.slotIndex

View File

@ -38,6 +38,7 @@ method run*(
let agent = SalesAgent(machine)
let data = agent.data
let context = agent.context
let market = context.market
let reservations = context.reservations
without onStore =? context.onStore:
@ -55,7 +56,9 @@ method run*(
reservationId = reservation.id
availabilityId = reservation.availabilityId
proc onBlocks(blocks: seq[bt.Block]): Future[?!void] {.async.} =
proc onBlocks(
blocks: seq[bt.Block]
): Future[?!void] {.async: (raises: [CancelledError]).} =
# release batches of blocks as they are written to disk and
# update availability size
var bytes: uint = 0
@ -67,8 +70,21 @@ method run*(
return await reservations.release(reservation.id, reservation.availabilityId, bytes)
try:
let requestId = request.id
let slotId = slotId(requestId, data.slotIndex)
let requestState = await market.requestState(requestId)
let isRepairing = (await market.slotState(slotId)) == SlotState.Repair
trace "Retrieving expiry"
var expiry: SecondsSince1970
if state =? requestState and state == RequestState.Started:
expiry = await market.getRequestEnd(requestId)
else:
expiry = await market.requestExpiresAt(requestId)
trace "Starting download"
if err =? (await onStore(request, data.slotIndex, onBlocks)).errorOption:
if err =?
(await onStore(request, expiry, data.slotIndex, onBlocks, isRepairing)).errorOption:
return some State(SaleErrored(error: err, reprocessSlot: false))
trace "Download complete"
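The expiry selection above reduces to a small rule: once the request has started, the download deadline is the request end; before that it is the request expiry. An illustrative sketch with a local enum and plain int64 timestamps standing in for the asynchronous market queries:

type RequestStateSketch = enum ReqNew, ReqStarted

func downloadDeadline(state: RequestStateSketch,
    requestEnd, requestExpiresAt: int64): int64 =
  if state == ReqStarted: requestEnd else: requestExpiresAt

when isMainModule:
  doAssert downloadDeadline(ReqStarted, 2_000, 1_000) == 2_000
  doAssert downloadDeadline(ReqNew, 2_000, 1_000) == 1_000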

View File

@ -1,6 +1,5 @@
import pkg/questionable
import pkg/questionable/results
import pkg/upraises
import ../statemachine
import ../salesagent
@ -34,7 +33,7 @@ method run*(
onClear(request, data.slotIndex)
if onCleanUp =? agent.onCleanUp:
await onCleanUp(returnBytes = true, reprocessSlot = state.reprocessSlot)
await onCleanUp(reprocessSlot = state.reprocessSlot)
except CancelledError as e:
trace "SaleErrored.run was cancelled", error = e.msgDetail
except CatchableError as e:

View File

@ -28,6 +28,7 @@ method run*(
let slot = Slot(request: request, slotIndex: data.slotIndex)
debug "Removing slot from mySlots",
requestId = data.requestId, slotIndex = data.slotIndex
await market.freeSlot(slot.id)
let error = newException(SaleFailedError, "Sale failed")

View File

@ -11,7 +11,7 @@ import ./cancelled
import ./failed
import ./proving
when codex_enable_proof_failures:
when storage_enable_proof_failures:
import ./provingsimulated
logScope:
@ -59,7 +59,7 @@ method run*(
if err =? (await onExpiryUpdate(request.content.cid, requestEnd)).errorOption:
return some State(SaleErrored(error: err))
when codex_enable_proof_failures:
when storage_enable_proof_failures:
if context.simulateProofFailures > 0:
info "Proving with failure rate", rate = context.simulateProofFailures
return some State(

View File

@ -30,6 +30,7 @@ method run*(
): Future[?State] {.async: (raises: []).} =
let data = SalesAgent(machine).data
let market = SalesAgent(machine).context.market
without (request =? data.request):
raiseAssert "Request not set"
@ -38,27 +39,19 @@ method run*(
slotIndex = data.slotIndex
try:
let slotState = await market.slotState(slotId(data.requestId, data.slotIndex))
let requestedCollateral = request.ask.collateralPerSlot
var collateral: UInt256
if slotState == SlotState.Repair:
# When repairing the node gets "discount" on the collateral that it needs to
let repairRewardPercentage = (await market.repairRewardPercentage).u256
collateral =
requestedCollateral -
((requestedCollateral * repairRewardPercentage)).div(100.u256)
else:
collateral = requestedCollateral
without collateral =? await market.slotCollateral(data.requestId, data.slotIndex),
err:
error "Failure attempting to fill slot: unable to calculate collateral",
error = err.msg
return some State(SaleErrored(error: err))
debug "Filling slot"
try:
await market.fillSlot(data.requestId, data.slotIndex, state.proof, collateral)
except MarketError as e:
if e.msg.contains "Slot is not free":
except SlotStateMismatchError as e:
debug "Slot is already filled, ignoring slot"
return some State(SaleIgnored(reprocessSlot: false, returnBytes: true))
else:
return some State(SaleIgnored(reprocessSlot: false, returnsCollateral: true))
except MarketError as e:
return some State(SaleErrored(error: e))
# other CatchableErrors are handled "automatically" by the SaleState
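The inline collateral computation removed above (now delegated to market.slotCollateral) applied a percentage discount when the slot is in Repair state; a worked sketch of that formula with hypothetical numbers, using plain uint64 in place of UInt256:

func repairCollateral(requested, repairRewardPercentage: uint64): uint64 =
  # discount the requested collateral by the repair reward percentage
  requested - (requested * repairRewardPercentage) div 100

when isMainModule:
  doAssert repairCollateral(1_000, 10) == 900 # 10% discount when repairing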

View File

@ -36,6 +36,9 @@ method run*(
requestId = data.requestId, slotIndex = data.slotIndex
try:
if onClear =? agent.context.onClear:
onClear(request, data.slotIndex)
if onCleanUp =? agent.onCleanUp:
await onCleanUp(returnedCollateral = state.returnedCollateral)
except CancelledError as e:

View File

@ -14,7 +14,7 @@ logScope:
type SaleIgnored* = ref object of SaleState
reprocessSlot*: bool # readd slot to queue with `seen` flag
returnBytes*: bool # return unreleased bytes from Reservation to Availability
returnsCollateral*: bool # returns collateral when a reservation was created
method `$`*(state: SaleIgnored): string =
"SaleIgnored"
@ -23,11 +23,26 @@ method run*(
state: SaleIgnored, machine: Machine
): Future[?State] {.async: (raises: []).} =
let agent = SalesAgent(machine)
let data = agent.data
let market = agent.context.market
without request =? data.request:
raiseAssert "no sale request"
var returnedCollateral = UInt256.none
try:
if state.returnsCollateral:
# The returnedCollateral is needed because a reservation could
# be created and the collateral assigned to that reservation.
# The returnedCollateral will be used in the cleanup function
# and be passed to the deleteReservation function.
let slot = Slot(request: request, slotIndex: data.slotIndex)
returnedCollateral = request.ask.collateralPerSlot.some
if onCleanUp =? agent.onCleanUp:
await onCleanUp(
reprocessSlot = state.reprocessSlot, returnBytes = state.returnBytes
reprocessSlot = state.reprocessSlot, returnedCollateral = returnedCollateral
)
except CancelledError as e:
trace "SaleIgnored.run was cancelled", error = e.msgDetail

View File

@ -51,15 +51,17 @@ method run*(
await agent.subscribe()
without request =? data.request:
raiseAssert "no sale request"
error "request could not be retrieved", id = data.requestId
let error = newException(SaleError, "request could not be retrieved")
return some State(SaleErrored(error: error))
let slotId = slotId(data.requestId, data.slotIndex)
let state = await market.slotState(slotId)
if state != SlotState.Free and state != SlotState.Repair:
return some State(SaleIgnored(reprocessSlot: false, returnBytes: false))
return some State(SaleIgnored(reprocessSlot: false))
# TODO: Once implemented, check to ensure the host is allowed to fill the slot,
# due to the [sliding window mechanism](https://github.com/codex-storage/codex-research/blob/master/design/marketplace.md#dispersal)
# due to the [sliding window mechanism](https://github.com/logos-storage/logos-storage-research/blob/master/design/marketplace.md#dispersal)
logScope:
slotIndex = data.slotIndex
@ -68,10 +70,12 @@ method run*(
pricePerBytePerSecond = request.ask.pricePerBytePerSecond
collateralPerByte = request.ask.collateralPerByte
let requestEnd = await market.getRequestEnd(data.requestId)
without availability =?
await reservations.findAvailability(
request.ask.slotSize, request.ask.duration, request.ask.pricePerBytePerSecond,
request.ask.collateralPerByte,
request.ask.collateralPerByte, requestEnd,
):
debug "No availability found for request, ignoring"
@ -80,9 +84,9 @@ method run*(
info "Availability found for request, creating reservation"
without reservation =?
await reservations.createReservation(
await noCancel reservations.createReservation(
availability.id, request.ask.slotSize, request.id, data.slotIndex,
request.ask.collateralPerByte,
request.ask.collateralPerByte, requestEnd,
), error:
trace "Creation of reservation failed"
# Race condition:

View File

@ -1,5 +1,5 @@
import ../../conf
when codex_enable_proof_failures:
when storage_enable_proof_failures:
import std/strutils
import pkg/stint
import pkg/ethers
@ -40,7 +40,7 @@ when codex_enable_proof_failures:
try:
warn "Submitting INVALID proof", period = currentPeriod, slotId = slot.id
await market.submitProof(slot.id, Groth16Proof.default)
except Proofs_InvalidProof as e:
except ProofInvalidError as e:
discard # expected
except CancelledError as error:
raise error

View File

@ -44,11 +44,10 @@ method run*(
try:
trace "Reserving slot"
await market.reserveSlot(data.requestId, data.slotIndex)
except MarketError as e:
if e.msg.contains "SlotReservations_ReservationNotAllowed":
except SlotReservationNotAllowedError as e:
debug "Slot cannot be reserved, ignoring", error = e.msg
return some State(SaleIgnored(reprocessSlot: false, returnBytes: true))
else:
return some State(SaleIgnored(reprocessSlot: false, returnsCollateral: true))
except MarketError as e:
return some State(SaleErrored(error: e))
# other CatchableErrors are handled "automatically" by the SaleState
@ -58,7 +57,7 @@ method run*(
# do not re-add this slot to the queue, and return bytes from Reservation to
# the Availability
debug "Slot cannot be reserved, ignoring"
return some State(SaleIgnored(reprocessSlot: false, returnBytes: true))
return some State(SaleIgnored(reprocessSlot: false, returnsCollateral: true))
except CancelledError as e:
trace "SaleSlotReserving.run was cancelled", error = e.msgDetail
except CatchableError as e:

View File

@ -38,6 +38,11 @@ method run*(
await agent.retrieveRequest()
await agent.subscribe()
without request =? data.request:
error "request could not be retrieved", id = data.requestId
let error = newException(SaleError, "request could not be retrieved")
return some State(SaleErrored(error: error))
let slotId = slotId(data.requestId, data.slotIndex)
let slotState = await market.slotState(slotId)

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -113,17 +113,17 @@ func numSlotCells*[T, H](self: SlotsBuilder[T, H]): Natural =
self.numBlockCells * self.numSlotBlocks
func slotIndiciesIter*[T, H](self: SlotsBuilder[T, H], slot: Natural): ?!Iter[int] =
func slotIndicesIter*[T, H](self: SlotsBuilder[T, H], slot: Natural): ?!Iter[int] =
## Returns the slot indices.
##
self.strategy.getIndicies(slot).catch
self.strategy.getIndices(slot).catch
func slotIndicies*[T, H](self: SlotsBuilder[T, H], slot: Natural): seq[int] =
func slotIndices*[T, H](self: SlotsBuilder[T, H], slot: Natural): seq[int] =
## Returns the slot indices.
##
if iter =? self.strategy.getIndicies(slot).catch:
if iter =? self.strategy.getIndices(slot).catch:
return toSeq(iter)
func manifest*[T, H](self: SlotsBuilder[T, H]): Manifest =
@ -134,7 +134,7 @@ func manifest*[T, H](self: SlotsBuilder[T, H]): Manifest =
proc buildBlockTree*[T, H](
self: SlotsBuilder[T, H], blkIdx: Natural, slotPos: Natural
): Future[?!(seq[byte], T)] {.async.} =
): Future[?!(seq[byte], T)] {.async: (raises: [CancelledError]).} =
## Build the block digest tree and return a tuple with the
## block data and the tree.
##
@ -167,7 +167,7 @@ proc buildBlockTree*[T, H](
proc getCellHashes*[T, H](
self: SlotsBuilder[T, H], slotIndex: Natural
): Future[?!seq[H]] {.async.} =
): Future[?!seq[H]] {.async: (raises: [CancelledError, IndexingError]).} =
## Collect all the cells from a block and return
## their hashes.
##
@ -184,7 +184,7 @@ proc getCellHashes*[T, H](
slotIndex = slotIndex
let hashes = collect(newSeq):
for i, blkIdx in self.strategy.getIndicies(slotIndex):
for i, blkIdx in self.strategy.getIndices(slotIndex):
logScope:
blkIdx = blkIdx
pos = i
@ -202,19 +202,23 @@ proc getCellHashes*[T, H](
proc buildSlotTree*[T, H](
self: SlotsBuilder[T, H], slotIndex: Natural
): Future[?!T] {.async.} =
): Future[?!T] {.async: (raises: [CancelledError]).} =
## Build the slot tree from the block digest hashes
## and return the tree.
try:
without cellHashes =? (await self.getCellHashes(slotIndex)), err:
error "Failed to select slot blocks", err = err.msg
return failure(err)
T.init(cellHashes)
except IndexingError as err:
error "Failed to build slot tree", err = err.msg
return failure(err)
proc buildSlot*[T, H](
self: SlotsBuilder[T, H], slotIndex: Natural
): Future[?!H] {.async.} =
): Future[?!H] {.async: (raises: [CancelledError]).} =
## Build a slot tree and store the proofs in
## the block store.
##
@ -250,7 +254,9 @@ proc buildSlot*[T, H](
func buildVerifyTree*[T, H](self: SlotsBuilder[T, H], slotRoots: openArray[H]): ?!T =
T.init(@slotRoots)
proc buildSlots*[T, H](self: SlotsBuilder[T, H]): Future[?!void] {.async.} =
proc buildSlots*[T, H](
self: SlotsBuilder[T, H]
): Future[?!void] {.async: (raises: [CancelledError]).} =
## Build all slot trees and store them in the block store.
##
@ -280,7 +286,9 @@ proc buildSlots*[T, H](self: SlotsBuilder[T, H]): Future[?!void] {.async.} =
success()
proc buildManifest*[T, H](self: SlotsBuilder[T, H]): Future[?!Manifest] {.async.} =
proc buildManifest*[T, H](
self: SlotsBuilder[T, H]
): Future[?!Manifest] {.async: (raises: [CancelledError]).} =
if err =? (await self.buildSlots()).errorOption:
error "Failed to build slot roots", err = err.msg
return failure(err)
@ -302,7 +310,7 @@ proc new*[T, H](
_: type SlotsBuilder[T, H],
store: BlockStore,
manifest: Manifest,
strategy = SteppedStrategy,
strategy = LinearStrategy,
cellSize = DefaultCellSize,
): ?!SlotsBuilder[T, H] =
if not manifest.protected:
@ -315,13 +323,15 @@ proc new*[T, H](
cellSize = cellSize
if (manifest.blocksCount mod manifest.numSlots) != 0:
trace "Number of blocks must be divisable by number of slots."
return failure("Number of blocks must be divisable by number of slots.")
const msg = "Number of blocks must be divisible by number of slots."
trace msg
return failure(msg)
let cellSize = if manifest.verifiable: manifest.cellSize else: cellSize
if (manifest.blockSize mod cellSize) != 0.NBytes:
trace "Block size must be divisable by cell size."
return failure("Block size must be divisable by cell size.")
const msg = "Block size must be divisible by cell size."
trace msg
return failure(msg)
let
numSlotBlocks = manifest.numSlotBlocks
@ -344,7 +354,14 @@ proc new*[T, H](
emptyBlock = newSeq[byte](manifest.blockSize.int)
emptyDigestTree = ?T.digestTree(emptyBlock, cellSize.int)
strategy = ?strategy.init(0, numBlocksTotal - 1, manifest.numSlots).catch
strategy =
?strategy.init(
0,
manifest.blocksCount - 1,
manifest.numSlots,
manifest.numSlots,
numPadSlotBlocks,
).catch
logScope:
numSlotBlocks = numSlotBlocks
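A hedged sketch of what the default-strategy change above implies, assuming the usual reading of the names: a linear strategy hands each slot a contiguous run of block indices, while a stepped strategy interleaves them with a stride equal to the number of slots; padding handling in the real builder is omitted and the real implementation may differ in detail:

func linearIndices(blocksCount, numSlots, slot: int): seq[int] =
  # contiguous run of indices for the given slot
  let perSlot = blocksCount div numSlots
  for i in (slot * perSlot) ..< ((slot + 1) * perSlot):
    result.add i

func steppedIndices(blocksCount, numSlots, slot: int): seq[int] =
  # interleaved indices with a stride of numSlots
  var i = slot
  while i < blocksCount:
    result.add i
    i += numSlots

when isMainModule:
  doAssert linearIndices(8, 2, 1) == @[4, 5, 6, 7]
  doAssert steppedIndices(8, 2, 1) == @[1, 3, 5, 7]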

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -38,7 +38,9 @@ type
AnyProof* = CircomProof
AnySampler* = Poseidon2Sampler
# add any other generic type here, eg. Poseidon2Sampler | ReinforceConcreteSampler
AnyBuilder* = Poseidon2Builder
# add any other generic type here, eg. Poseidon2Builder | ReinforceConcreteBuilder
AnyProofInputs* = ProofInputs[Poseidon2Hash]
Prover* = ref object of RootObj
@ -48,7 +50,7 @@ type
proc prove*(
self: Prover, slotIdx: int, manifest: Manifest, challenge: ProofChallenge
): Future[?!(AnyProofInputs, AnyProof)] {.async.} =
): Future[?!(AnyProofInputs, AnyProof)] {.async: (raises: [CancelledError]).} =
## Prove a statement using backend.
## Returns a future that resolves to a proof.
@ -80,7 +82,7 @@ proc prove*(
proc verify*(
self: Prover, proof: AnyProof, inputs: AnyProofInputs
): Future[?!bool] {.async.} =
): Future[?!bool] {.async: (raises: [CancelledError]).} =
## Verify a proof against the given public inputs using the backend.
## Returns a future that resolves to a boolean result.
self.backend.verify(proof, inputs)

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -48,12 +48,12 @@ func getCell*[T, H](
proc getSample*[T, H](
self: DataSampler[T, H], cellIdx: int, slotTreeCid: Cid, slotRoot: H
): Future[?!Sample[H]] {.async.} =
): Future[?!Sample[H]] {.async: (raises: [CancelledError]).} =
let
cellsPerBlock = self.builder.numBlockCells
blkCellIdx = cellIdx.toCellInBlk(cellsPerBlock) # block cell index
blkSlotIdx = cellIdx.toBlkInSlot(cellsPerBlock) # slot tree index
origBlockIdx = self.builder.slotIndicies(self.index)[blkSlotIdx]
origBlockIdx = self.builder.slotIndices(self.index)[blkSlotIdx]
# convert to original dataset block index
logScope:
@ -81,7 +81,7 @@ proc getSample*[T, H](
proc getProofInput*[T, H](
self: DataSampler[T, H], entropy: ProofChallenge, nSamples: Natural
): Future[?!ProofInputs[H]] {.async.} =
): Future[?!ProofInputs[H]] {.async: (raises: [CancelledError]).} =
## Generate proofs as input to the proving circuit.
##

Some files were not shown because too many files have changed in this diff.