Compare commits

250 Commits

Author SHA1 Message Date
Arnaud
60861d6af8
chore: rename codex to logos storage (#1359) 2025-12-18 17:23:09 +00:00
Eric
49e801803f
ci: remove dist tests and devnet deployment (#1338) 2025-12-17 06:03:59 +00:00
Jacek Sieka
858101c74c
chore: bump eth & networking (#1353) 2025-12-15 10:00:51 +00:00
Jacek Sieka
bd49591fff
chore: bump *-serialization (#1352) 2025-12-12 08:03:56 +00:00
Jacek Sieka
6765beee2c
chore: assorted bumps (#1351) 2025-12-11 21:03:36 +00:00
Jacek Sieka
45fec4b524
chore: bump libbacktrace (#1349) 2025-12-11 20:42:53 +00:00
Jacek Sieka
9ac9f6ff3c
chore: drop usage of upraises (#1348) 2025-12-11 09:03:43 +00:00
Arnaud
bd36032251
feat: add c binding (#1322)
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
2025-11-13 07:34:09 +00:00
Chrysostomos Nanakos
be759baf4d
feat: Block exchange optimizations (#1325)
Signed-off-by: Giuliano Mega <giuliano.mega@gmail.com>
Signed-off-by: Chrysostomos Nanakos <chris@include.gr>
Co-authored-by: gmega <giuliano.mega@gmail.com>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
2025-11-13 05:47:02 +00:00
Eric
6147a751f1
fix(ci): Remove macos amd release build (#1337) 2025-11-13 05:37:43 +00:00
Eric
ee47ca8760
feat(libs): Use libp2p multiformats extensions instead of a rolling branch (#1329) 2025-11-13 04:48:33 +00:00
Eric
f791a960f2
fix(ci): Windows SIGILL in CI (#1339) 2025-11-03 11:45:02 +00:00
Arnaud
db8f866db4
feat: check if CID exists in local store (#1331) 2025-11-02 04:32:47 +00:00
Eric
7aca2f0e61
fix(ci): Move conventional commits job to workflow (#1340) 2025-11-02 04:00:55 +00:00
Eric
072bff5cab
fix: ci integration tests (#1335) 2025-10-30 19:38:11 +11:00
Arnaud
af55a761e6
chore: skip marketplace and long integration tests (#1326) 2025-10-22 19:22:33 +11:00
Adam Uhlíř
e3d8d195c3
chore: update nim-libp2p (#1323) 2025-10-01 13:19:15 +02:00
Slava
d1f2e2399b
ci: validate pr title to adhere conventional commits (#1254) 2025-08-12 08:51:41 +00:00
Slava
8cd10edb69
ci: auto deploy codex on devnet (#1302) 2025-07-28 10:02:19 +00:00
Slava
6cf99e255c
ci: release master builds and upload them to the cloud (#1298) 2025-07-10 11:17:11 +00:00
Dmitriy Ryajov
7eb2fb12cc
make default dirs runtime, not compile time. (#1292) 2025-06-26 18:44:24 +00:00
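
A note on what this distinction means in Nim: a `const` default is evaluated on the build machine and baked into the binary, while a `let` default is resolved on the user's machine at startup. A minimal sketch (hypothetical paths, not nim-codex's actual config code):

```nim
import std/os

# Evaluated at compile time: whatever this expands to is baked into the
# released binary, regardless of where it later runs.
const bakedDefault = "/build/machine/.cache/codex"

# Evaluated at run time: resolved against the user's actual home directory.
let runtimeDefault = getHomeDir() / ".cache" / "codex"

echo runtimeDefault
```
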
Slava
352273ff81
chore: bump codex-contracts-eth (#1293) 2025-06-26 18:09:48 +00:00
Slava
9ef9258720
chore(ci): bump node to v22 (#1285) 2025-06-26 01:11:00 +00:00
markspanbroek
7927afe715
chore: update nph dependency (#1279)
Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
2025-06-25 10:30:48 +00:00
markspanbroek
01615354af
refactor(ci): run integration tests in parallel by spinning up more runners (#1287) 2025-06-25 08:56:16 +00:00
Chrysostomos Nanakos
baff902137
fix: resolve shared block request cancellation conflicts (#1284) 2025-06-24 15:05:25 +00:00
markspanbroek
4d44154a40
fix(ci): remove "update" to gcc-14 on windows (#1288) 2025-06-24 09:00:56 +00:00
markspanbroek
e1c397e112
fix(tests): auto import all tests files and fix forgotten tests (#1281) 2025-06-23 11:18:59 +00:00
Arnaud
7b660e3554
chore(marketplace): use hardhat ignition (#1195) 2025-06-20 15:55:00 +00:00
Arnaud
c5e424ff1b
feat(marketplace) - add status l2 (Linea) network (#1160) 2025-06-20 12:30:40 +00:00
Slava
36f64ad3e6
chore: update testnet marketplace address (#1283) 2025-06-20 06:13:58 +00:00
Ben Bierens
235c0ec842
chore: updates codex-contracts-eth submodule (#1278)
Co-authored-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
2025-06-19 10:31:52 +00:00
Arnaud
d443df441d
chore: improve marketplace integration tests (#1268) 2025-06-19 06:36:10 +00:00
Arnaud
e35aec7870
chore: increase gas limits (#1272) 2025-06-18 12:18:56 +00:00
Slava
93e4e0f177
ci(docker): add stable tag for dist-tests images (#1273) 2025-06-16 16:22:09 +00:00
Slava
6db6bf5f72
feat(docker): adjust entrypoint (#1271)
Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
2025-06-14 04:25:29 +00:00
Chrysostomos Nanakos
b305e00160
Add support for slot reconstruction on unavailable slot detection (#1235)
Co-authored-by: Arnaud <arnaud@status.im>
2025-06-12 22:19:42 +00:00
Slava
3d2d8273e6
chore: update testnet marketplace address (#1270) 2025-06-12 08:10:22 +00:00
Slava
e324ac8ca5
feat(docker): add codex network support for docker-entrypoint (#1262)
Co-authored-by: Ben Bierens <39762930+benbierens@users.noreply.github.com>
2025-06-11 14:02:39 +00:00
Adam Uhlíř
f267d99ea8
ci: docker stable tag (#1265) 2025-06-11 13:49:39 +00:00
Ben Bierens
8af73e02a9
bumps to latest nim-json-rpc (#1267) 2025-06-11 11:58:49 +00:00
markspanbroek
27d807a841
fix(sales): fix marketplace block expiry (#1258) 2025-06-11 11:27:09 +00:00
Ben Bierens
85823342e9
Improves logging in maintenance module and erasure. (#1264) 2025-06-10 13:27:52 +00:00
Ben Bierens
09a8419942
bumps codex-contracts-eth (#1261) 2025-06-10 09:18:04 +00:00
Adam Uhlíř
7502b9ad2c
feat(cirdl): auto-discovery of marketplace contract (#1259) 2025-06-09 10:04:15 +00:00
Arnaud
3e17207a0b
feat(marketplace) - add command line arg for maxPriorityFeePerGas (#1189) 2025-06-05 07:47:39 +00:00
Eric
1bea94c390
fix(tests): fetching past contract events test (#1255) 2025-06-04 20:36:09 -07:00
markspanbroek
ffbbee01b1
fix(purchasing): fix crash completing future more than once (#1249) 2025-06-04 14:15:07 +00:00
markspanbroek
2dd436bfb7
fix(sales): do not crash when retrieving request fails (#1248) 2025-06-04 11:22:14 +00:00
Arnaud
2e1306ac2d
chore: fix custom error handling when simulating invalid proofs (#1217)
* Fix custom error handling when simulating invalid proofs

* Update error message
2025-06-03 12:11:18 +00:00
Arnaud
45ade0e3c1
chore(marketplace): use canMarkProofAsMissing (#1188)
* Add canProofBeMarkedAsMissing

* Add more tests

* Update contracts submodule
2025-06-03 09:08:57 +00:00
Arnaud
ca869f6dce
fix(availabilities): use totalRemainingCollateral instead of totalCollateral for comparison (#1229)
* Use totalRemainingCollateral instead of totalCollateral to compare the availability changes

* Update test to use totalRemainingCollateral instead of totalCollateral when testing OnAvailabilitySaved

* Reduce poll interval

* Fix flaky test

* Fix format
2025-06-02 16:47:12 +00:00
Slava
e43872d0b8
chore: update testnet marketplace address (#1245)
https://github.com/codex-storage/nim-codex/issues/1241
2025-05-30 09:12:55 +00:00
Giuliano Mega
d59c5b023c
chore: bump Nim to 2.2.4 (#1242)
* chore: bump Nim to 2.2.4

* fix: resolve symbol ambiguity and drop auto type

* fix: use reference to task instead of pointer or the compiler will deallocate `task` before the encoding/decoding is done

* fix: convention that maxCollateralPerByte equals totalRemainingCollateral when freeSize is 0 to avoid DivByZeroDefect

* fix: bump compiler version in CI pipeline as well
2025-05-29 16:37:38 -07:00
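
One of the bullets above fixes a DivByZeroDefect by convention rather than by formula. A sketch of that guard using stint's UInt256 (illustrative shape; the real code lives in the sales/availability logic):

```nim
import pkg/stint

# Convention from the commit above: when freeSize is 0, define the result
# as totalRemainingCollateral instead of dividing, which would raise
# DivByZeroDefect.
proc maxCollateralPerByte(totalRemainingCollateral, freeSize: UInt256): UInt256 =
  if freeSize.isZero:
    totalRemainingCollateral
  else:
    totalRemainingCollateral div freeSize
```
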
Arnaud
28a83db69e
chore: returns the collateral when a slot is reserved but not filled (#1216)
* Change token allowance method because increaseAllowance does not exist anymore

* Returns collateral when a reservation is deleted, not only when a slot is filled

* Remove the returnedCollateral when the slot is not filled by the host

* Add returnedCollateral when the sale is ignored

* Add returnsCollateral variable for ignored state

* Rebase the contracts submodule on the master

* Add integration test

* Fix duration

* Remove unnecessary teardown function

* Remove misleading comment

* Get returned collateral from the request

* Enable logs to debug on CI

* Fix test

* Increase test timeout

* Fix typo

* Fix rebase
2025-05-29 14:47:37 +00:00
Slava
13811825b3
ci: use macos arm runners (#1174)
* ci: use inputs instead of matrix in a ccache key

* ci: switch to arm runners for macos

* ci: use node 20

* ci: pass cpu to a composite action
2025-05-29 10:17:46 +00:00
Arnaud
827d9ccccf
Update contracts (#1238) 2025-05-29 08:27:41 +00:00
Arnaud
c689542579
fix: sales cleanup cancellation (#1234)
* fix(sales): handle cancellation of slot cleanup

Ensures that processing slots from the slot queue
continues even when cleanup of a slot is cancelled.

Co-Authored-By: Eric <5089238+emizzle@users.noreply.github.com>

* chore(reservations): add more `raises` annotations

* Fix cleanup cancellation

* Add remove-agent to trackedfutures instead of the cleanup function

* Increase the timeout to match the request expiry

* Enable logs to debug on CI

* Remove useless except and do not return when adding an item back to the slot queue fails

* Reduce poll interval to detect sale cancelled state

* Avoid cancelling cleanup routine

* Do not cancel creating reservation in order to avoid inconsistent state

* Remove useless try except

---------

Co-authored-by: Mark Spanbroek <mark@spanbroek.net>
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
2025-05-29 06:57:05 +00:00
munna0908
71422f0d3d
fix: Support for mapping multiple listener addresses (#1236)
* multi address mapping support

* fix thread issues

* fix local thread var issue

* chore: rename stopNatThread to stopNatThreads

---------

Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
2025-05-27 09:05:43 +00:00
markspanbroek
25a8077e80
fix(integration): fix api validation test (#1201)
* integration: shutdown codex node at end of test

On Windows the codex node did not shut down properly after this test
finished.

* contracts: fix flaky test
2025-05-26 16:49:53 +00:00
Ben Bierens
bfbd7264df
Adds missing async-raises for prover.verify (#1237) 2025-05-26 15:48:59 +00:00
Arnaud
f7d06cd0e8
chore(marketplace): switch to websocket (#1166)
* Switch to websocket

* Create resubscribe future

* Resubscribe websocket events after 5 minutes

* Remove the subscribe workaround and define the resubscribe symbol

* Use localhost for ws url

* Define 240 seconds for resubscription interval

* Ensure that updates are in sync when using ws
2025-05-23 14:13:19 +00:00
Marcin Czenko
748830570a
checked exceptions in stores (#1179)
* checked exceptions in stores

* makes asynciter as exception-safe as it gets

* introduce "SafeAsyncIter" that uses Results and limits exceptions to cancellations

* adds {.push raises: [].} to errors

* uses SafeAsyncIter in "listBlocks" and in "getBlockExpirations"

* simplifies safeasynciter (magic of auto)

* gets rid of ugly casts

* tiny fix in the way we create raising futures in tests of safeasynciter

* Removes two more casts caused by using checked exceptions

* adds an extended explanation of one more complex SafeAsyncIter test

* adds missing "finishOnErr" param in slice constructor of SafeAsyncIter

* better fix for "Error: Exception can raise an unlisted exception: Exception" error.

---------

Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
2025-05-21 21:17:04 +00:00
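
A rough sketch of the SafeAsyncIter idea from #1179 — items arrive as Results, so consuming the iterator can only raise on cancellation (assumed shape, not the actual definition):

```nim
import pkg/chronos
import pkg/results

type
  SafeAsyncIter[T] = ref object
    getNext: proc(): Future[Result[T, string]] {.gcsafe.}
    finished: bool

# Consuming the iterator inspects Result values instead of catching
# exceptions; only CancelledError can still escape from the awaits.
proc consume[T](it: SafeAsyncIter[T]) {.async.} =
  while not it.finished:
    let res = await it.getNext()
    if res.isErr:
      break          # failure arrives as a value, not a raised exception
    echo res.get()
```
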
markspanbroek
bde98738c2
fix(slotqueue): simplify slot queue workers (#1224)
* fix(slotqueue): simplify slot queue workers

- worker is now just an async running loop
- instead of passing a "done" Future, use an
  AsyncEvent to signal completion

* chore(slotqueue): address review comments

Co-Authored-By: Eric <5089238+emizzle@users.noreply.github.com>
Co-Authored-By: Dmitriy Ryajov <dryajov@gmail.com>

---------

Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
2025-05-15 13:02:04 +00:00
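
The reworked worker in #1224 boils down to a plain async loop that signals completion through chronos's AsyncEvent rather than completing a passed-in "done" Future. A minimal sketch with hypothetical names, not the actual slotqueue API:

```nim
import pkg/chronos

type SlotQueue = ref object
  work: AsyncQueue[int]       # stand-in for queued slot items
  doneProcessing: AsyncEvent  # replaces the old "done" Future
  running: bool

proc worker(queue: SlotQueue) {.async.} =
  # the worker is now just an async running loop
  while queue.running:
    let item = await queue.work.get() # wait for the next item
    discard item                      # stand-in for dispatching the item
    queue.doneProcessing.fire()       # signal completion via AsyncEvent
```
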
Adam Uhlíř
28e87d06cc
docs(openapi): freeSize non-optional (#1211) 2025-05-14 10:14:40 +00:00
Adam Uhlíř
f144099377
fix(api): availability creation validation (#1212) 2025-05-14 08:46:16 +00:00
Adam Uhlíř
19a5e05c13
docs(openapi): add local data delete endpoint (#1214)
* docs(openapi): add local data delete endpoint

* chore: feedback

Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
Signed-off-by: Adam Uhlíř <adam@uhlir.dev>

---------

Signed-off-by: Adam Uhlíř <adam@uhlir.dev>
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
2025-05-03 16:54:38 +00:00
Slava
b39d541227
chore: update testnet marketplace address (#1209)
https://github.com/codex-storage/nim-codex/issues/1203
2025-04-23 06:18:38 +00:00
Adam Uhlíř
d220e53fe1
ci: trigger python generator upon release (#1208) 2025-04-22 14:46:03 +00:00
Ben Bierens
2eb83a0ebb
Codex-contracts hash in version information. (#1207)
* Adds revision hash of codex-contracts to version information.

* Update codex/conf.nim

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Signed-off-by: Ben Bierens <39762930+benbierens@users.noreply.github.com>

* Update codex/conf.nim

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Signed-off-by: Ben Bierens <39762930+benbierens@users.noreply.github.com>

* Update codex/rest/api.nim

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Signed-off-by: Ben Bierens <39762930+benbierens@users.noreply.github.com>

* simplified git command

* Remove space

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Signed-off-by: Giuliano Mega <giuliano.mega@gmail.com>

* Updates openapi.yaml

---------

Signed-off-by: Ben Bierens <39762930+benbierens@users.noreply.github.com>
Signed-off-by: Giuliano Mega <giuliano.mega@gmail.com>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: Giuliano Mega <giuliano.mega@gmail.com>
2025-04-22 14:32:32 +00:00
Slava
22f5150d1d
ci: add compatible contracts image for nim-codex dist-tests docker image (#1204) 2025-04-18 14:21:24 +00:00
Eric
0f152d333c
chore: bump contracts to master (#1197)
Bump contracts to master branch.

There was a change that allowed hardhat to have multiple blocks with the same timestamp, so this needed to be reflected in two tests.
2025-04-17 06:13:43 +00:00
Adam Uhlíř
acf81d0571
chore: add marketplace topic to reservations (#1193) 2025-04-17 04:05:53 +00:00
Adam Uhlíř
7c7871ac75
ci: add compatible contracts image for nim-codex docker image (#1186)
* ci: add compatible contracts image for nim-codex docker image

* ci: with submodules

* ci: with submodules on correct place

* ci: remove double dash

* ci: avoiding artifact conflicts

* ci: add labels to arch images

* ci: correct way to add label to arch images

* ci: correct contract label

* ci: avoid building contracts image and use contracts commit hash

* refactor: better way to get the hash
2025-04-15 13:52:19 +00:00
markspanbroek
b92f79a654
Increase gas estimates (#1192)
* update nim-ethers to version 2.0.0

To allow for gas estimation of contract calls

* contracts: add 10% extra gas to contract calls

These calls could otherwise run out of gas because
the on-chain state may have changed between the time
of the estimate and the time of processing the
transaction.
2025-04-15 10:31:06 +00:00
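
The 10% headroom in #1192 is simple arithmetic on the gas estimate; roughly (illustrative only, not nim-ethers' API):

```nim
import pkg/stint

# Pad a gas estimate by 10% so that on-chain state changes between
# estimation and execution don't make the call run out of gas.
proc padGasEstimate(estimate: UInt256): UInt256 =
  estimate + estimate div 10.u256
```
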
Arnaud
6f62afef75
Apply changes to the openapi file (#1187) 2025-04-04 12:58:23 +00:00
Arnaud
4e2a321ad5
chore(openapi): add required parameters (#1178)
* Update the openapi file

* Fix typo

* Remove SalesAvailabilityCREATE and add collateralPerByte

* Fix SalesAvailability reference

* chore: adding perf optimization tweaks to openapi (#1182)

* chore: adding perf optimization tweaks to openapi

* chore: slotsize integer

---------

Co-authored-by: Adam Uhlíř <adam@uhlir.dev>
2025-04-02 14:09:23 +00:00
Slava
1213377ac4
ci: switch out from ubuntu 20.04 (#1184)
* ci: use ubuntu-latest for coverage (#1141)

* ci: pass --keep-going to lcov and genhtml (#1141)

* ci: use ubuntu-22.04 for release workflow (#1141)

* ci: install gcc-14 on linux (#1141)

* chore: bump nim-leveldbstatic to 0.2.1
2025-04-02 09:09:43 +00:00
munna0908
e9c6d19873
use constantine sha256 for codex tree hashing (#1168) 2025-03-31 06:41:08 +00:00
Marcin Czenko
5ec3b2b027
make sure we do not call "get" on unverified Result while fetching in batches (#1169)
* makes sure we do not call "get" on unverified result

* make handling of failed blocks in fetchBatched even more explicit

* simplifies allFinishedValues and makes it independent from allFinishedFailed

* only sleep if not iter.finished in fetchBatched
2025-03-31 04:57:55 +00:00
Marcin Czenko
0ec52abc98
fixes RandomChunker not respecting padding (#1170) 2025-03-31 04:48:22 +00:00
Arnaud
0032e60398
fix(marketplace): catch Marketplace_SlotIsFree and continue the cancelled process (#1139)
* Catch Marketplace_SlotIsFree and continue the cancelled process

* Add log message when the slot is free during failed state

* Reduce log level to debug for slot free error

* Separate slot mock errors

* Initialize variable in setup

* Improve tests

* Remove non-meaningful checks and rename test

* Remove the Option in the error setters

* Return collateral when the state is cancelled only if the slot is filled by the host

* Do not propagate AsyncLockError

* Wrap contract error into specific error type

* Remove debug message

* Catch only SlotStateMismatchError in cancelled

* Fix error

* Remove returnBytesWas

* Use MarketError after raises pragma were defined

* Fix typo

* Fix lint
2025-03-26 15:17:39 +00:00
Arnaud
7deeb7d2b3
feat(marketplace): persistent availabilities (#1099)
* Add availability enabled parameter

* Return bytes to availability when finished

* Add until parameter

* Remove debug message

* Clean up and fix tests

* Update documentations and cleanup

* Avoid swallowing CancelledError

* Move until validation to reservations module

* Call onAvailabilityAdded callback when the availability is enabled in sales

* Remove until validation in restapi when creating an availability

* Add openapi documentation

* Use results instead of stew/results (#1112)

* feat: request duration limit (#1057)

* feat: request duration limit

* Fix tests and duration type

* Add custom error

* Remove merge issue

* Update codex contracts eth

* Update market config and fix test

* Fix SlotReservationsConfig syntax

* Update dependencies

* test: remove doubled test

* chore: update contracts repo

---------

Co-authored-by: Arnaud <arnaud@status.im>

* fix(statemachine): do not raise from state.run (#1115)

* fix(statemachine): do not raise from state.run

* fix rebase

* fix exception handling in SaleProvingSimulated.prove

- re-raise CancelledError
- don't return State on CatchableError
- expect the Proofs_InvalidProof custom error instead of checking a string

* asyncSpawn salesagent.onCancelled

This was swallowing a KeyError in one of the tests (fixed in the previous commit)

* remove error handling states in asyncstatemachine

* revert unneeded changes

* formatting

* PR feedback, logging updates

* chore(integration): simplify block expiration integration test (#1100)

* chore(integration): simplify block expiration integration test

* clean up

* fix after rebase

* perf: contract storage optimizations (#1094)

* perf: contract storage optimizations

* Apply optimization changes

* Apply optimizing parameters sizing

* Update codex-contracts-eth

* bump latest changes in contracts branch

* Change requestDurationLimit to uint64

* fix tests

* fix tests

---------

Co-authored-by: Arnaud <arnaud@status.im>
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>

* bump contracts to master (#1122)

* Add availability enabled parameter

* Return bytes to availability when finished

* Add until parameter

* Clean up and fix tests

* Move until validation to reservations module

* Apply suggestion changes: return the reservation module error

* Apply suggestion changes for until dates

* Apply suggestion changes: reorganize tests

* Fix indent

* Remove test related to timing issue

* Add raises errors to async pragma and remove useless try except

* Update open api documentation

* Fix wording

* Remove the httpClient restart statements

* Use market.getRequestEnd to set validUntil

* Remove returnBytes

* Use clock.now in testing

* Move the api validation file to the right file

---------

Co-authored-by: Adam Uhlíř <adam@uhlir.dev>
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
2025-03-26 11:45:22 +00:00
Arnaud
60b6996eb0
chore(marketplace): define raises for async pragma (#1165)
* Define raises for async pragma

* Update nim ethers

* Replace CatchableError by MarketError
2025-03-26 08:06:37 +00:00
Arnaud
a0d6fbaf02
chore(marketplace) - fix the http error codes when validating the availability requests (#1104)
* Use 422 http code when there is a validation error

* Update the open api description

* Fix typo

* Add more tests for total size

* Catch CancelledError because TrackedFuture raise no error

* Split rest api validation test to a new file

* Change the way of testing negative numbers

* Rename client variable and fix test status code

* Try to reduce the number of requests in CI when asserting in tests

* Fix rebase and remove safeEventually
2025-03-24 15:47:05 +00:00
Arnaud
709a8648fd
chore: add request validations (#1144)
* Add request validations

* Define expiry as required field in storage request params and fix tests

* Fix error messages

* Enable logs to figure out the issue with recurring failing test on macos

* Add custom errors raised by contract

* Remove custom error non existing anymore

* Update asynctest module

* Update timer tests after updating asynctest
2025-03-24 11:53:34 +00:00
Dmitriy Ryajov
110147d8ef
monitor background tasks on streaming dataset (#1164) 2025-03-21 17:23:07 +00:00
munna0908
3a312596bf
deps: upgrade libp2p & constantine (#1167)
* upgrade libp2p and constantine

* fix libp2p update issues

* add missing vendor package

* add missing vendor package
2025-03-20 19:11:00 -07:00
Arnaud
9d7b521519
chore: add missing custom errors (#1134)
* Add missing custom errors

* Separate mock state errors

* Remove the Option in the error setters

* Wrap the contract errors in MarketError

* Remove async raises (needs to address it in another PR)

* Wrap contract errors into specific error types

* Rename SlotNotFreeError to SlotStateMismatchError
2025-03-18 07:06:46 +00:00
Giuliano Mega
54177e9fbf
feat(integration): use async client instead of standard Nim HTTP client (#1159)
* WiP: migrating CodexClient to chronos http client

* fix(api): fixes #1163

* feat: fully working API integration tests

* convert most of the tests in testupdownload

* feat: working updownload tests on async client

* feat: make testsales work with async codexclient

* feat: make testpurchasing work with async codexclient

* feat: make testblockexpiration work with async codexclient

* feat: make marketplacesuite work with async codexclient

* make testproofs work with async codexclient

* chore: refactor client to express higher level in terms of lower level operations

* fix: set correct content-length for erasure-coded datasets

* feat: make testecbug work with async client

* feat: make testvalidator work with async client

* refactor: simplify request aliases, add close operation

* wire back client.close at node shutdown

* refactor: remove unused exception

* fix: use await instead of waitFor on async call sites
2025-03-17 20:08:24 +00:00
munna0908
75db491d84
fix: optimise erasure encode/decode (#1123)
* avoid copying block,parity data to shared memory

* use alloc instead of allocShared

* code cleanup
2025-03-14 13:09:18 +00:00
tianzedavid
f1b84dc6d1
chore: fix some typos (#1110)
Signed-off-by: tianzedavid <cuitianze@aliyun.com>
Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
2025-03-13 22:46:44 +00:00
Eric
a5db757de3
fix: ethers no longer leaks AsyncLockError (#1146)
* fix: ethers no longer leaks AsyncLockError

* Add message to convertEthersError

- adds a message to convertEthersError allowing contextual error messages
- replaces try/except EthersError with convertEthersError (PR feedback)

* bump ethers after PR merged upstream
2025-03-13 22:46:05 +00:00
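
The convertEthersError helper mentioned above replaces scattered try/except EthersError blocks with a single wrapper that attaches context. A rough sketch of such a template, with stand-in error types (assumed shape, not the actual nim-codex code):

```nim
type
  EthersError = object of CatchableError # stand-in for nim-ethers' error type
  MarketError = object of CatchableError # stand-in for codex's market error

# Wrap a block of contract calls, converting any EthersError into a
# MarketError that carries the contextual message.
template convertEthersError(message: string, body: untyped) =
  try:
    body
  except EthersError as e:
    raise newException(MarketError, message & ": " & e.msg)

when isMainModule:
  try:
    convertEthersError("withdrawing funds"):
      raise newException(EthersError, "gas estimation failed")
  except MarketError as e:
    echo e.msg  # -> withdrawing funds: gas estimation failed
```
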
Ben Bierens
a0ddcef08d
changes trace to info for updates of the announce/dht record logs (#1156) 2025-03-13 22:45:44 +00:00
Dmitriy Ryajov
1cac3e2a11
Fix/rework async exceptions (#1130)
* cleanup imports and logs

* add BlockHandle type

* revert deps

* refactor: async error handling and future tracking improvements

- Update async procedures to use explicit raises annotation
- Modify TrackedFutures to handle futures with no raised exceptions
- Replace `asyncSpawn` with explicit future tracking
- Update test suites to use `unittest2`
- Standardize error handling across network and async components
- Remove deprecated error handling patterns

This commit introduces a more robust approach to async error handling and future management, improving type safety and reducing potential runtime errors.

* bump nim-serde

* remove asyncSpawn

* rework background downloads and prefetch

* improve logging

* refactor: enhance async procedures with error handling and raise annotations

* misc cleanup

* misc

* refactor: implement allFinishedFailed to aggregate future results with success and failure tracking

* refactor: update error handling in reader procedures to raise ChunkerError and CancelledError

* refactor: improve error handling in wantListHandler and accountHandler procedures

* refactor: simplify LPStreamReadError creation by consolidating parameters

* refactor: enhance error handling in AsyncStreamWrapper to catch unexpected errors

* refactor: enhance error handling in advertiser and discovery loops to improve resilience

* misc

* refactor: improve code structure and readability

* remove cancellation from addSlotToQueue

* refactor: add assertion for unexpected errors in local store checks

* refactor: prevent tracking of finished futures and improve test assertions

* refactor: improve error handling in local store checks

* remove usage of msgDetail

* feat: add initial implementation of discovery engine and related components

* refactor: improve task scheduling logic by removing unnecessary break statement

* break after scheduling a task

* make taskHandler cancelable

* refactor: update async handlers to raise CancelledError

* refactor(advertiser): streamline error handling and improve task flow in advertise loops

* fix: correct spelling of "divisible" in error messages and comments

* refactor(discovery): simplify discovery task loop and improve error handling

* refactor(engine): filter peers before processing in cancelBlocks procedure
2025-03-13 07:33:15 -07:00
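
Several bullets above replace fire-and-forget asyncSpawn calls with explicit future tracking. A stripped-down sketch of the idea (assumed shape, not the actual TrackedFutures module):

```nim
import pkg/chronos

type TrackedFutures = ref object
  futures: seq[Future[void]]

proc track(t: TrackedFutures, fut: Future[void]) =
  # already-finished futures need no tracking; only pending work is kept
  if not fut.finished:
    t.futures.add(fut)

proc cancelTracked(t: TrackedFutures) {.async.} =
  # on shutdown, cancel and await everything still in flight
  for fut in t.futures:
    if not fut.finished:
      await fut.cancelAndWait()
  t.futures.setLen(0)
```
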
Arnaud
2538ff8da3
chore: create new httpClient per request (#1136)
* Create new httpClient per request

* Fix tests after rebase and close the clients at the end
2025-03-12 13:41:00 +00:00
Arnaud
17d3bb55cf
chore(marketplace): notify sales when duration, minPricePerBytePerSecond or totalCollateral is updated (#1148)
* Call onAvailabilityAdded when freeSize, duration or minPricePerBytePerSecond is increased

* Rename onAvailabilityAdded to onAvailabilitySaved

* Rename OnAvailabilitySaved to OnAvailabilityUpserted

* Go back to OnAvailabilitySaved
2025-03-12 09:12:06 +00:00
Arnaud
703921df32
chore(restapi): add headers to support progress reporting when downloading (#1150)
* Add headers to support progress reporting on download

* Replace http session by http client in downloadBytes

* Use int instead of int64 for datasetSize

* Rename variable to avoid shadowing client
2025-03-10 15:59:24 +00:00
Giuliano Mega
2a3a29720f
Fixes Codex crashes on interrupted downloads (#1151)
* fix: fixes Codex crashes on interrupted downloads

* fix: add better feedback to 404, minor rewording in test comment
2025-03-10 13:27:16 +00:00
Arnaud
eb09e610d5
fix(ci): handle coverage as a string to enable gcc 14 on linux (#1140)
* Handle coverage as a string not a boolean

* Update ubuntu version to latest
2025-03-05 08:35:46 +00:00
Arnaud
7065718e09
feat(marketplace): indicate that slot is being repaired when trying to download (#1083)
* Indicate that slot is being repaired when trying to download

* Fix tests

* Apply nph

* Calculate the repair collateral when adding the item into the queue

* Add slotCollateral calculation with getRequest cache and remove populationItem function

* Update with pricePerByte

* Simplify StorageAsk parameter

* Minor fixes

* Move cache request to another PR

* Rename SlotQueueItem collateral and required in init

* Use override func to optimise calls when the slot state is known

* Remove unused code

* Cosmetic change

* Use raiseMarketError helper

* Add exceptions to async pragma

* Cosmetic change

* Use raiseMarketError helper

* Let slotCollateral determine the slot state

* Use configSync to avoid async pragma in onStorageRequested

* Add loadConfig function

* Add CatchableError to async pragma

* Add missing pragma raises errors

* Move loadConfig

* Avoid swallowing CancelledError

* Avoid swallowing CancelledError

* Avoid swallowing CancelledError

* Update error messages

* Except MarketError instead of CatchableError

* Fix merge issue

* Log fatal when configuration cannot be loaded

* Propagate MarketError in slotCollateral

* Remove useless configSync

* Use result with explicit error

* Fix syntax

---------

Signed-off-by: Arnaud <arnaud@status.im>
2025-02-27 16:58:23 +00:00
Ben Bierens
fab5e16afd
Missing nullability causes json-serialize failure in some generated clients. (#1129) 2025-02-27 11:29:27 +00:00
Slava
16dce0fc43
chore: update testnet marketplace address (#1127)
https://github.com/codex-storage/nim-codex/issues/1126
2025-02-25 09:19:29 +00:00
Dmitriy Ryajov
a609baea26
Add basic retry functionality (#1119)
* adding basic retry functionality

* avoid duplicate requests and batch them

* fix cancelling blocks

* properly resolve blocks

* minor cleanup - use `self`

* avoid useless asyncSpawn

* track retries

* limit max inflight and set libp2p maxIncomingStreams

* cleanup

* add basic yield in readLoop

* use tuple instead of object

* cleanup imports and logs

* increase defaults

* wip

* fix prefetch batching

* cleanup

* decrease timeouts to speedup tests

* remove outdated test

* add retry tests

* should track retries

* remove useless test

* use correct block address (index was off by 1)

* remove duplicate noop proc

* add BlockHandle type

* Use BlockHandle type

* add fetchLocal to control batching from local store

* add format target

* revert deps

* adjust quotaMaxBytes

* cleanup imports and logs

* revert deps

* cleanup blocks on cancelled

* terminate erasure and prefetch jobs on stream end

* split storing and retrieving data into separate tests

* track `b.discoveryLoop` future

* misc

* remove useless check
2025-02-24 21:01:23 +00:00
Eric
f6aee4ff6e
bump contracts to master (#1122) 2025-02-21 11:02:36 +00:00
Adam Uhlíř
44981d24d0
perf: contract storage optimizations (#1094)
* perf: contract storage optimizations

* Apply optimization changes

* Apply optimizing parameters sizing

* Update codex-contracts-eth

* bump latest changes in contracts branch

* Change requestDurationLimit to uint64

* fix tests

* fix tests

---------

Co-authored-by: Arnaud <arnaud@status.im>
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
2025-02-20 07:11:06 +00:00
Eric
04327a3986
chore(integration): simplify block expiration integration test (#1100)
* chore(integration): simplify block expiration integration test

* clean up

* fix after rebase
2025-02-20 05:52:51 +00:00
Eric
87590f43ce
fix(statemachine): do not raise from state.run (#1115)
* fix(statemachine): do not raise from state.run

* fix rebase

* fix exception handling in SaleProvingSimulated.prove

- re-raise CancelledError
- don't return State on CatchableError
- expect the Proofs_InvalidProof custom error instead of checking a string

* asyncSpawn salesagent.onCancelled

This was swallowing a KeyError in one of the tests (fixed in the previous commit)

* remove error handling states in asyncstatemachine

* revert unneeded changes

* formatting

* PR feedback, logging updates
2025-02-19 00:18:45 +00:00
Adam Uhlíř
1052dad30c
feat: request duration limit (#1057)
* feat: request duration limit

* Fix tests and duration type

* Add custom error

* Remove merge issue

* Update codex contracts eth

* Update market config and fix test

* Fix SlotReservationsConfig syntax

* Update dependencies

* test: remove doubled test

* chore: update contracts repo

---------

Co-authored-by: Arnaud <arnaud@status.im>
2025-02-18 19:41:54 +00:00
Arnaud
2298a0bf81
Use results instead of stew/results (#1112) 2025-02-18 10:17:05 +00:00
Arnaud
0107eb06fe
chore(marketplace): cid should be bytes (#1073)
* Change cid format from string to bytes for the marketplace

* refactor: marketplace custom errors handling

* chore: update contracts repo

* chore: update contracts submodule

* Update contracts submodule

* Initialize the Cid using init function

* Restore serialize pragma

* Use Cid object instead of buffer

* Simplify cid usage

* Simplify cid usage

* bump codex-contracts-eth after PR merge, formatting

* fix rebase

* collateralPerByte => collateralPerSlot

---------

Co-authored-by: Adam Uhlíř <adam@uhlir.dev>
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
2025-02-18 05:47:47 +00:00
Arnaud
6e73338425
Remove deprecated function (#1111)
Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
2025-02-17 22:04:04 +00:00
Eric
5af3477793
chore(deps): bump ethers to propagate cancellations (#1116)
* chore(deps): bump ethers to propagate cancellations

Ethers was swallowing cancellations and turning them into EthersErrors, which was causing the sales statemachine to error when it should have been simply cancelling the current state's run. Hopefully fixes the intermittently failing marketplace integration test.

* Add missing errors in async raises pragma

* bump to version of ethers that supports cancellations

---------

Co-authored-by: Arnaud <arnaud@status.im>
2025-02-17 22:00:52 +00:00
Arnaud
dc08ff8840
chore(marketplace): add a cache for storage requests (#1090)
* Add a cache for requests

* Change request cache description message and use const as default value

* Set request cache size configuration hidden
2025-02-17 10:34:42 +00:00
Giuliano Mega
25c84f4e0e
Fix/repostore deletes for non-orphan blocks (#1109)
* fix: fix deletion of non-orphan blocks

* feat: improve error feedback for illegal direct block deletes

* chore: minor rewording of test header
2025-02-14 13:34:17 +00:00
munna0908
c65148822e
feat: multithreading support for erasure coding (#1087)
* implement async encode

* implement async decode

* cleanup code

* add num-threads flag

* fix tests

* code cleanup

* improve return types and exception handling for async proc

* add validation check for numThreads flag

* modify encode method

* add new tests for async encoding

* modify decode method

* cleanup test cases

* add new cli flag for threadCount

* test cleanup

* add new tests

* fix decodeAsync exception handling

* code cleanup

* chore: cosmetic changes
2025-02-12 17:56:26 +00:00
Dmitriy Ryajov
45e97513a7
remove uploadedAt from manifest (#1091)
* remove uploadedAt from manifest

* fix test
2025-02-12 10:48:58 +00:00
Slava
20f6fef7ab
fix: use ubuntu-24.04 runners for docker workflows (#1102)
Co-authored-by: Giuliano Mega <giuliano.mega@gmail.com>
2025-02-11 21:49:37 +00:00
Giuliano Mega
bbe1f09cd7
Purging of local datasets (#1103)
* feat(codex-node): add dataset deletion API to Codex node

* feat(api): add deletion of local datasets to API

* fix: logging, remove garbage, drop some CORS headers from DELETE request

* fix: change empty response return code to 204 instead of 200

* fix: add time-based idling to avoid locking up the node during deletes, fix API status code

* fix: uncomment commented tests committed by accident

* fix: return correct code when missing CID is a Manifest CID; add back CORS headers

* fix: remove lingering echo
2025-02-11 19:00:05 +00:00
Marcin Czenko
11888e78d7
deploy openapi spec only when tagged (#1106) 2025-02-11 15:16:45 +00:00
Marcin Czenko
8880ad9cd4
fix linting in "codex/blockexchange/engine/engine.nim" (#1107) 2025-02-11 10:47:25 +00:00
Eric
dfa90a9981
fix(build): compilation on macos when including nim-nat-traversal (#1084)
* fix(build): compilation on macos when including nim-nat-traversal

- removes the `VERSION` rename to `VERSION_temp` in the Makefile
- instead, relies on `-iqoute` to include the `nim-nat-traversal/vendor/libnatpmp-upstream` directory in the search paths. `-iquote` will match the `vendor/libnatpmp-upstream/VERSION` file for `#include "version"` and not `#include <version>`, the latter being what is included by the macos sdk and was causing issues with `-I`. The [gcc 14.2 docs](https://gcc.gnu.org/onlinedocs/gcc-14.2.0/cpp/Invocation.html#index-I) describe how `-iquote` alleviates this issue:
> Directories specified with -iquote apply only to the quote form of the directive, #include "file". Directories specified with -I, -isystem, or -idirafter apply to lookup for both the #include "file" and #include <file> directives.

For more info, please see https://github.com/status-im/nim-nat-traversal/pull/34.

* bump nim-nat-traversal

Now that https://github.com/status-im/nim-nat-traversal/pull/34 has been merged, change back to master commit
2025-02-07 01:18:00 +00:00
Dmitriy Ryajov
17d3f99f45
use a case-of instead of if for better readability (#1063) 2025-02-06 21:36:35 +00:00
Csaba Kiraly
e62a09d9b1
add ccache and sccache to speed up CI (#1074)
* add ccache and sccache to speed up CI

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

* include testname and nim version in cache separation

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

* Make sure ccache has precedence over custom clang/llvm

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

* enable ccache for windows

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

* ccache: evict old files

Make sure old unused cache files are not lingering around for long

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

---------

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2025-02-06 21:36:14 +00:00
Marcin Czenko
c05eec422c
fix dataset and slot size calculations in integration tests (#1095)
* fixes datasetSize and slotSize helpers (and also RandomChunker.example)

* adds overload for <<upload>> for seq[byte]

* changes RandomChunker.example to return seq[byte]

* fixes restapi tests after correcting RandomChunker.example

* review: use string.fromBytes from nim-stew to convert seq[byte] to string
2025-02-06 15:21:12 +00:00
Slava
54d499be41
docker: add BOOTSTRAP_NODE_URL to Docker entrypoint (#1098) 2025-02-04 21:22:34 +00:00
Giuliano Mega
0595723f66
Minor improvements to download API (#1092)
* chore: improve error messages in upload API

* chore: remove unreachable (dead) code

* fix: API integration test
2025-02-04 16:01:14 +00:00
Arnaud
58a962add8
Update contracts and add custom errors (#1088) 2025-01-28 11:16:24 +00:00
Marcin Czenko
962fc1cd95
Feat: price per byte (#1078)
* openAPI: StorageRequestCreation: reward => pricePerByte, collateral => collateralPerByte

* purchasing: reward => pricePerByte, collateral => collateralPerByte

* Updates availabilities and reservations to use totalCollateral, minPricePerByte, and maxCollateralPerByte

* Uses correct div operator when operating on UInt256

* proposal updating totalCollateral in availability

* makes sure that reading currentCollateral happens before freeing slot

* Updates naming

* fixes tests: unit and contracts

* uses feat/price-per-byte branch for codex-contracts-eth

* temporarily disables integration tests on CI

* introduces high level <<totalCollateral>> property for a cleaner external interface

* updates integration tests

* Applies review comments

* Updates description of totalCollateral in SalesAvailability

* updates codex-contracts-eth (price-per-byte)
2025-01-24 17:18:00 +00:00
markspanbroek
f6c792de79
fix slotqueue worker starvation (#1081)
* fix slotqueue worker starvation

* improve slotqueue tests

Co-Authored-By: Marcin Czenko <marcin.czenko@pm.me>

* slotqueue nph formatting

---------

Co-authored-by: Marcin Czenko <marcin.czenko@pm.me>
2025-01-23 09:28:14 +00:00
Adam Uhlíř
1c4184f29c
chore: ignoring style formatting commit for git blame (#1085) 2025-01-22 09:05:47 +00:00
Adam Uhlíř
e5df8c50d3
style: nph formatting (#1067)
* style: nph setup

* chore: formates codex/ and tests/ folder with nph 0.6.1
2025-01-21 20:54:46 +00:00
d114e6e942
nix: build nim compiler
Using the nimbus build system, since codex now uses a Nim version >2. Tested with the Nim versions (>2) available in nixpkgs, but none of them work, hence the removal of USE_SYSTEM_NIM and its logic.
Signed-off-by: markoburcul <marko@status.im>
2025-01-21 16:45:33 +01:00
893f6d02ab
jenkins: create initial pipeline
Referenced issue: https://github.com/codex-storage/nim-codex/issues/940

Signed-off-by: markoburcul <marko@status.im>
2025-01-21 16:45:33 +01:00
Arnaud
389ab59aa7
Use custom error message (#1079) 2025-01-20 16:04:50 +00:00
Giuliano Mega
ac12de37b2
chore: delete leftover code from old threaded erasure coding backend (#1080) 2025-01-20 14:50:33 +00:00
Slava
833e253baa
feat: switch to github arm runners (#1077)
* feat: switch to github arm runners

* chore: fabiocaccamo/create-matrix-action
2025-01-20 14:42:01 +00:00
Arnaud
2ad7c31c85
Remove duplicate header (#971)
Co-authored-by: Giuliano Mega <giuliano.mega@gmail.com>
2025-01-16 13:51:07 +00:00
Arnaud
4606726e27
Add default Content-Disposition header for download (#981) 2025-01-16 13:25:26 +00:00
Adam Uhlíř
68ad804f9e
refactor: marketplace custom errors handling (#1061)
* refactor: marketplace custom errors handling

* chore: update contracts repo

* chore: update contracts submodule
2025-01-16 09:34:44 +00:00
Marko Burčul
39e8e6e6fa
nix: update readme (#1064)
Include the instructions for running nim-codex as a systemd service on
NixOS.

Signed-off-by: markoburcul <marko@status.im>
2025-01-14 08:21:11 +00:00
0cffa02748
nix: add codex service definition
Referenced issue: https://github.com/codex-storage/nim-codex/issues/940

Signed-off-by: markoburcul <marko@status.im>
2025-01-10 17:49:20 +01:00
3dc7224330
nix: update nix packages and dependencies in flake
Signed-off-by: markoburcul <marko@status.im>
2025-01-10 17:49:18 +01:00
Arnaud
f25c555d59
Chore/update nim version (#1052)
* Move to version 2.0.6

* Update nim-confutils submodule to latest version

* Update dependencies

* Update Nim version to 2.0.12

* Add gcsafe pragma

* Add missing import

* Update specific conf for Nim 2.x

* Fix method signatures

* Revert erasure coding attempt to fix bug

* More gcsafe pragma

* Duplicate code from libp2p because it is not exported anymore

* Fix camelcase function names

* Use alreadySeen because need is not a bool anymore

* newLPStreamReadError does not exist anymore so use another error

* Replace ValidIpAddress by IpAddress

* Add gcsafe pragma

* Restore maintenance parameter deleted by mistake when removing erasure coding fix attempt code

* Update method signatures

* Copy LPStreamReadError code from libp2p which was removed

* Fix camel case

* Fix enums in tests

* Fix camel case

* Extract node components to a variable to make Nim 2 happy

* Update the tests using ValidIpAddress to IpAddress

* Fix cast for value which is already an option

* Set nim version to 2.0.x for CI

* Set nim version to 2.0.x for CI

* Move to miniupnp version 2.2.4 to avoid symlink error

* Set core.symlinks to false for Windows for miniupnp >= 2.2.5 support

* Update to Nim 2.0.14

* Update CI nim versions to 2.0.14

* Try with GCC 14

* Replace apt-fast by apt-get

* Update ubuntu runner to latest

* Use Ubuntu 20.04 for coverage

* Disable CI cache for coverage

* Add coverage property description

* Remove commented test

* Check the node value of seen instead of using alreadySeen

* Fix the merge. The taskpool work was reverted.

* Update nim-ethers submodule

* Remove deprecated ValidIpAddress. Fix missing case and imports.

* Fix a weird issue where nim-confutils cannot find NatAny

* Fix tests and remove useless static keyword
2025-01-10 14:12:37 +00:00
Ben Bierens
caed3c07a3
Fix sending of WantBlocks messages and tracking of peerWants (#1019)
* sends wantBlock to peers with block. wantHave to everyone else

* Cleanup cheapestPeer. Fixes test for peers lists

* Fixes issue where peerWants are only stored for type wantBlock.

* Review comments by Dmitriy

* consistent logging of addresses

* prevents duplicate scheduling. Fixes cancellation

* fast

* Marks cancel-presence situation with todo comment.

* fixtest: testsales enable logging

* Review by Dmitriy: Remember peerWants only if we don't have them.

* rework `wantListHandler` handling

---------

Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
2025-01-09 22:44:02 +00:00
munna0908
74c46b3651
network: support for uPnP and PMP nat traversal (#1050)
* add support for uPnP and PMP nat traversal

* update cli flags

* merge with master

* use listener address from switch

* update build script to rename version file

* fix styling issues

* update docker files

- remove Disc_ip env
- update CODE_NAT parsing logic

* code cleanup

* move nat flag parsing logic to conf.nim
2025-01-09 18:11:22 +00:00
Adam Uhlíř
407f77871f
chore: warning cleanup (#1055)
* chore: warning cleanup

* chore: fix proper disabling of warning

* chore: ignore the import when not needed
2025-01-08 11:30:54 +00:00
Arnaud
145aa5d84a
chore: remove old fixme (#1060)
* Remove FIXME

* Fix indentation
2025-01-08 07:45:15 +00:00
Arnaud
0badcb662a
chore(openapi): add announceAddresses for debug (#994)
Co-authored-by: Adam Uhlíř <adam@uhlir.dev>
2025-01-08 07:34:54 +00:00
Adam Uhlíř
4b99b58645
fix: unhide log-format cli flag (#1049) 2024-12-19 12:03:50 +00:00
Eric
6ff4d30b43
fix(tests/integration): fix bootstrap node spr (#1048)
After a change in PR #1031, bootstrap node SPRs may not work when Hardhat nodes are started with the tests. This fixes it by appending all started clients' and providers' SPRs to a sequence, and using that sequence of SPRs to start the next node. This means all subsequently started nodes will be connected to their previously started peers.

This also fixes the case when bootstrap SPRs would not be present if no clients were started.
2024-12-18 08:10:36 +00:00
Eric
8645d336ff
refactor(trackedfutures): remove return of future from tracked futures api (#1046)
- cleans up all instances of `.track` to use the `module.trackedfutures.track(future)` procedure, for better readability
- removes the `track` override that is no longer used in the codebase
2024-12-18 07:39:03 +00:00
Ben Bierens
20bb5e5a38
Applies multinodessuite in twonodessuite (#1031)
* Sets timeout for codexclient httpClient. Adds reliable transfer test.

* disable new test to check timeout setting in CI

* restores new test

* adds heartbeat log and logfile to ci output

* fixes suite

* fixes blocked output stream by switching to multinode fixture

* new twonodessuite based on multinodesuite

* Applies updated twonodessuite

* removes heartbeat log

* applies multinodesuite in testsales

* applies multinodesuite in testmarketplace

* fixes account fetch for host and client in testmarketplace

* adds waitTillNextPeriod at end of marketplace test

* Uses marketplacesuite in testmarketplace
2024-12-17 13:01:41 +00:00
Eric
c498e2f53b
fix(nodeprocess): asyncspawn capture output (#1045)
- Ensures no exceptions are raised from `captureOutput`
- Asyncspawns the future to ensure errors are not silently swallowed
2024-12-17 09:51:38 +00:00
Eric
f0f04ddf1d
refactor(then): removes then util (#1047)
- removes then util as it is no longer being used in the codebase
2024-12-17 09:51:05 +00:00
Eric
bef1160799
fix(validation): asyncSpawns run (#1039)
- annotates run with raises: []
- asyncSpawns run, to ensure there are no escaping exceptions
2024-12-16 06:55:19 +00:00
Eric
b0cc27f563
fix(blockexchange): ensures futures are asyncSpawned (#1037)
* fix(blockexchange): asyncSpawn advertising of local store blocks

* fix(blockexchange): asyncSpawn discoveryQueueLoop

- prevents silently swallowing async errors

* fix(blockexchange): asyncSpawns block exchange tasks

- prevents silently swallow future exceptions
2024-12-16 06:01:49 +00:00
Eric
5f2ba14281
fix(codexnode): ensure timer loop is asyncSpawned (#1038)
* fix(codexnode): stop clock after validator stops

* fix(timer): ensure timer loop is asyncSpawned
2024-12-16 04:24:57 +00:00
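
The asyncSpawn fixes above all enforce the same rule: a future handed to asyncSpawn must not leak exceptions, so errors and cancellation are handled inside the proc. A minimal sketch of the pattern using chronos's raises annotation (names are illustrative, not the actual codex modules):

```nim
import pkg/chronos

# A proc that is safe to asyncSpawn: it is annotated to raise nothing,
# so cancellation is dealt with before it can escape the spawned future.
proc advertiseLoop() {.async: (raises: []).} =
  try:
    while true:
      await sleepAsync(1.seconds)  # stand-in for advertising local blocks
  except CancelledError:
    discard  # swallowed here; nothing is silently lost by the spawn

when isMainModule:
  asyncSpawn advertiseLoop()
```
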
Slava
6d415b0ace
ci: split nim-matrix workflow (#1041) 2024-12-15 23:31:55 +00:00
Eric
01fb685bf6
fix(sales): replaces then with asyncSpawn (#1036)
- ensures `addSlotToQueue` does not raise exceptions as it is now asyncSpawned
2024-12-15 23:19:31 +00:00
Marcin Czenko
92a0eda79a
Validator historical state restoration (#922)
* adds a new overload of queryPastEvents allowing past events to be queried based on a timestamp in the past

* adds state restoration to validator

* refactors a bit to get the tests back to work

* replaces deprecated generic methods from Market with methods for specific event types

* Refactors binary search

* adds market tests for querying past SlotFilled events and binary search

* Takes into account that <<earliest>> block available is not necessarily the genesis block

* Adds more logging and makes testing earliest block boundary more reliable

* adds validation tests for historical state restoration

* adds mockprovider to simplify and improve testing of the edge conditions

* adds slot reservation to the new tests after rebasing

* adds validation groups and group index in logs of validator

* adds integration test with two validators

* adds comment on how to enable logging in integration test executable itself

* testIntegration: makes the list of running nodes injected and available in the body of the test

* validation: adds integration test for historical state

* adds more logging to validator

* integration test: validator only looks 30 days back for historical state

* adds logging of the slotState when removing slots during validation

* review and refactor validator integration tests

* adds validation to the set of integration tests

* Fixes mistyped name of the mock provider module in testMarket

* Fixes a typo in the name of the validation suite in integration tests

* Makes validation unit test a bit easier to follow

* better use of logScopes to reduce duplication

* improves timing and clarifies the test conditions

* uses http as default RPC provider for nodes running in integration tests as a workaround for dropped subscriptions

* simplifies the validation integration tests by waiting for failed request instead of tracking slots

* adds config option allowing a different provider url to be set selectively

* Brings back the default settings for RPC provider in integration tests

* use http RPC provider for clients in validation integration tests

* fine-tune the tests

* Makes validator integration test more robust - adds extra tracking

* brings tracking of marketplace event back to validator integration test

* refactors integration tests

* deletes tmp file

* adds <<return>> after forcing integration test to fail prematurely

* re-enables all integration tests and matrix

* stops debug output in CI

* allows to choose a different RPC provider for a given integration test suite

* fixes signature of <<getBlock>> method in mockProvider

* adds missing import which seems to be breaking integration tests on windows

* makes sure that clients, SPs, and validators use the same provider url

* makes validator integration tests using http at 127.0.0.1:8545

* testvalidator: stop resubscribing as we are now using http polling as rpc provider

* applying review comments

* groups queryPastStorage overrides together (review comment)

* groups the historical validation tests into a sub suite

* removes the temporary extensions in marketplacesuite and multinodesuite allowing to specify provider url

* simplifies validation integration tests

* Removes debug logs when waiting for request to fail

* Renaming waitForRequestFailed => waitForRequestToFail

* renames blockNumberForBlocksAgo to pastBlockTag and makes it private

* removes redundant debugging logs

* refines logging in validation

* removes dev logging from mockmarket

* improves exception handling in provider helper procs and prepares for extraction to a separate module

* Uses chronos instead of std/times for Duration

* extracts provider and binary search helpers to a separate module

* removes redundant log entry params from validator

* unifies the notation to consistently use method call syntax

* reuses ProviderError from nim-ethers in the provider extension

* clarifies the comment in multinodesuite

* uses == operator to check the predefined tags and raises exception when `BlockTag.pending` is requested.

* when waiting for request to fail, we break on any request state that is not Started

* removes tests that were moved to testProvider from testMarket

* extracts tests that use MockProvider to a separate async suite

* improves performance of the historical state restoration

* removing redundant log messages in validator (groupIndex and groups)

* adds testProvider to testContracts group

* removes unused import in testMarket
2024-12-14 05:07:55 +00:00
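
Restoring historical state from "a timestamp in the past" rests on the binary search mentioned in the bullets above: find the earliest block whose timestamp is at or after the target, bounded by the earliest available block rather than genesis. A self-contained sketch (timestampOf is a hypothetical stand-in for a provider call):

```nim
# Lower-bound binary search over block numbers by timestamp. Assumes
# block timestamps are non-decreasing in block number.
proc blockAtOrAfter(timestampOf: proc(blockNum: int): int,
                    earliest, latest, target: int): int =
  var lo = earliest
  var hi = latest
  while lo < hi:
    let mid = (lo + hi) div 2
    if timestampOf(mid) < target:
      lo = mid + 1  # mid is too early; the answer lies strictly after it
    else:
      hi = mid      # mid is at/after the target; it may be the answer
  lo
```
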
Eric
1f49f86131
fix(slotqueue): asyncSpawns futures correctly (#1034)
- asyncSpawns `run` and worker `dispatch` in slotqueue.
- removes usage of `then` from slotqueue.
2024-12-13 06:42:05 +00:00
Eric
7c804b0ec9
fix(asyncstatemachine): fixes not awaiting or asyncSpawning futures (#1033)
- adds a break in scheduler when CancelledError is caught
- tracks asyncSpawned state.run, so that it can be cancelled during stop
- removes usages of `then`
- ensures that no exceptions are leaked from async procs
2024-12-13 02:35:39 +00:00
Adam Uhlíř
19af79786e
feat: repair is rewarded (#1022)
* feat: repair is rewarded

* chore: update contracts repo

* feat: proving loop handles repair case

* test: assert repair state

* chore: update contracts repo

* fix: upon unknown state of repair go to error
2024-12-12 20:19:56 +00:00
Adam Uhlíř
d10072bf67
refactor: marketplace configuration is cached (#1029) 2024-12-12 12:57:34 +00:00
Eric
da234d503b
fix(trackedfutures): removes usage of then from tracked futures (#1032)
- removes usage of `then`, simplifying the logic, and allowing `then` to be removed completely
- updates annotations to reflect that all procs (sync and async) raise no exceptions
2024-12-12 12:31:51 +00:00
Vaclav Pavlin
855b973811
chore: fix inconsistent metric naming (#1027) 2024-12-12 10:45:47 +00:00
Marko Burčul
0c6784da7e
nix: make derivation and update shell (#1003)
* nix: make derivation and update shell

Create a structure for nix files. Add the derivation file which is using
system Nim to compile Codex.

Referenced issue: https://github.com/codex-storage/nim-codex/issues/940

Signed-off-by: markoburcul <marko@status.im>

* nim-circom-compat: update

Include commit which allows building circom-compat-ffi using Nix (doesn't
affect current usage of the submodule).

Referenced issue: https://github.com/codex-storage/nim-codex/issues/940

Signed-off-by: markoburcul <marko@status.im>

* makefile: fix for detecting linux arch

Signed-off-by: markoburcul <marko@status.im>

---------

Signed-off-by: markoburcul <marko@status.im>
2024-12-09 17:07:01 +00:00
Eric
fb4577f25c
chore(tests): fix unneeded async (#1021)
Removes an unneeded `check eventually` inside a helper proc, removing the need for the proc to be async.
2024-12-06 08:02:57 +00:00
Ben Bierens
f51eae30fe
Fixes raceconditions in testadvertiser (#1008) 2024-12-05 09:30:01 +00:00
Ben Bierens
8e29939cf8
Send pluralized wantBlock messages (#1016)
* don't unroll wantCids when sending wantBlock message in blockPresenceHandler

* workaround logging

* Fixes logformatting upraises for sequences.

* Applies upraises rule for setProperty of textmode for sequences.

* Replaces upraises with raises

* Removes redundant log in sendWantHave
2024-12-04 13:33:48 +00:00
Slava
921159f87f
Run release tests for docker images (#1017)
* Refactor Docker reusable workflow and add release tests support

https://github.com/codex-storage/cs-codex-dist-tests/issues/108

* Add an option to run release tests for Docker images

https://github.com/codex-storage/cs-codex-dist-tests/issues/108

* Use bigger instance for arm builds

https://github.com/codex-storage/cs-codex-dist-tests/issues/108

* Pass repository and branch to release tests workflow

https://github.com/codex-storage/cs-codex-dist-tests/issues/108

* Do not use computation job because run_release_tests is a string

https://github.com/codex-storage/cs-codex-dist-tests/issues/108
2024-12-04 11:52:51 +00:00
Ben Bierens
63e54d135c
Fixes race condition in initial-proving tests (#1007) 2024-12-04 08:56:53 +00:00
Adam Uhlíř
0707446cdd
feat: expose underlying nim-ethers errors to logs (#985)
* feat: expose underlying nim-ethers errors to logs

* chore: bump nim-ethers

* test: fix testproof compilation

* test: raise defects on results error
2024-12-03 11:16:24 +00:00
Marcin Czenko
3f510eb501
fixes incomplete async annotation (#1015) 2024-12-02 15:07:43 +00:00
Ben Bierens
ab019a08ae
Enables stacktrace in docker images (#1013)
* Enables libbacktrace in docker images

* Make libbacktrace configurable in docker via build-arg

---------

Co-authored-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
2024-12-02 09:32:53 +00:00
Ben Bierens
21249968d4
Requesting the same CID sometimes causes a worker to discard the request if it's already in flight with another worker. (#1002) 2024-11-27 07:13:34 +00:00
Ben Bierens
d47ce38894
Fixes race in testsales (#995)
* Adds isWaiting to mockClock to remove sleep in testsales

* Review comments by Eric. Also replaced two more sleeps with check-eventually
2024-11-26 10:48:52 +00:00
Slava
5c6bbb0cee
chore: update testnet marketplace address (#1000) (#1001)
Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
2024-11-26 08:16:41 +00:00
Ben Bierens
024c75e4f9
Bumps leveldbstatic to v0.1.6 (#998) 2024-11-25 17:15:05 +00:00
Slava
d12de20868
ci: use rust 1.79 for release workflow and dockerfile (#999)
* ci: use rust 1.79 for release workflow (#996)

* docker: use rust 1.79 for docker (#996)
2024-11-25 16:13:14 +00:00
markspanbroek
29433bad9a
Fix concurrency issues (#993)
* Use http subscriptions instead of websocket for tests

To work around this issue when subscriptions are
inactive for more than 5 minutes:
https://github.com/NomicFoundation/hardhat/issues/2053

Use 100 millisecond polling; default polling interval
of 4 seconds is too close to the 5 second timeout for
`check eventually`.

* use .confirm(1) instead of confirm(0)

confirm(0) doesn't wait at all, confirm(1) waits
for the transaction to be mined

* speed up partial payout integration test

* update nim-ethers to version 0.10.0

includes fixes for http polling and .confirm()

* fix timing of marketplace tests

allow for a bit more time to withdraw funds

* use .confirm(1) in marketplace tests

to ensure that the transaction has been processed
before continuing with the test

* fix timing issue in validation unit test

* fix proof integration test

there were two logic errors in this test:
- a slot is freed anyway at the end of the contract
- when starting the request takes a long time, the
  first slot can already be freed because there were
  too many missing proofs

* fix intermittent error in contract tests

currentTime() doesn't always correctly reflect
the time of the next transaction

* reduce number of slots in integration test

otherwise the windows runner in the CI won't
be able to start the request before it expires

* fix timing in purchasing test

allow for a bit more time for a request to
be submitted

* fix timing of request submission in test

windows ci is so slow, it can take up to 40 seconds
just to submit a storage request to hardhat

* increase proof period to 90 seconds

* adjust timing of integration tests

reason: with the increased period length of 90 seconds, it
can take longer to wait for a stable challenge at the
beginning of a period.

* increase CI timeout to 2 hours

* Fix slow builds on windows

apparently it takes windows 2-3 seconds to
resolve "localhost" to 127.0.0.1 for every
json-rpc connection that we make 🤦
2024-11-25 11:23:04 +00:00
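A sketch of the test setup these notes describe, assuming nim-ethers exposes a pollingInterval parameter on JsonRpcProvider.new and confirm() on transaction calls (exact signatures may differ):

import pkg/chronos
import pkg/ethers

proc setup() {.async.} =
  # HTTP with 100 ms polling works around the hardhat websocket-inactivity
  # issue and keeps event delivery well under the 5 s `check eventually` timeout
  let provider = JsonRpcProvider.new("http://127.0.0.1:8545",
                                     pollingInterval = 100.milliseconds)
  # .confirm(1) waits for the transaction to be mined; .confirm(0) returns
  # immediately, e.g.: await marketplace.requestStorage(request).confirm(1)
  discard provider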
Slava
6038fb456e
ci: split linux and macos tests (#997)
* Split Linux and macOS tests

* Make jobs names more readable
2024-11-22 12:05:00 +00:00
Slava
71b8a95d12
ci: install rust 1.79 as required by packages (#996)
Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
2024-11-22 06:34:10 +00:00
Arnaud
9b7f3f4aaf
chore(openapi): update the openapi spec for the manifest object (#972)
* Update the manifest openapi spec

* Make metadata nullable

Signed-off-by: Arnaud <arnaud@status.im>

---------

Signed-off-by: Arnaud <arnaud@status.im>
Co-authored-by: Giuliano Mega <giuliano.mega@gmail.com>
2024-11-15 13:21:18 +00:00
Slava
d7ae8b734a
makefile: Do not use -mssse3 instructions on arm (#940) (#990) 2024-11-09 11:39:39 +00:00
markspanbroek
a6f0311b50
change default proof period to 2 minutes (#989)
on hardhat the proof period remains 1 minute
2024-11-08 08:03:16 +00:00
Marko Burčul
2151e02838
nix-flake: Add shell definition (#954)
Initialized flake file and added development shell definition.
Exporting of default compiler flags is moved to makefile.

Referenced issue: https://github.com/codex-storage/nim-codex/issues/940

Signed-off-by: markoburcul <marko@status.im>
2024-11-04 07:46:22 +00:00
Slava
86257054ee
chore: update testnet marketplace address (#983) (#984)
Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
2024-11-04 00:41:03 +00:00
Slava
96459188c9
Add ETH_PRIVATE_KEY to Docker entrypoint (#982)
* Add ETH_PRIVATE_KEY to Docker entrypoint

* Add deprecation warning for PRIV_KEY variable

Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>

---------

Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
2024-11-03 06:51:57 +00:00
Ben Bierens
b8dd68063f
fix: bumps ethers to fix missing nonce error (#980)
* fix: bumps ethers to fix missing nonce error

* fix was merged in nim-ethers

---------

Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
2024-11-01 17:23:43 +00:00
Eric
2b5a40559e
chore: update dependencies, especially nim-ethers to chronos v4 compatible version (#968)
* chore: bump dependencies, including nim-ethers with chronos v4 support

Bumps the following dependencies:
- nim-ethers to commit 507ac6a4cc71cec9be7693fa393db4a49b52baf9 which contains a pinned nim-eth version. This is to be replaced by a versioned library, so it will be pinned to a particular version. There is a crucial fix in this version of ethers that fixes nonce management which is causing issues in the Codex testnet.
- nim-json-rpc to v0.4.4
- nim-json-serialization to v0.2.8
- nim-serde to v1.2.2
- nim-serialization to v0.2.4

Currently, one of the integration tests is failing.

* fix integration test

- When a state's run was cancelled, it was being caught as an error due to catching all CatchableErrors. This caused a state transition to SaleErrored; however, cancellation of run was not actually an error. Handling this correctly fixed the issue.
- Stopping of the clock was moved to after `HostInteractions` (sales), which avoided an assertion when getting the time while the clock was not started.

* bump ethers to include nonce fix and filter not found fix

* bump ethers: fixes missing symbol not exported in ethers

* Fix cirdl test imports/exports

* Debugging in ci

* Handle CancelledErrors for state.run in one place only

* Rename `config` to `configuration`

There was a symbol clash preventing compilation and it was easiest to rename `config` to `configuration` in the contracts. Not even remotely ideal, but it was the only way.

* bump ethers to latest

Prevents an issue where the `JsonNode.items` symbol could not be found

* More changes to support `config` > `configuration`

* cleanup

* testing to see if this fixes failure in ci

* bumps contracts

- ensures slot is free before allowing reservation
- renames config to configuration to avoid symbol clash
2024-10-30 10:40:17 +00:00
Arnaud
942f940c92
Move the upload headers to the POST method (#978) 2024-10-29 14:40:43 +00:00
Slava
a2ac7453fa
Build Postman Collection (#973)
Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
2024-10-28 13:53:41 +00:00
Arnaud
2fb7031ec6
feat: add metadata to the manifest (#960)
* Add metadata to the manifest

* Remove useless import

* Fix the openapi documentation

* Use optional fields instead of default values

* Remove testRestApi target

* Return failure when the protobuf cannot get the field

* Set download headers and fix cors headers when an error is returned

* Add tests to verify the download headers

* Try to adjust the content length header

* Fix conversion to string

* Remove the content length header

* Remove testRestApi target

* Removing debug messages
2024-10-25 13:43:19 +00:00
Arnaud
bcc1468130
Remove duplicated header (#970) 2024-10-25 09:23:35 +00:00
Arnaud
40068512a6
Complete documentation for debug endpoint (#969) 2024-10-25 07:20:00 +00:00
Eric
0157ca4c57
fix(slot-reservations): Avoid slot filled cancellations (#963)
* Avoid cancelling states when slot is filled

* improve logging

Improves logging for situations where a Sale should be ignored instead of being considered an error, including when reservation is not allowed and when a slot was filled by another host.

* remove onSlotFilled unit tests from states
2024-10-24 05:56:12 +00:00
Slava
3a2d0926f1
chore: new marketplace address for testnet (#961)
https://github.com/codex-storage/infra-codex/issues/248

Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
2024-10-21 13:01:56 +03:00
Ben Bierens
562e4329e2
Download API upgrade (#955)
* Adds API for fetching manifest only and downloading dataset without stream

* Updates openapi.yaml

* Adds tests for downloading manifest-only and without stream.

* review comments by Giuliano

* updates test clients
2024-10-17 16:54:28 +00:00
Adam Uhlíř
436baef20a
docs: openapi node fix (#950) 2024-10-14 17:26:58 +00:00
Adam Uhlíř
7c33473c88
ci: linux ci runs on ubuntu-20.04 (#953)
* ci: linux ci uses ubuntu-20.04

* ci: use ubuntu-20.04 for nim-matrix

Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>

---------

Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
Co-authored-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
2024-10-14 11:24:53 +00:00
Ben Bierens
93960033f4
Adds log for cirdl download URL (#948) 2024-10-10 12:05:03 +00:00
Adam Uhlíř
ffa203b04f
feat: partial rewards and withdraws (#880)
* feat: partial rewards and withdraws

* test: missing reserve slot

* test: fix contracts
2024-10-10 11:53:33 +00:00
Ben Bierens
3699601393
Handles LPStreamError in chunker (#947)
* Handles LPStreamError in chunker

* Adds test for lpstream exception

* Adds tests for other stream exceptions. Cleanup.
2024-10-10 11:22:36 +00:00
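A minimal sketch of this handling against nim-libp2p's LPStream API; ChunkerError stands in for the chunker's own error type:

import pkg/chronos
import pkg/libp2p/stream/lpstream

type ChunkerError = object of CatchableError

proc readChunk(stream: LPStream, size: int): Future[seq[byte]] {.async.} =
  var buf = newSeq[byte](size)
  try:
    let read = await stream.readOnce(addr buf[0], buf.len)
    buf.setLen(read)
    return buf
  except LPStreamError as err:
    # surface stream failures as chunker errors instead of crashing the node
    raise newException(ChunkerError, "stream read failed: " & err.msg)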
Arnaud
1fe3abfd03
fix(restapi): Add cors headers when the request is returning errors (#942)
* Add cors headers when the request is returning errors

* Prevent nim-presto from sending multiple cors headers
2024-10-10 08:25:07 +00:00
Eric
7e0ec3c233
Support enforcement of slot reservations before filling slot (#934) 2024-10-09 04:44:07 +00:00
Slava
44f21b8a68
Update Codex Testnet marketplace contract address (#944)
Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
2024-10-08 12:10:42 +03:00
Arnaud
f02de34f77
docs(openapi): provide better documentation for space endpoint parameters (#921)
* Trying to improve documentation

* Update openapi.yaml

Co-authored-by: Adam Uhlíř <adam@uhlir.dev>
Signed-off-by: Arnaud <arno.deville@gmail.com>

* Update openapi.yaml

Co-authored-by: Adam Uhlíř <adam@uhlir.dev>
Signed-off-by: Arnaud <arno.deville@gmail.com>

* Update openapi.yaml

Co-authored-by: Adam Uhlíř <adam@uhlir.dev>
Signed-off-by: Arnaud <arno.deville@gmail.com>

---------

Signed-off-by: Arnaud <arno.deville@gmail.com>
Co-authored-by: Adam Uhlíř <adam@uhlir.dev>
2024-10-08 06:37:10 +00:00
Ben Bierens
17f0988fc7
Fix: null-ref in networkPeer (#937)
* Fixes nullref in networkPeer

* Removes inflight semaphore

* Revert "Removes inflight semaphore"

This reverts commit 26ec15c6f788df3adb6ff3b912a0c4b5d3139358.
2024-10-07 08:50:54 +00:00
Slava
0ea8cfb085
Remove moved docs (#935)
Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
2024-10-07 08:33:10 +00:00
Eric
566db2fa30
feat(slot-reservations): support SlotReservationsFull event (#926) 2024-10-05 01:37:08 +00:00
Slava
91e4e368de
Use Ubuntu 20.04 for Linux amd64 releases (#939)
* Use Ubuntu 20.04 for Linux amd64 releases (#932)

* Accept branches with the slash in the name for release workflow (#932)

* Increase artifacts retention-days for release workflow (#932)
2024-10-04 11:07:44 +00:00
Eric
4c51dca299
feat(slot-reservations): Add SaleSlotReserving state (#917)
* convert EthersError to MarketError

* change `canReserveSlot` and `reserveSlot` parameters

Parameters for `canReserveSlot` and `reserveSlot` were changed from `SlotId` to `RequestId` and `UInt256 slotIndex`.

* Add SaleSlotReserving

Adds a new state, SaleSlotReserving, that attempts to reserve a slot before downloading.
If the slot cannot be reserved, the state moves to SaleIgnored.
On error, the state moves to SaleErrored.

SaleIgnored is also updated to pass in `reprocessSlot` and `returnBytes`, controlling the behaviour in the Sales module after the slot is ignored. This is because previously it was assumed that SaleIgnored was only reached when there was no Availability. This is no longer the case, since SaleIgnored can now be reached when a slot cannot be reserved.

* Update SalePreparing

Specify `reprocessSlot` and `returnBytes` when moving to `SaleIgnored` from `SalePreparing`.

Update tests to include test for a raised CatchableError.

* Fix unit test

* Modify `canReserveSlot` and `reserveSlot` params after rebase

* Update MockMarket with new `canReserveSlot` and `reserveSlot` params

* fix after rebase

also bump codex-contracts-eth to master
2024-10-04 06:16:11 +00:00
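A self-contained sketch of the decision flow, with stand-in types (string and uint in place of RequestId and UInt256):

import pkg/chronos

type
  MarketError = object of CatchableError
  SaleState = enum
    SaleDownloading, SaleIgnored, SaleErrored
  Market = ref object # stand-in for the real Market abstraction

proc canReserveSlot(m: Market, requestId: string, slotIndex: uint): Future[bool] {.async.} =
  return true # stand-in for the contract call

proc reserveSlot(m: Market, requestId: string, slotIndex: uint) {.async.} =
  discard # stand-in for the contract call

proc runSlotReserving(m: Market, requestId: string, slotIndex: uint): Future[SaleState] {.async.} =
  try:
    if await m.canReserveSlot(requestId, slotIndex):
      await m.reserveSlot(requestId, slotIndex)
      return SaleDownloading # slot secured, continue the sale
    else:
      return SaleIgnored # reservation not allowed: ignore, don't error
  except MarketError:
    return SaleErrored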
Eric
b5ee57afd7
feat(slot-reservations): Support reserving slots (#907)
* feat(slot-reservations): Support reserving slots

Closes #898.

Wire up reserveSlot and canReserveSlot contract calls, but don't call them

* Remove return value from `reserveSlot`

* convert EthersError to MarketError

* Move convertEthersError to reserveSlot

* bump codex-contracts-eth after rebase

* change `canReserveSlot` and `reserveSlot` parameters

Parameters for `canReserveSlot` and `reserveSlot` were changed from `SlotId` to `RequestId` and `UInt256 slotIndex`.

* bump codex-contracts-eth after rebase

* bump codex-contracts-eth to master after codex-contracts-eth/pull/177 merged
2024-10-04 03:21:51 +00:00
Slava
b491906005
Remove moved docs (#930)
* Remove moved document

* Update main Readme and point links to the documentation site
2024-10-03 08:55:54 +00:00
Marcin Czenko
5ace105a66
Validator - support partitioning of the slot id space (#890)
* Adds validatorPartitionSize and validatorPartitionIndex config options

* adds partitioning options to the validation type

* adds partitioning logic to the validator

* ignores partitionIndex when partitionSize is either 0 or 1

* clips the partition index to `partitionIndex mod partitionSize`

* handles negative values for the validation partition index

* updates long description of the new validator cli options

* makes the default partitionSize 0 for better backward compatibility

* Improving formatting on validator CLI

* refactors validation params into a separate type and simplifies their validation

* removes suspected duplication

* fixes typo in validator CLI help

* updates README

* Applies review comments - using optionals and range types to handle validation params

* Adds initializer to the configFactory for validatorMaxSlots

* [Review] update validator CLI description and README

* [Review]: renaming validationParams to validationConfig (config)

* [Review]: move validationconfig.nim to a higher level (next to validation.nim)

* changes the backing type of MaxSlots to int and makes sure slots are validated without limit when maxSlots is set to 0

* adds more end-to-end tests for the validator and the groups

* fixes typo in README and conf.nim

* makes `maxSlotsConstraintRespected` and `shouldValidateSlot` private + updates the tests

* fixes public address of the signer account in the marketplace tutorial

* applies review comments - removes two tests
2024-10-02 22:00:40 +00:00
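A compact sketch of the partitioning rule described in this commit (names are illustrative):

func shouldValidateSlot(slotId: uint64, partitionSize, partitionIndex: int): bool =
  if partitionSize == 0 or partitionSize == 1:
    return true # partitioning disabled: validate every slot
  # clip to partitionIndex mod partitionSize, handling negative indices
  let index = ((partitionIndex mod partitionSize) + partitionSize) mod partitionSize
  int(slotId mod uint64(partitionSize)) == index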
Slava
e1a02c8b76
Use CLI args when passed for cirdl in Docker entrypoint (#927)
* Use CLI args when passed for cirdl in Docker entrypoint

Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>

* Increase CI timeout

Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>

---------

Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
2024-10-01 13:05:22 +00:00
Marcin Czenko
264bfa17f5
updates Marketplace tutorial documentation (#888)
* updates Marketplace tutorial documentation

* Applies review comments to marketplace-tutorial

* Final formatting touches

* moved `Prerequisites` around

* Fixes indentation in one JSON snippet
2024-09-29 19:18:01 +00:00
Slava
dcfc249945
Move Building Codex guide to the main docs site (#893) 2024-09-29 14:38:57 +00:00
Eric
8b374d5acb
remove erasure and por parameters from openapi spec (#915) 2024-09-25 10:37:19 +00:00
Adam Uhlíř
4b9336ec07
API tweaks for OpenAPI, errors and endpoints (#886)
* All sorts of tweaks

* docs: availability's minPrice doc

* Revert changes to the two node test example

* Change default EC params in REST API

Change default EC params in REST API to 3 nodes and 1 tolerance.

Adjust integration tests to honour these settings.

---------

Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
2024-09-24 08:37:08 +00:00
Ben Bierens
098c4bb6e9
Adds testnet marketplace address to known deployments (#911) 2024-09-24 11:52:07 +03:00
Arnaud
032d7e7495
Support CORS for POST and PATCH availability endpoints (#897) 2024-09-23 15:08:56 +00:00
Ben Bierens
4e8630791a
Rework circuit downloader (#882)
* Introduces a start method to prover

* Moves backend creation into start method

* sets up three paths for backend initialization

* Extracts backend initialization to backend-factory

* Implements loading backend from cli files or previously downloaded local files

* Wires up downloading and unzipping

* functional implementation

* Fixes testprover.nim

* Sets up tests for backendfactory

* includes libzip-dev

* pulls in updated contracts

* removes integration cli tests for r1cs, wasm, and zkey file arguments.

* Fixes issue where inner-scope values are lost before returning

* sets local proof verification for dist-test images

* Adds two traces and bumps nim-ethers

* Adds separate path for circuit files

* Create circuit dir if not exists

* fix: make sure requestStorage is mined

* fix: correct place to plug confirm

* test: fixing contracts tests

* Restores gitmodules

* restores nim-datastore reference

* Sets up downloader exe

* sets up tool skeleton

* implements retrieval of the circuit hash

* Implements downloader tool

* sets up test skeleton

* Implements test for cirdl

* includes testTools in testAll

* Cleanup building.md

* cleans up previous downloader implementation

* cleans up testbackendfactory

* moves start of prover into node.nim

* Fills in arguments in example command

* Initializes backend in prover constructor

* Restores tests

* Restores tests for cli instructions

* Review comments by Dmitriy, part 1

* Quotes path in download instruction.

* replaces curl with chronos http session

* Moves cirdl build output to 'build' folder.

* Fixes chronicles log output

* Add cirdl support to the codex Dockerfile

Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>

* Add cirdl support to the docker entrypoint

Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>

* Add cirdl support to the release workflow

Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>

* Disable verify_circuit flag for releases

Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>

* Removes backendFactory placeholder type

* wip

* Replaces zip library with status-im/zippy library (which supports zip and tar)

* Updates cirdl to not change circuitdir folder

* Switches from zip to tar.gz

* Review comments by Dmitriy

* updates codex-contracts-eth

* Adds testTools to CI

* Adds check for access to config.circuitdir

* Update fixture circuit zkey

* Update matrix to run tools tests on Windows

* Adds 'deps' dependency for cirdl

* Adjust docker-entrypoint.sh to use CODEX_CIRCUIT_DIR env var

* Review comments by Giuliano

---------

Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
Co-authored-by: Adam Uhlíř <adam@uhlir.dev>
Co-authored-by: Veaceslav Doina <20563034+veaceslavdoina@users.noreply.github.com>
2024-09-23 14:37:17 +00:00
Eric
e8e9820d5b
feat: add --payout-address (#870)
* feat: add `--payout-address`

Allows SPs to be paid out to a separate address, keeping their profits secure.
Supports https://github.com/codex-storage/codex-contracts-eth/pull/144 in the nim-codex client.

* Remove optional payoutAddress

Change --payout-address so that it is no longer optional. There is no longer an overload in `Marketplace.sol` for `fillSlot` accepting no `payoutAddress`.

* Update integration tests to include --payout-address

* move payoutAddress from fillSlot to freeSlot

* Update integration tests to use required payoutAddress

- to make payoutAddress required, the integration tests needed to avoid building the cli params until just before starting the node; otherwise, when cli params were added ad hoc, adding a non-required parameter before a required one would cause an error.

* support client payout address

- withdrawFunds requires a withdrawAddress parameter, directing payouts of withdrawn client funds (for a cancelled request) to that address.

* fix integration test

adds --payout-address to validators

* refactor: support withdrawFunds and freeSlot optional parameters

- withdrawFunds has an optional parameter for withdrawRecipient
- freeSlot has optional parameters for rewardRecipient and collateralRecipient
- change --payout-address to --reward-recipient to match contract signature naming

* Revert "Update integration tests to include --payout-address"

This reverts commit 8f9535cf35b0f2b183ac4013a7ed11b246486964.
There are some valid improvements to the integration tests, but they can be handled in a separate PR.

* small fix

* bump contracts to fix marketplace spec

* bump codex-contracts-eth, now rebased on master

* bump codex-contracts-eth

now that feat/reward-address has been merged to master

* clean up, comments
2024-09-17 04:18:15 +00:00
Ben Bierens
1e2ad95659
Update advertising (#862)
* Setting up advertiser

* Wires up advertiser

* cleanup

* test compiles

* tests pass

* setting up test for advertiser

* Finishes advertiser tests

* fixes commonstore tests

* Review comments by Giuliano

* Race condition found by Giuliano

* Review comment by Dmitriy

Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
Signed-off-by: Ben Bierens <39762930+benbierens@users.noreply.github.com>

* fixes tests

---------

Signed-off-by: Ben Bierens <39762930+benbierens@users.noreply.github.com>
Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
2024-08-26 13:18:59 +00:00
Giuliano Mega
e017b05cf1
Remove extra license file (#876)
* remove extra license

* center "apache license"
2024-08-19 09:48:03 +00:00
Eric
63ee4ca0f9
refactor(marketplace): generic querying of historical marketplace events (#872)
* refactor(marketplace): move marketplace events to the Market abstraction

Move marketplace contract events to the Market abstraction so the types can be shared across all modules that call the Market abstraction.

* Remove unneeded conversion

* Switch to generic implementation of event querying

* change parent type to MarketplaceEvent
2024-08-16 02:55:25 +00:00
Arnaud
15303125f6
Support CORS preflight requests when the storage request api returns an error (#878)
* Add CORS headers when the REST API is returning an error

* Use the allowedOrigin instead of the wildcard when setting the origin

Signed-off-by: Arnaud <arnaud@status.im>

---------

Signed-off-by: Arnaud <arnaud@status.im>
2024-08-15 23:57:50 +00:00
Eric
eeb048e386
chore: add downtimeProduct config parameter (#867)
* chore: add `downtimeProduct` config parameter

* bump codex-contracts-eth to master
2024-08-15 02:31:02 +00:00
Arnaud
bb9a5fbe92
Add OPTIONS endpoint to allow the content-type header for the upload endpoint (#869)
* Add OPTIONS endpoint to allow the content-type header

* Remove useless header "Access-Control-Headers" and add cache

Signed-off-by: Arnaud <arnaud@status.im>

---------

Signed-off-by: Arnaud <arnaud@status.im>
Co-authored-by: Giuliano Mega <giuliano.mega@gmail.com>
2024-08-14 21:08:04 +00:00
Giuliano Mega
2771ca6319
Add MIT/Apache licenses (#861)
* Add MIT/Apache licenses

* Center "Apache License"

Signed-off-by: Giuliano Mega <giuliano.mega@gmail.com>

* remove wrong legal entity; rename apache license file

---------

Signed-off-by: Giuliano Mega <giuliano.mega@gmail.com>
2024-08-13 15:38:17 +00:00
Slava
b7934fc686
Downgrade to gcc 13 on Windows (#874)
* Downgrade to gcc 13 on Windows

Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>

* Increase build job timeout to 90 minutes

Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>

---------

Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
2024-08-10 09:50:05 +00:00
Giuliano Mega
4f56f2af26
Chronos v4 Update (v3 Compat Mode) (#814)
* add changes to use chronos v4 in compat mode

* switch chronos to compat fix branch

* use nimbus-build-system with configurable Nim repo

* add missing imports

* add missing await

* bump compat

* pin nim version in Makefile

* add await instead of asyncSpawn to advertisement queue loop

* bump DHT to v0.5.0

* allow error state of `onBatch` to propagate upwards in test code

* pin Nim compiler commit to avoid fetching stale branch

* make CI build against branch head instead of merge

* fix handling of return values in testslotqueue
2024-07-18 21:04:33 +00:00
Giuliano Mega
fbce240e3d
Fixes prover behavior with singleton proof trees (#859)
* add logs and test

* add Merkle proof checks

* factor out Circom input normalization, fix proof input serialization

* add test and update existing ones

* update circuit assets

* add back trace message

* switch contracts to fix branch

* update codex-contracts-eth to latest

* do not expose prove with prenormalized inputs
2024-07-18 13:25:06 +00:00
Slava
8f740b42e6
Update Release workflow (#858)
Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
2024-07-10 18:14:28 +00:00
Tomasz Bekas
3ae73197c2
Bandaid for failing erasure coding (#855) 2024-07-03 14:44:00 +00:00
Giuliano Mega
fbf1b51f57
Prover workshop band-aid (#853)
* add prover bandaid

* Improve error message text

Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
Signed-off-by: Giuliano Mega <giuliano.mega@gmail.com>

---------

Signed-off-by: Giuliano Mega <giuliano.mega@gmail.com>
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
2024-07-03 11:27:56 +00:00
Eric
67facb4b2a
feat(rest): adds erasure coding constraints when requesting storage (#848)
* Rest API: add erasure coding constraints when requesting storage

* clean up

* Make error message for "dataset too small" more informative.

* fix API integration test

---------

Co-authored-by: gmega <giuliano.mega@gmail.com>
2024-06-27 21:26:19 +00:00
Giuliano Mega
b004ca75f6
Bump Nim to 1.6.21 (#851)
* bump Nim to 1.6.21 (range type reset fixes)

* remove incompatible versions from compiler matrix
2024-06-27 19:58:27 +00:00
Tomasz Bekas
11eaebfd77
Fix verifiable manifest constructor (#844)
* Fix verifiable manifest constructor

* Add integration test for verifiable manifest download

Add integration test for downloading a verifiable dataset after creating a storage request

* add missing import

* add testecbug to integration suite

* Remove hardhat instance from integration test

* change description, drop echo

---------

Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
Co-authored-by: gmega <giuliano.mega@gmail.com>
2024-06-26 20:02:39 +00:00
Eric
5b131611ac
bump nim-leopard to handle no parity data (#845) 2024-06-26 15:41:06 +00:00
Eric
a79bbd459a
prevent node crashing with not val.isNil (#843) 2024-06-26 14:22:44 +00:00
Giuliano Mega
a55b676a42
provisional fix so EC errors do not crash the node on download (#841) 2024-06-25 21:38:53 +00:00
Slava
ad53066131
ci: add verify_circuit=true to the releases (#840) 2024-06-25 18:31:50 +00:00
Giuliano Mega
8138ef5afd
Fix verifiable manifest initialization (#839)
* fix verifiable manifest initialization

* fix linearstrategy, use verifiableStrategy to select blocks for slots

* check for both strategies in attribute inheritance test
2024-06-21 22:50:56 +00:00
Giuliano Mega
4619260dc1
Fix StoreStream so it doesn't return parity bytes (#838)
* fix storestream so it doesn't return parity bytes for protected/verifiable manifests

* use Cid.example instead of creating a mock manually
2024-06-21 11:09:59 +00:00
Tomasz Bekas
ec7faa21b5
Block deletion with ref count & repostore refactor (#631) 2024-06-20 22:46:06 +00:00
Adam Uhlíř
1a57341b7d
fix: createReservation lock (#825)
* fix: createReservation lock

* fix: additional locking places

* fix: acquire lock

* chore: feedback

Co-authored-by: markspanbroek <mark@spanbroek.net>
Signed-off-by: Adam Uhlíř <adam@uhlir.dev>

* feat: withLock template and fixed tests

* fix: use proc for MockReservations constructor

* chore: feedback

Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
Signed-off-by: Adam Uhlíř <adam@uhlir.dev>

* chore: feedback implementation

---------

Signed-off-by: Adam Uhlíř <adam@uhlir.dev>
Co-authored-by: markspanbroek <mark@spanbroek.net>
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
2024-06-20 10:28:01 +00:00
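A sketch of the withLock pattern this change introduces, built on chronos' AsyncLock (the real template may differ):

import pkg/chronos

template withLock(lock: AsyncLock, body: untyped) =
  try:
    await lock.acquire()
    body
  finally:
    if lock.locked:
      lock.release()

proc createReservation(lock: AsyncLock) {.async.} =
  withLock(lock):
    # critical section: the availability read-modify-write stays atomic
    discard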
439 changed files with 27136 additions and 13581 deletions

.git-blame-ignore-revs Normal file
View File

@ -0,0 +1,2 @@
# Formatted with nph v0.6.1-0-g0d8000e
e5df8c50d3b6e70e6eec1ff031657d2b7bb6fe63

View File

@ -11,13 +11,16 @@ inputs:
default: "amd64"
nim_version:
description: "Nim version"
default: "version-1-6"
default: "v2.0.14"
rust_version:
description: "Rust version"
default: "1.78.0"
default: "1.79.0"
shell:
description: "Shell to run commands in"
default: "bash --noprofile --norc -e -o pipefail"
coverage:
description: "True if the process is used for coverage"
default: false
runs:
using: "composite"
steps:
@ -31,8 +34,8 @@ runs:
if: inputs.os == 'linux' && (inputs.cpu == 'amd64' || inputs.cpu == 'arm64')
shell: ${{ inputs.shell }} {0}
run: |
sudo apt-fast update -qq
sudo DEBIAN_FRONTEND='noninteractive' apt-fast install \
sudo apt-get update -qq
sudo DEBIAN_FRONTEND='noninteractive' apt-get install \
--no-install-recommends -yq lcov
- name: APT (Linux i386)
@ -40,8 +43,8 @@ runs:
shell: ${{ inputs.shell }} {0}
run: |
sudo dpkg --add-architecture i386
sudo apt-fast update -qq
sudo DEBIAN_FRONTEND='noninteractive' apt-fast install \
sudo apt-get update -qq
sudo DEBIAN_FRONTEND='noninteractive' apt-get install \
--no-install-recommends -yq gcc-multilib g++-multilib
- name: Homebrew (macOS)
@ -78,6 +81,49 @@ runs:
mingw-w64-i686-ntldd-git
mingw-w64-i686-rust
- name: Install gcc 14 on Linux
# We don't want to install gcc 14 for coverage (Ubuntu 20.04)
if : ${{ inputs.os == 'linux' && inputs.coverage != 'true' }}
shell: ${{ inputs.shell }} {0}
run: |
# Skip for older Ubuntu versions
if [[ $(lsb_release -r | awk -F '[^0-9]+' '{print $2}') -ge 24 ]]; then
# Install GCC-14
sudo apt-get update -qq
sudo apt-get install -yq gcc-14
# Add GCC-14 to alternatives
sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-14 14
# Set GCC-14 as the default
sudo update-alternatives --set gcc /usr/bin/gcc-14
fi
- name: Install ccache on Linux/Mac
if: inputs.os == 'linux' || inputs.os == 'macos'
uses: hendrikmuhs/ccache-action@v1.2
with:
create-symlink: true
key: ${{ inputs.os }}-${{ inputs.builder }}-${{ inputs.cpu }}-${{ inputs.tests }}-${{ inputs.nim_version }}
evict-old-files: 7d
- name: Install ccache on Windows
if: inputs.os == 'windows'
uses: hendrikmuhs/ccache-action@v1.2
with:
key: ${{ inputs.os }}-${{ inputs.builder }}-${{ inputs.cpu }}-${{ inputs.tests }}-${{ inputs.nim_version }}
evict-old-files: 7d
- name: Enable ccache on Windows
if: inputs.os == 'windows'
shell: ${{ inputs.shell }} {0}
run: |
CCACHE_DIR=$(dirname $(which ccache))/ccached
mkdir ${CCACHE_DIR}
ln -s $(which ccache) ${CCACHE_DIR}/gcc.exe
ln -s $(which ccache) ${CCACHE_DIR}/g++.exe
ln -s $(which ccache) ${CCACHE_DIR}/cc.exe
ln -s $(which ccache) ${CCACHE_DIR}/c++.exe
echo "export PATH=${CCACHE_DIR}:\$PATH" >> $HOME/.bash_profile # prefix path in MSYS2
- name: Derive environment variables
shell: ${{ inputs.shell }} {0}
run: |
@ -135,8 +181,11 @@ runs:
llvm_bin_dir="${llvm_dir}/bin"
llvm_lib_dir="${llvm_dir}/lib"
echo "${llvm_bin_dir}" >> ${GITHUB_PATH}
# Make sure ccache has precedence (GITHUB_PATH is appending before)
echo "$(brew --prefix)/opt/ccache/libexec" >> ${GITHUB_PATH}
echo $PATH
echo "LDFLAGS=${LDFLAGS} -L${libomp_lib_dir} -L${llvm_lib_dir} -Wl,-rpath,${llvm_lib_dir}" >> ${GITHUB_ENV}
NIMFLAGS="${NIMFLAGS} $(quote "-d:LeopardCmakeFlags='-DCMAKE_BUILD_TYPE=Release -DCMAKE_C_COMPILER=${llvm_bin_dir}/clang -DCMAKE_CXX_COMPILER=${llvm_bin_dir}/clang++' -d:LeopardExtraCompilerlags='-fopenmp' -d:LeopardExtraLinkerFlags='-fopenmp -L${libomp_lib_dir}'")"
NIMFLAGS="${NIMFLAGS} $(quote "-d:LeopardCmakeFlags='-DCMAKE_BUILD_TYPE=Release' -d:LeopardExtraCompilerFlags='-fopenmp' -d:LeopardExtraLinkerFlags='-fopenmp -L${libomp_lib_dir}'")"
echo "NIMFLAGS=${NIMFLAGS}" >> $GITHUB_ENV
fi
@ -153,6 +202,7 @@ runs:
- name: Restore Nim toolchain binaries from cache
id: nim-cache
uses: actions/cache@v4
if : ${{ inputs.coverage != 'true' }}
with:
path: NimBinaries
key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_version }}-cache-${{ env.cache_nonce }}-${{ github.run_id }}
@ -162,9 +212,17 @@ runs:
shell: ${{ inputs.shell }} {0}
run: echo "NIM_COMMIT=${{ inputs.nim_version }}" >> ${GITHUB_ENV}
- name: Build Nim and Codex dependencies
- name: MSYS2 (Windows All) - Disable git symbolic links (since miniupnp 2.2.5)
if: inputs.os == 'windows'
shell: ${{ inputs.shell }} {0}
run: |
git config --global core.symlinks false
- name: Build Nim and Logos Storage dependencies
shell: ${{ inputs.shell }} {0}
run: |
which gcc
gcc --version
make -j${ncpu} CI_CACHE=NimBinaries ${ARCH_OVERRIDE} QUICK_AND_DIRTY_COMPILER=1 update
echo
./env.sh nim --version

View File

@ -3,12 +3,14 @@ Tips for shorter build times
### Runner availability ###
Currently, the biggest bottleneck when optimizing workflows is the availability
of Windows and macOS runners. Therefore, anything that reduces the time spent in
Windows or macOS jobs will have a positive impact on the time waiting for
runners to become available. The usage limits for Github Actions are [described
here][limits]. You can see a breakdown of runner usage for your jobs in the
Github Actions tab ([example][usage]).
When running on the Github free, pro or team plan, the bottleneck when
optimizing workflows is the availability of macOS runners. Therefore, anything
that reduces the time spent in macOS jobs will have a positive impact on the
time waiting for runners to become available. On the Github enterprise plan,
this is not the case and you can more freely use parallelization on multiple
runners. The usage limits for Github Actions are [described here][limits]. You
can see a breakdown of runner usage for your jobs in the Github Actions tab
([example][usage]).
### Windows is slow ###
@ -22,11 +24,10 @@ analysis, etc. are therefore better performed on a Linux runner.
Breaking up a long build job into several jobs that you run in parallel can have
a positive impact on the wall clock time that a workflow runs. For instance, you
might consider running unit tests and integration tests in parallel. Keep in
mind however that availability of macOS and Windows runners is the biggest
bottleneck. If you split a Windows job into two jobs, you now need to wait for
two Windows runners to become available! Therefore parallelization often only
makes sense for Linux jobs.
might consider running unit tests and integration tests in parallel. When
running on the Github free, pro or team plan, keep in mind that availability of
macOS runners is a bottleneck. If you split a macOS job into two jobs, you now
need to wait for two macOS runners to become available.
### Refactoring ###
@ -66,9 +67,10 @@ might seem inconvenient, because when you're debugging an issue you often want
to know whether you introduced a failure on all platforms, or only on a single
one. You might be tempted to disable fail-fast, but keep in mind that this keeps
runners busy for longer on a workflow that you know is going to fail anyway.
Subsequent runs will therefore take longer to start. Fail fast is most likely better for overall development speed.
Subsequent runs will therefore take longer to start. Fail fast is most likely
better for overall development speed.
[usage]: https://github.com/codex-storage/nim-codex/actions/runs/3462031231/usage
[usage]: https://github.com/logos-storage/logos-storage-nim/actions/runs/3462031231/usage
[composite]: https://docs.github.com/en/actions/creating-actions/creating-a-composite-action
[reusable]: https://docs.github.com/en/actions/using-workflows/reusing-workflows
[cache]: https://github.com/actions/cache/blob/main/workarounds.md#update-a-cache

View File

@ -24,9 +24,9 @@ jobs:
run:
shell: ${{ matrix.shell }} {0}
name: '${{ matrix.os }}-${{ matrix.cpu }}-${{ matrix.nim_version }}-${{ matrix.tests }}'
name: ${{ matrix.os }}-${{ matrix.tests }}-${{ matrix.cpu }}-${{ matrix.nim_version }}-${{ matrix.job_number }}
runs-on: ${{ matrix.builder }}
timeout-minutes: 80
timeout-minutes: 90
steps:
- name: Checkout sources
uses: actions/checkout@v4
@ -38,28 +38,32 @@ jobs:
uses: ./.github/actions/nimbus-build-system
with:
os: ${{ matrix.os }}
cpu: ${{ matrix.cpu }}
shell: ${{ matrix.shell }}
nim_version: ${{ matrix.nim_version }}
coverage: false
## Part 1 Tests ##
- name: Unit tests
if: matrix.tests == 'unittest' || matrix.tests == 'all'
run: make -j${ncpu} test
# workaround for https://github.com/NomicFoundation/hardhat/issues/3877
- name: Setup Node.js
if: matrix.tests == 'contract' || matrix.tests == 'integration' || matrix.tests == 'tools' || matrix.tests == 'all'
uses: actions/setup-node@v4
with:
node-version: 18.15
node-version: 22
- name: Start Ethereum node with Codex contracts
if: matrix.tests == 'contract' || matrix.tests == 'integration' || matrix.tests == 'all'
working-directory: vendor/codex-contracts-eth
- name: Start Ethereum node with Logos Storage contracts
if: matrix.tests == 'contract' || matrix.tests == 'integration' || matrix.tests == 'tools' || matrix.tests == 'all'
working-directory: vendor/logos-storage-contracts-eth
env:
MSYS2_PATH_TYPE: inherit
run: |
npm install
npm ci
npm start &
# Wait for the contracts to be deployed
sleep 5
## Part 2 Tests ##
- name: Contract tests
@ -69,16 +73,23 @@ jobs:
## Part 3 Tests ##
- name: Integration tests
if: matrix.tests == 'integration' || matrix.tests == 'all'
env:
CODEX_INTEGRATION_TEST_INCLUDES: ${{ matrix.includes }}
run: make -j${ncpu} testIntegration
- name: Upload integration tests log files
uses: actions/upload-artifact@v4
if: (matrix.tests == 'integration' || matrix.tests == 'all') && always()
with:
name: ${{ matrix.os }}-${{ matrix.cpu }}-${{ matrix.nim_version }}-integration-tests-logs
name: ${{ matrix.os }}-${{ matrix.cpu }}-${{ matrix.nim_version }}-${{ matrix.job_number }}-integration-tests-logs
path: tests/integration/logs/
retention-days: 1
## Part 4 Tools ##
- name: Tools tests
if: matrix.tests == 'tools' || matrix.tests == 'all'
run: make -j${ncpu} testTools
status:
if: always()
needs: [build]

View File

@ -9,30 +9,28 @@ on:
env:
cache_nonce: 0 # Allows for easily busting actions/cache caches
nim_version: pinned
nim_version: v2.2.4
concurrency:
group: ${{ github.workflow }}-${{ github.ref || github.run_id }}
cancel-in-progress: true
jobs:
matrix:
runs-on: ubuntu-latest
outputs:
matrix: ${{ steps.matrix.outputs.matrix }}
cache_nonce: ${{ env.cache_nonce }}
steps:
- name: Checkout sources
uses: actions/checkout@v4
- name: Compute matrix
id: matrix
uses: fabiocaccamo/create-matrix-action@v4
with:
matrix: |
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {all}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {macos}, cpu {amd64}, builder {macos-13}, tests {all}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {windows}, cpu {amd64}, builder {windows-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {msys2}
os {windows}, cpu {amd64}, builder {windows-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {msys2}
os {windows}, cpu {amd64}, builder {windows-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {msys2}
run: |
echo 'matrix<<EOF' >> $GITHUB_OUTPUT
tools/scripts/ci-job-matrix.sh >> $GITHUB_OUTPUT
echo 'EOF' >> $GITHUB_OUTPUT
build:
needs: matrix
@ -41,6 +39,19 @@ jobs:
matrix: ${{ needs.matrix.outputs.matrix }}
cache_nonce: ${{ needs.matrix.outputs.cache_nonce }}
linting:
runs-on: ubuntu-latest
if: github.event_name == 'pull_request'
steps:
- uses: actions/checkout@v4
- name: Check `nph` formatting
uses: arnetheduck/nph-action@v1
with:
version: 0.6.1
options: "codex/ tests/"
fail: true
suggest: true
coverage:
runs-on: ubuntu-latest
steps:
@ -55,6 +66,7 @@ jobs:
with:
os: linux
nim_version: ${{ env.nim_version }}
coverage: true
- name: Generate coverage data
run: |

View File

@ -0,0 +1,19 @@
name: Conventional Commits Linting
on:
push:
branches:
- master
pull_request:
workflow_dispatch:
merge_group:
jobs:
pr-title:
runs-on: ubuntu-latest
if: github.event_name == 'pull_request'
steps:
- name: PR Conventional Commit Validation
uses: ytanikin/pr-conventional-commits@1.4.1
with:
task_types: '["feat","fix","docs","test","ci","build","refactor","style","perf","chore","revert"]'

View File

@ -1,33 +0,0 @@
name: Docker - Dist-Tests
on:
push:
branches:
- master
tags:
- 'v*.*.*'
paths-ignore:
- '**/*.md'
- '.gitignore'
- '.github/**'
- '!.github/workflows/docker-dist-tests.yml'
- '!.github/workflows/docker-reusable.yml'
- 'docker/**'
- '!docker/codex.Dockerfile'
- '!docker/docker-entrypoint.sh'
workflow_dispatch:
jobs:
build-and-push:
name: Build and Push
uses: ./.github/workflows/docker-reusable.yml
with:
nimflags: '-d:disableMarchNative -d:codex_enable_api_debug_peers=true -d:codex_enable_proof_failures=true -d:codex_enable_log_counter=true'
nat_ip_auto: true
tag_latest: ${{ github.ref_name == github.event.repository.default_branch || startsWith(github.ref, 'refs/tags/') }}
tag_suffix: dist-tests
continuous_tests_list: PeersTest HoldMyBeerTest
continuous_tests_duration: 12h
secrets: inherit

View File

@ -34,6 +34,11 @@ on:
description: Set latest tag for Docker images
required: false
type: boolean
tag_stable:
default: false
description: Set stable tag for Docker images
required: false
type: boolean
tag_sha:
default: true
description: Set Git short commit as Docker tag
@ -54,6 +59,19 @@ on:
description: Continuous Tests duration
required: false
type: string
run_release_tests:
description: Run Release tests
required: false
type: string
default: false
contract_image:
description: Specifies compatible smart contract image
required: false
type: string
outputs:
codex_image:
description: Logos Storage Docker image tag
value: ${{ jobs.publish.outputs.codex_image }}
env:
@ -64,19 +82,33 @@ env:
NIMFLAGS: ${{ inputs.nimflags }}
NAT_IP_AUTO: ${{ inputs.nat_ip_auto }}
TAG_LATEST: ${{ inputs.tag_latest }}
TAG_STABLE: ${{ inputs.tag_stable }}
TAG_SHA: ${{ inputs.tag_sha }}
TAG_SUFFIX: ${{ inputs.tag_suffix }}
CONTRACT_IMAGE: ${{ inputs.contract_image }}
# Tests
CONTINUOUS_TESTS_SOURCE: codex-storage/cs-codex-dist-tests
CONTINUOUS_TESTS_BRANCH: master
TESTS_SOURCE: logos-storage/logos-storage-nim-cs-dist-tests
TESTS_BRANCH: master
CONTINUOUS_TESTS_LIST: ${{ inputs.continuous_tests_list }}
CONTINUOUS_TESTS_DURATION: ${{ inputs.continuous_tests_duration }}
CONTINUOUS_TESTS_NAMEPREFIX: c-tests-ci
jobs:
# Compute variables
compute:
name: Compute build ID
runs-on: ubuntu-latest
outputs:
build_id: ${{ steps.build_id.outputs.build_id }}
steps:
- name: Generate unique build id
id: build_id
run: echo "build_id=$(openssl rand -hex 5)" >> $GITHUB_OUTPUT
# Build platform specific image
build:
needs: compute
strategy:
fail-fast: true
matrix:
@ -89,11 +121,11 @@ jobs:
- target:
os: linux
arch: amd64
builder: ubuntu-22.04
builder: ubuntu-24.04
- target:
os: linux
arch: arm64
builder: buildjet-4vcpu-ubuntu-2204-arm
builder: ubuntu-24.04-arm
name: Build ${{ matrix.target.os }}/${{ matrix.target.arch }}
runs-on: ${{ matrix.builder }}
@ -103,11 +135,19 @@ jobs:
- name: Checkout
uses: actions/checkout@v4
- name: Docker - Variables
run: |
# Create contract label for compatible contract image if specified
if [[ -n "${{ env.CONTRACT_IMAGE }}" ]]; then
echo "CONTRACT_LABEL=storage.codex.nim-codex.blockchain-image=${{ env.CONTRACT_IMAGE }}" >> $GITHUB_ENV
fi
- name: Docker - Meta
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ env.DOCKER_REPO }}
labels: ${{ env.CONTRACT_LABEL }}
- name: Docker - Set up Buildx
uses: docker/setup-buildx-action@v3
@ -142,7 +182,7 @@ jobs:
- name: Docker - Upload digest
uses: actions/upload-artifact@v4
with:
name: digests-${{ matrix.target.arch }}
name: digests-${{ needs.compute.outputs.build_id }}-${{ matrix.target.arch }}
path: /tmp/digests/*
if-no-files-found: error
retention-days: 1
@ -154,11 +194,12 @@ jobs:
runs-on: ubuntu-latest
outputs:
version: ${{ steps.meta.outputs.version }}
needs: build
codex_image: ${{ steps.image_tag.outputs.codex_image }}
needs: [build, compute]
steps:
- name: Docker - Variables
run: |
# Adjust custom suffix when set and
# Adjust custom suffix when set
if [[ -n "${{ env.TAG_SUFFIX }}" ]]; then
echo "TAG_SUFFIX=-${{ env.TAG_SUFFIX }}" >> $GITHUB_ENV
fi
@ -179,10 +220,15 @@ jobs:
echo "TAG_RAW=false" >> $GITHUB_ENV
fi
# Create contract label for compatible contract image if specified
if [[ -n "${{ env.CONTRACT_IMAGE }}" ]]; then
echo "CONTRACT_LABEL=storage.codex.nim-codex.blockchain-image=${{ env.CONTRACT_IMAGE }}" >> $GITHUB_ENV
fi
- name: Docker - Download digests
uses: actions/download-artifact@v4
with:
pattern: digests-*
pattern: digests-${{ needs.compute.outputs.build_id }}-*
merge-multiple: true
path: /tmp/digests
@ -194,12 +240,14 @@ jobs:
uses: docker/metadata-action@v5
with:
images: ${{ env.DOCKER_REPO }}
labels: ${{ env.CONTRACT_LABEL }}
flavor: |
latest=${{ env.TAG_LATEST }}
suffix=${{ env.TAG_SUFFIX }},onlatest=true
tags: |
type=semver,pattern={{version}}
type=raw,enable=${{ env.TAG_RAW }},value=latest
type=raw,enable=${{ env.TAG_STABLE }},value=stable
type=sha,enable=${{ env.TAG_SHA }}
- name: Docker - Login to Docker Hub
@ -214,54 +262,81 @@ jobs:
docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
$(printf '${{ env.DOCKER_REPO }}@sha256:%s ' *)
- name: Docker - Image tag
id: image_tag
run: echo "codex_image=${{ env.DOCKER_REPO }}:${{ steps.meta.outputs.version }}" >> "$GITHUB_OUTPUT"
- name: Docker - Inspect image
run: |
docker buildx imagetools inspect ${{ env.DOCKER_REPO }}:${{ steps.meta.outputs.version }}
run: docker buildx imagetools inspect ${{ steps.image_tag.outputs.codex_image }}
# Compute Continuous Tests inputs
# Compute Tests inputs
compute-tests-inputs:
name: Compute Continuous Tests list
if: ${{ inputs.continuous_tests_list != '' && github.ref_name == github.event.repository.default_branch }}
name: Compute Tests inputs
if: ${{ inputs.continuous_tests_list != '' || inputs.run_release_tests == 'true' }}
runs-on: ubuntu-latest
needs: publish
outputs:
source: ${{ steps.compute.outputs.source }}
branch: ${{ steps.compute.outputs.branch }}
branch: ${{ env.TESTS_BRANCH }}
workflow_source: ${{ env.TESTS_SOURCE }}
codexdockerimage: ${{ steps.compute.outputs.codexdockerimage }}
nameprefix: ${{ steps.compute.outputs.nameprefix }}
continuous_tests_list: ${{ steps.compute.outputs.continuous_tests_list }}
continuous_tests_duration: ${{ steps.compute.outputs.continuous_tests_duration }}
continuous_tests_workflow: ${{ steps.compute.outputs.continuous_tests_workflow }}
workflow_source: ${{ steps.compute.outputs.workflow_source }}
steps:
- name: Compute Continuous Tests list
- name: Compute Tests inputs
id: compute
run: |
echo "source=${{ format('{0}/{1}', github.server_url, env.CONTINUOUS_TESTS_SOURCE) }}" >> "$GITHUB_OUTPUT"
echo "branch=${{ env.CONTINUOUS_TESTS_BRANCH }}" >> "$GITHUB_OUTPUT"
echo "source=${{ format('{0}/{1}', github.server_url, env.TESTS_SOURCE) }}" >> "$GITHUB_OUTPUT"
echo "codexdockerimage=${{ inputs.docker_repo }}:${{ needs.publish.outputs.version }}" >> "$GITHUB_OUTPUT"
# Compute Continuous Tests inputs
compute-continuous-tests-inputs:
name: Compute Continuous Tests inputs
if: ${{ inputs.continuous_tests_list != '' && github.ref_name == github.event.repository.default_branch }}
runs-on: ubuntu-latest
needs: compute-tests-inputs
outputs:
nameprefix: ${{ steps.compute.outputs.nameprefix }}
continuous_tests_list: ${{ steps.compute.outputs.continuous_tests_list }}
continuous_tests_duration: ${{ env.CONTINUOUS_TESTS_DURATION }}
continuous_tests_workflow: ${{ steps.compute.outputs.continuous_tests_workflow }}
steps:
- name: Compute Continuous Tests inputs
id: compute
run: |
echo "nameprefix=$(awk '{ print tolower($0) }' <<< ${{ env.CONTINUOUS_TESTS_NAMEPREFIX }})" >> "$GITHUB_OUTPUT"
echo "continuous_tests_list=$(jq -cR 'split(" ")' <<< '${{ env.CONTINUOUS_TESTS_LIST }}')" >> "$GITHUB_OUTPUT"
echo "continuous_tests_duration=${{ env.CONTINUOUS_TESTS_DURATION }}" >> "$GITHUB_OUTPUT"
echo "workflow_source=${{ env.CONTINUOUS_TESTS_SOURCE }}" >> "$GITHUB_OUTPUT"
# Run Continuous Tests
run-tests:
run-continuous-tests:
name: Run Continuous Tests
needs: [publish, compute-tests-inputs]
needs: [compute-tests-inputs, compute-continuous-tests-inputs]
strategy:
max-parallel: 1
matrix:
tests: ${{ fromJSON(needs.compute-tests-inputs.outputs.continuous_tests_list) }}
uses: codex-storage/cs-codex-dist-tests/.github/workflows/run-continuous-tests.yaml@master
tests: ${{ fromJSON(needs.compute-continuous-tests-inputs.outputs.continuous_tests_list) }}
uses: logos-storage/logos-storage-nim-cs-dist-tests/.github/workflows/run-continuous-tests.yaml@master
with:
source: ${{ needs.compute-tests-inputs.outputs.source }}
branch: ${{ needs.compute-tests-inputs.outputs.branch }}
codexdockerimage: ${{ needs.compute-tests-inputs.outputs.codexdockerimage }}
nameprefix: ${{ needs.compute-tests-inputs.outputs.nameprefix }}-${{ matrix.tests }}-${{ needs.compute-tests-inputs.outputs.continuous_tests_duration }}
nameprefix: ${{ needs.compute-continuous-tests-inputs.outputs.nameprefix }}-${{ matrix.tests }}-${{ needs.compute-continuous-tests-inputs.outputs.continuous_tests_duration }}
tests_filter: ${{ matrix.tests }}
tests_target_duration: ${{ needs.compute-tests-inputs.outputs.continuous_tests_duration }}
workflow_source: ${{ needs.compute-tests-inputs.outputs.workflow_source }}
secrets: inherit
# Run Release Tests
run-release-tests:
name: Run Release Tests
needs: [compute-tests-inputs]
if: ${{ inputs.run_release_tests == 'true' }}
uses: logos-storage/logos-storage-nim-cs-dist-tests/.github/workflows/run-release-tests.yaml@master
with:
source: ${{ needs.compute-tests-inputs.outputs.source }}
branch: ${{ needs.compute-tests-inputs.outputs.branch }}
codexdockerimage: ${{ needs.compute-tests-inputs.outputs.codexdockerimage }}
workflow_source: ${{ needs.compute-tests-inputs.outputs.workflow_source }}
secrets: inherit

View File

@ -18,11 +18,27 @@ on:
- '!docker/docker-entrypoint.sh'
workflow_dispatch:
jobs:
get-contracts-hash:
runs-on: ubuntu-latest
outputs:
hash: ${{ steps.get-hash.outputs.hash }}
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Get submodule short hash
id: get-hash
run: |
hash=$(git rev-parse --short HEAD:vendor/logos-storage-contracts-eth)
echo "hash=$hash" >> $GITHUB_OUTPUT
build-and-push:
name: Build and Push
uses: ./.github/workflows/docker-reusable.yml
needs: get-contracts-hash
with:
tag_latest: ${{ github.ref_name == github.event.repository.default_branch || startsWith(github.ref, 'refs/tags/') }}
tag_stable: ${{ startsWith(github.ref, 'refs/tags/') }}
contract_image: "codexstorage/codex-contracts-eth:sha-${{ needs.get-contracts-hash.outputs.hash }}"
secrets: inherit

View File

@ -2,17 +2,17 @@ name: OpenAPI
on:
push:
branches:
- 'master'
tags:
- "v*.*.*"
paths:
- 'openapi.yaml'
- '.github/workflows/docs.yml'
- "openapi.yaml"
- ".github/workflows/docs.yml"
pull_request:
branches:
- '**'
- "**"
paths:
- 'openapi.yaml'
- '.github/workflows/docs.yml'
- "openapi.yaml"
- ".github/workflows/docs.yml"
# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
permissions:
@ -28,38 +28,39 @@ jobs:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: '0'
fetch-depth: 0
- uses: actions/setup-node@v4
with:
node-version: 18
- name: Lint OpenAPI
shell: bash
run: npx @redocly/cli lint openapi.yaml
deploy:
name: Deploy
runs-on: ubuntu-latest
if: github.ref == 'refs/heads/master'
if: startsWith(github.ref, 'refs/tags/')
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: '0'
fetch-depth: 0
- uses: actions/setup-node@v4
with:
node-version: 18
- name: Build OpenAPI
shell: bash
run: npx @redocly/cli build-docs openapi.yaml --output "openapi/index.html" --title "Codex API"
run: npx @redocly/cli build-docs openapi.yaml --output openapi/index.html --title "Logos Storage API"
- name: Build Postman Collection
run: npx -y openapi-to-postmanv2 -s openapi.yaml -o openapi/postman.json -p -O folderStrategy=Tags,includeAuthInfoInExample=false
- name: Upload artifact
uses: actions/upload-pages-artifact@v3
with:
path: './openapi'
path: openapi
- name: Deploy to GitHub Pages
uses: actions/deploy-pages@v4

View File

@ -6,7 +6,7 @@ on:
env:
cache_nonce: 0 # Allows for easily busting actions/cache caches
nim_version: pinned, v1.6.16, v1.6.18
nim_version: pinned
jobs:
matrix:
@ -15,12 +15,14 @@ jobs:
matrix: ${{ steps.matrix.outputs.matrix }}
cache_nonce: ${{ env.cache_nonce }}
steps:
- name: Checkout sources
uses: actions/checkout@v4
- name: Compute matrix
id: matrix
uses: fabiocaccamo/create-matrix-action@v4
with:
matrix: |
os {linux}, cpu {amd64}, builder {ubuntu-latest}, tests {all}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
run: |
echo 'matrix<<EOF' >> $GITHUB_OUTPUT
tools/scripts/ci-job-matrix.sh linux >> $GITHUB_OUTPUT
echo 'EOF' >> $GITHUB_OUTPUT
build:
needs: matrix

View File

@ -4,14 +4,19 @@ on:
push:
tags:
- 'v*.*.*'
branches:
- master
workflow_dispatch:
env:
cache_nonce: 0 # Allows for easily busting actions/cache caches
nim_version: v1.6.14
rust_version: 1.78.0
binary_base: codex
upload_to_codex: false
nim_version: pinned
rust_version: 1.79.0
storage_binary_base: storage
cirdl_binary_base: cirdl
build_dir: build
nim_flags: ''
windows_libs: 'libstdc++-6.dll libgomp-1.dll libgcc_s_seh-1.dll libwinpthread-1.dll'
jobs:
# Matrix
@ -22,12 +27,11 @@ jobs:
steps:
- name: Compute matrix
id: matrix
uses: fabiocaccamo/create-matrix-action@v4
uses: fabiocaccamo/create-matrix-action@v5
with:
matrix: |
os {linux}, cpu {amd64}, builder {ubuntu-22.04}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {arm64}, builder {buildjet-4vcpu-ubuntu-2204-arm}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {macos}, cpu {amd64}, builder {macos-13}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {linux}, cpu {arm64}, builder {ubuntu-22.04-arm}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {macos}, cpu {arm64}, builder {macos-14}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {bash --noprofile --norc -e -o pipefail}
os {windows}, cpu {amd64}, builder {windows-latest}, nim_version {${{ env.nim_version }}}, rust_version {${{ env.rust_version }}}, shell {msys2}
@ -68,20 +72,50 @@ jobs:
macos*) os_name="darwin" ;;
windows*) os_name="windows" ;;
esac
binary="${{ env.binary_base }}-${{ github.ref_name }}-${os_name}-${{ matrix.cpu }}"
[[ ${os_name} == "windows" ]] && binary="${binary}.exe"
echo "binary=${binary}" >>$GITHUB_ENV
github_ref_name="${GITHUB_REF_NAME/\//-}"
storage_binary="${{ env.storage_binary_base }}-${github_ref_name}-${os_name}-${{ matrix.cpu }}"
cirdl_binary="${{ env.cirdl_binary_base }}-${github_ref_name}-${os_name}-${{ matrix.cpu }}"
if [[ ${os_name} == "windows" ]]; then
storage_binary="${storage_binary}.exe"
cirdl_binary="${cirdl_binary}.exe"
fi
echo "storage_binary=${storage_binary}" >>$GITHUB_ENV
echo "cirdl_binary=${cirdl_binary}" >>$GITHUB_ENV
- name: Release - Build
run: |
make NIMFLAGS="--out:${{ env.binary }}"
make NIMFLAGS="--out:${{ env.build_dir }}/${{ env.storage_binary }} ${{ env.nim_flags }}"
make cirdl NIMFLAGS="--out:${{ env.build_dir }}/${{ env.cirdl_binary }} ${{ env.nim_flags }}"
- name: Release - Upload binaries
- name: Release - Libraries
run: |
if [[ "${{ matrix.os }}" == "windows" ]]; then
for lib in ${{ env.windows_libs }}; do
cp -v "${MINGW_PREFIX}/bin/${lib}" "${{ env.build_dir }}"
done
fi
- name: Release - Upload Logos Storage build artifacts
uses: actions/upload-artifact@v4
with:
name: release-${{ env.binary }}
path: ${{ env.binary }}
retention-days: 1
name: release-${{ env.storage_binary }}
path: ${{ env.build_dir }}/${{ env.storage_binary_base }}*
retention-days: 30
- name: Release - Upload cirdl build artifacts
uses: actions/upload-artifact@v4
with:
name: release-${{ env.cirdl_binary }}
path: ${{ env.build_dir }}/${{ env.cirdl_binary_base }}*
retention-days: 30
- name: Release - Upload windows libs
if: matrix.os == 'windows'
uses: actions/upload-artifact@v4
with:
name: release-${{ matrix.os }}-libs
path: ${{ env.build_dir }}/*.dll
retention-days: 30
# Release
release:
@ -99,47 +133,85 @@ jobs:
- name: Release - Compress and checksum
run: |
cd /tmp/release
prepare() {
# Checksum
checksum() {
arc="${1}"
sha256sum "${arc}" >"${arc}.sha256"
# Upload to Codex
if [[ "${{ env.upload_to_codex }}" == "true" ]]; then
codex_endpoints="${{ secrets.CODEX_ENDPOINTS }}"
codex_username="${{ secrets.CODEX_USERNAME }}"
codex_password="${{ secrets.CODEX_PASSWORD }}"
for endpoint in ${codex_endpoints}; do
echo "::add-mask::${endpoint}"
cid=$(curl -X POST \
"${endpoint}/api/codex/v1/data" \
-u "${codex_username}":"${codex_password}" \
-H "content-type: application/octet-stream" \
-T "${arc}")
echo "${cid}" >"${arc}.cid"
done
fi
}
# Compress and prepare
for file in *; do
for file in ${{ env.storage_binary_base }}* ${{ env.cirdl_binary_base }}*; do
if [[ "${file}" == *".exe"* ]]; then
# Windows - binary only
arc="${file%.*}.zip"
zip "${arc}" "${file}"
checksum "${arc}"
# Windows - binary and libs
arc="${file%.*}-libs.zip"
zip "${arc}" "${file}" ${{ env.windows_libs }}
rm -f "${file}"
prepare "${arc}"
checksum "${arc}"
else
# Linux/macOS
arc="${file}.tar.gz"
chmod 755 "${file}"
tar cfz "${arc}" "${file}"
rm -f "${file}"
prepare "${arc}"
checksum "${arc}"
fi
done
rm -f ${{ env.windows_libs }}
- name: Release - Upload compressed artifacts and checksums
uses: actions/upload-artifact@v4
with:
name: archives-and-checksums
path: /tmp/release/
retention-days: 30
- name: Release - Upload to the cloud
env:
s3_endpoint: ${{ secrets.S3_ENDPOINT }}
s3_bucket: ${{ secrets.S3_BUCKET }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
run: |
# Variables
branch="${GITHUB_REF_NAME/\//-}"
folder="/tmp/release"
# Tagged releases
if [[ "${{ github.ref }}" == *"refs/tags/"* ]]; then
aws s3 cp --recursive "${folder}" s3://${{ env.s3_bucket }}/releases/${branch} --endpoint-url ${{ env.s3_endpoint }}
echo "${branch}" > "${folder}"/latest
aws s3 cp "${folder}"/latest s3://${{ env.s3_bucket }}/releases/latest --endpoint-url ${{ env.s3_endpoint }}
rm -f "${folder}"/latest
# master branch
elif [[ "${branch}" == "${{ github.event.repository.default_branch }}" ]]; then
aws s3 cp --recursive "${folder}" s3://${{ env.s3_bucket }}/${branch} --endpoint-url ${{ env.s3_endpoint }}
# Custom branch
else
aws s3 cp --recursive "${folder}" s3://${{ env.s3_bucket }}/branches/${branch} --endpoint-url ${{ env.s3_endpoint }}
fi
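Given that bucket layout, a hedged consumer-side sketch for fetching and verifying the newest tagged build (the base URL is a placeholder; the real endpoint and bucket come from secrets not shown here):
```bash
base="https://releases.example.com"           # placeholder for the S3 endpoint/bucket
tag=$(curl -fsSL "${base}/releases/latest")   # the "latest" pointer written above
curl -fsSLO "${base}/releases/${tag}/storage-${tag}-linux-amd64.tar.gz"
curl -fsSLO "${base}/releases/${tag}/storage-${tag}-linux-amd64.tar.gz.sha256"
sha256sum -c "storage-${tag}-linux-amd64.tar.gz.sha256"
```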
- name: Release
uses: softprops/action-gh-release@v2
if: startsWith(github.ref, 'refs/tags/')
with:
files: |
/tmp/release/*
make_latest: true
- name: Generate Python SDK
uses: peter-evans/repository-dispatch@v3
if: startsWith(github.ref, 'refs/tags/')
with:
token: ${{ secrets.DISPATCH_PAT }}
repository: logos-storage/logos-storage-py-api-client
event-type: generate
client-payload: '{"openapi_url": "https://raw.githubusercontent.com/logos-storage/logos-storage-nim/${{ github.ref }}/openapi.yaml"}'

7
.gitignore vendored
View File

@ -3,10 +3,15 @@
!*.*
*.exe
!LICENSE*
!Makefile
!Jenkinsfile
nimcache/
# Executables when using nix will be stored in result/ directory
result/
# Executables shall be put in an ignored build/ directory
build/
@ -40,3 +45,5 @@ docker/prometheus-data
.DS_Store
nim.cfg
tests/integration/logs
data/

61
.gitmodules vendored
View File

@ -37,22 +37,17 @@
path = vendor/nim-nitro
url = https://github.com/status-im/nim-nitro.git
ignore = untracked
branch = master
branch = main
[submodule "vendor/questionable"]
path = vendor/questionable
url = https://github.com/status-im/questionable.git
ignore = untracked
branch = master
[submodule "vendor/upraises"]
path = vendor/upraises
url = https://github.com/markspanbroek/upraises.git
ignore = untracked
branch = master
branch = main
[submodule "vendor/asynctest"]
path = vendor/asynctest
url = https://github.com/status-im/asynctest.git
ignore = untracked
branch = master
branch = main
[submodule "vendor/nim-presto"]
path = vendor/nim-presto
url = https://github.com/status-im/nim-presto.git
@ -132,7 +127,7 @@
path = vendor/nim-websock
url = https://github.com/status-im/nim-websock.git
ignore = untracked
branch = master
branch = main
[submodule "vendor/nim-contract-abi"]
path = vendor/nim-contract-abi
url = https://github.com/status-im/nim-contract-abi
@ -160,13 +155,13 @@
path = vendor/nim-taskpools
url = https://github.com/status-im/nim-taskpools.git
ignore = untracked
branch = master
branch = stable
[submodule "vendor/nim-leopard"]
path = vendor/nim-leopard
url = https://github.com/status-im/nim-leopard.git
[submodule "vendor/nim-codex-dht"]
path = vendor/nim-codex-dht
url = https://github.com/codex-storage/nim-codex-dht.git
[submodule "vendor/logos-storage-nim-dht"]
path = vendor/logos-storage-nim-dht
url = https://github.com/logos-storage/logos-storage-nim-dht.git
ignore = untracked
branch = master
[submodule "vendor/nim-datastore"]
@ -178,9 +173,11 @@
[submodule "vendor/nim-eth"]
path = vendor/nim-eth
url = https://github.com/status-im/nim-eth
[submodule "vendor/codex-contracts-eth"]
path = vendor/codex-contracts-eth
url = https://github.com/status-im/codex-contracts-eth
[submodule "vendor/logos-storage-contracts-eth"]
path = vendor/logos-storage-contracts-eth
url = https://github.com/logos-storage/logos-storage-contracts-eth.git
ignore = untracked
branch = master
[submodule "vendor/nim-protobuf-serialization"]
path = vendor/nim-protobuf-serialization
url = https://github.com/status-im/nim-protobuf-serialization
@ -195,23 +192,41 @@
url = https://github.com/zevv/npeg
[submodule "vendor/nim-poseidon2"]
path = vendor/nim-poseidon2
url = https://github.com/codex-storage/nim-poseidon2.git
url = https://github.com/logos-storage/nim-poseidon2.git
ignore = untracked
branch = master
[submodule "vendor/constantine"]
path = vendor/constantine
url = https://github.com/mratsim/constantine.git
[submodule "vendor/nim-circom-compat"]
path = vendor/nim-circom-compat
url = https://github.com/codex-storage/nim-circom-compat.git
url = https://github.com/logos-storage/nim-circom-compat.git
ignore = untracked
branch = master
[submodule "vendor/codex-storage-proofs-circuits"]
path = vendor/codex-storage-proofs-circuits
url = https://github.com/codex-storage/codex-storage-proofs-circuits.git
[submodule "vendor/logos-storage-proofs-circuits"]
path = vendor/logos-storage-proofs-circuits
url = https://github.com/logos-storage/logos-storage-proofs-circuits.git
ignore = untracked
branch = master
[submodule "vendor/nim-serde"]
path = vendor/nim-serde
url = https://github.com/codex-storage/nim-serde.git
url = https://github.com/logos-storage/nim-serde.git
[submodule "vendor/nim-leveldbstatic"]
path = vendor/nim-leveldbstatic
url = https://github.com/codex-storage/nim-leveldb.git
url = https://github.com/logos-storage/nim-leveldb.git
[submodule "vendor/nim-zippy"]
path = vendor/nim-zippy
url = https://github.com/status-im/nim-zippy.git
[submodule "vendor/nph"]
path = vendor/nph
url = https://github.com/arnetheduck/nph.git
[submodule "vendor/nim-quic"]
path = vendor/nim-quic
url = https://github.com/vacp2p/nim-quic.git
ignore = untracked
branch = main
[submodule "vendor/nim-ngtcp2"]
path = vendor/nim-ngtcp2
url = https://github.com/vacp2p/nim-ngtcp2.git
ignore = untracked
branch = main

View File

@ -1,192 +0,0 @@
# Building Codex
## Table of Contents
- [Install developer tools](#prerequisites)
- [Linux](#linux)
- [macOS](#macos)
- [Windows + MSYS2](#windows--msys2)
- [Other](#other)
- [Clone and prepare the Git repository](#repository)
- [Build the executable](#executable)
- [Run the example](#example-usage)
**Optional**
- [Run the tests](#tests)
## Prerequisites
To build nim-codex, developer tools need to be installed and accessible in the OS.
Instructions below correspond roughly to environmental setups in nim-codex's [CI workflow](https://github.com/codex-storage/nim-codex/blob/main/.github/workflows/ci.yml) and are known to work.
Other approaches may be viable. On macOS, some users may prefer [MacPorts](https://www.macports.org/) to [Homebrew](https://brew.sh/). On Windows, rather than use MSYS2, some users may prefer to install developer tools with [winget](https://docs.microsoft.com/en-us/windows/package-manager/winget/), [Scoop](https://scoop.sh/), or [Chocolatey](https://chocolatey.org/), or download installers for e.g. Make and CMake while otherwise relying on official Windows developer tools. Community contributions to these docs and our build system are welcome!
### Rust
The current implementation of Codex's zero-knowledge proving circuit requires the installation of Rust v1.76.0 or greater. Be sure to install it for your OS and add it to your terminal's `PATH` so that the command `cargo --version` reports a compatible version.
### Linux
*Package manager commands may require `sudo` depending on OS setup.*
On a bare bones installation of Debian (or a distribution derived from Debian, such as Ubuntu), run
```shell
apt-get update && apt-get install build-essential cmake curl git rustc cargo
```
Non-Debian distributions have different package managers: `apk`, `dnf`, `pacman`, `rpm`, `yum`, etc.
For example, on a bare bones installation of Fedora, run
```shell
dnf install @development-tools cmake gcc-c++ rust cargo
```
If your distribution does not provide the required Rust version, you can install it using [rustup](https://www.rust-lang.org/tools/install)
```shell
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs/ | sh -s -- --default-toolchain=1.76.0 -y
. "$HOME/.cargo/env"
```
### macOS
Install the [Xcode Command Line Tools](https://mac.install.guide/commandlinetools/index.html) by opening a terminal and running
```shell
xcode-select --install
```
Install [Homebrew (`brew`)](https://brew.sh/) and in a new terminal run
```shell
brew install bash cmake rust
```
Check that `PATH` is set up correctly
```shell
which bash cmake
# /usr/local/bin/bash
# /usr/local/bin/cmake
```
### Windows + MSYS2
*Instructions below assume the OS is 64-bit Windows and that the hardware or VM is [x86-64](https://en.wikipedia.org/wiki/X86-64) compatible.*
Download and run the installer from [msys2.org](https://www.msys2.org/).
Launch an MSYS2 [environment](https://www.msys2.org/docs/environments/). UCRT64 is generally recommended: from the Windows *Start menu* select `MSYS2 MinGW UCRT x64`.
Assuming a UCRT64 environment, in Bash run
```shell
pacman -Suy
pacman -S base-devel git unzip mingw-w64-ucrt-x86_64-toolchain mingw-w64-ucrt-x86_64-cmake mingw-w64-ucrt-x86_64-rust
```
<!-- #### Headless Windows container -->
<!-- add instructions re: getting setup with MSYS2 in a Windows container -->
<!-- https://github.com/StefanScherer/windows-docker-machine -->
#### Optional: VSCode Terminal integration
You can link the MSYS2-UCRT64 terminal into VSCode by modifying the configuration file as shown below.
File: `C:/Users/<username>/AppData/Roaming/Code/User/settings.json`
```json
{
...
"terminal.integrated.profiles.windows": {
...
"MSYS2-UCRT64": {
"path": "C:\\msys64\\usr\\bin\\bash.exe",
"args": [
"--login",
"-i"
],
"env": {
"MSYSTEM": "UCRT64",
"CHERE_INVOKING": "1",
"MSYS2_PATH_TYPE": "inherit"
}
}
}
}
```
### Other
It is possible that nim-codex can be built and run on other platforms supported by the [Nim](https://nim-lang.org/) language: BSD family, older versions of Windows, etc. There has not been sufficient experimentation with nim-codex on such platforms, so instructions are not provided. Community contributions to these docs and our build system are welcome!
## Repository
In Bash run
```shell
git clone https://github.com/codex-storage/nim-codex.git repos/nim-codex && cd repos/nim-codex
```
nim-codex uses the [nimbus-build-system](https://github.com/status-im/nimbus-build-system), so next run
```shell
make update
```
This step can take a while to complete because by default it builds the [Nim compiler](https://nim-lang.org/docs/nimc.html).
To see more output from `make` pass `V=1`. This works for all `make` targets in projects using the nimbus-build-system
```shell
make V=1 update
```
## Executable
In Bash run
```shell
make
```
The default `make` target creates the `build/codex` executable.
## Example usage
See the [instructions](README.md#cli-options) in the main readme.
## Tests
In Bash run
```shell
make test
```
### testAll
#### Prerequisites
To run the integration tests, an Ethereum test node is required. Follow these instructions to set it up.
##### Windows (do this before 'All platforms')
1. Download and install Visual Studio 2017 or newer. (Not VSCode!) In the Workloads overview, enable `Desktop development with C++`. (https://visualstudio.microsoft.com)
##### All platforms
1. Install Node.js (tested with v18.14.0); consider using [Node Version Manager (`nvm`)](https://github.com/nvm-sh/nvm#readme) to manage versions.
1. Open a terminal
1. Go to the vendor/codex-contracts-eth folder: `cd /<git-root>/vendor/codex-contracts-eth/`
1. `npm install` -> Should complete with the number of packages added and an overview of known vulnerabilities.
1. `npm test` -> Should output test results. May take a minute.
Before the integration tests are started, you must start the Ethereum test node manually.
1. Open a terminal
1. Go to the vendor/codex-contracts-eth folder: `cd /<git-root>/vendor/codex-contracts-eth/`
1. `npm start` -> This should launch Hardhat, and output a number of keys and a warning message.
#### Run
The `testAll` target runs the same tests as `make test` and also runs tests for nim-codex's Ethereum contracts, as well as a basic suite of integration tests.
Then, in a new terminal, run:
```shell
make testAll
```

37
Jenkinsfile vendored Normal file
View File

@ -0,0 +1,37 @@
#!/usr/bin/env groovy
library 'status-jenkins-lib@v1.9.13'
pipeline {
agent { label 'linux && x86_64 && nix-2.24' }
options {
disableConcurrentBuilds()
/* manage how many builds we keep */
buildDiscarder(logRotator(
numToKeepStr: '20',
daysToKeepStr: '30',
))
}
stages {
stage('Build') {
steps {
script {
nix.flake("default")
}
}
}
stage('Check') {
steps {
script {
sh './result/bin/storage --version'
}
}
}
}
post {
cleanup { cleanWs() }
}
}

199
LICENSE-APACHEv2 Normal file
View File

@ -0,0 +1,199 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

19
LICENSE-MIT Normal file
View File

@ -0,0 +1,19 @@
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

124
Makefile
View File

@ -15,7 +15,7 @@
#
# If NIM_COMMIT is set to "nimbusbuild", this will use the
# version pinned by nimbus-build-system.
PINNED_NIM_VERSION := v1.6.14
PINNED_NIM_VERSION := v2.2.4
ifeq ($(NIM_COMMIT),)
NIM_COMMIT := $(PINNED_NIM_VERSION)
@ -40,6 +40,30 @@ DOCKER_IMAGE_NIM_PARAMS ?= -d:chronicles_colors:none -d:insecure
LINK_PCRE := 0
ifeq ($(OS),Windows_NT)
ifeq ($(PROCESSOR_ARCHITECTURE), AMD64)
ARCH = x86_64
endif
ifeq ($(PROCESSOR_ARCHITECTURE), ARM64)
ARCH = arm64
endif
else
UNAME_P := $(shell uname -m)
ifneq ($(filter $(UNAME_P), i686 i386 x86_64),)
ARCH = x86_64
endif
ifneq ($(filter $(UNAME_P), aarch64 arm),)
ARCH = arm64
endif
endif
ifeq ($(ARCH), x86_64)
CXXFLAGS ?= -std=c++17 -mssse3
else
CXXFLAGS ?= -std=c++17
endif
export CXXFLAGS
# we don't want an error here, so we can handle things later, in the ".DEFAULT" target
-include $(BUILD_SYSTEM_DIR)/makefiles/variables.mk
@ -69,10 +93,15 @@ else # "variables.mk" was included. Business as usual until the end of this file
# default target, because it's the first one that doesn't start with '.'
# Builds the codex binary
# Builds the Logos Storage binary
all: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim codex $(NIM_PARAMS) build.nims
$(ENV_SCRIPT) nim storage $(NIM_PARAMS) build.nims
# Build tools/cirdl
cirdl: | deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim toolsCirdl $(NIM_PARAMS) build.nims
# must be included after the default target
-include $(BUILD_SYSTEM_DIR)/makefiles/targets.mk
@ -109,12 +138,12 @@ test: | build deps
# Builds and runs the smart contract tests
testContracts: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim testContracts $(NIM_PARAMS) build.nims
$(ENV_SCRIPT) nim testContracts $(NIM_PARAMS) --define:ws_resubscribe=240 build.nims
# Builds and runs the integration tests
testIntegration: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim testIntegration $(NIM_PARAMS) build.nims
$(ENV_SCRIPT) nim testIntegration $(NIM_PARAMS) --define:ws_resubscribe=240 build.nims
# Builds and runs all tests (except for Taiko L2 tests)
testAll: | build deps
@ -124,7 +153,12 @@ testAll: | build deps
# Builds and runs Taiko L2 tests
testTaiko: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim testTaiko $(NIM_PARAMS) codex.nims
$(ENV_SCRIPT) nim testTaiko $(NIM_PARAMS) build.nims
# Builds and runs tool tests
testTools: | cirdl
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim testTools $(NIM_PARAMS) build.nims
# nim-libbacktrace
LIBBACKTRACE_MAKE_FLAGS := -C vendor/nim-libbacktrace --no-print-directory BUILD_CXX_LIB=0
@ -144,11 +178,11 @@ coverage:
$(MAKE) NIMFLAGS="$(NIMFLAGS) --lineDir:on --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage" test
cd nimcache/release/testCodex && rm -f *.c
mkdir -p coverage
lcov --capture --directory nimcache/release/testCodex --output-file coverage/coverage.info
lcov --capture --keep-going --directory nimcache/release/testCodex --output-file coverage/coverage.info
shopt -s globstar && ls $$(pwd)/codex/{*,**/*}.nim
shopt -s globstar && lcov --extract coverage/coverage.info $$(pwd)/codex/{*,**/*}.nim --output-file coverage/coverage.f.info
shopt -s globstar && lcov --extract coverage/coverage.info --keep-going $$(pwd)/codex/{*,**/*}.nim --output-file coverage/coverage.f.info
echo -e $(BUILD_MSG) "coverage/report/index.html"
genhtml coverage/coverage.f.info --output-directory coverage/report
genhtml coverage/coverage.f.info --keep-going --output-directory coverage/report
show-coverage:
if which open >/dev/null; then (echo -e "\e[92mOpening\e[39m HTML coverage report in browser..." && open coverage/report/index.html) || true; fi
@ -165,4 +199,76 @@ ifneq ($(USE_LIBBACKTRACE), 0)
+ $(MAKE) -C vendor/nim-libbacktrace clean $(HANDLE_OUTPUT)
endif
############
## Format ##
############
.PHONY: build-nph install-nph-hook clean-nph print-nph-path
# Default location for nph binary shall be next to nim binary to make it available on the path.
NPH:=$(shell dirname $(NIM_BINARY))/nph
build-nph:
ifeq ("$(wildcard $(NPH))","")
$(ENV_SCRIPT) nim c vendor/nph/src/nph.nim && \
mv vendor/nph/src/nph $(shell dirname $(NPH))
echo "nph utility is available at " $(NPH)
endif
GIT_PRE_COMMIT_HOOK := .git/hooks/pre-commit
install-nph-hook: build-nph
ifeq ("$(wildcard $(GIT_PRE_COMMIT_HOOK))","")
cp ./tools/scripts/git_pre_commit_format.sh $(GIT_PRE_COMMIT_HOOK)
else
echo "$(GIT_PRE_COMMIT_HOOK) already present, will NOT override"
exit 1
endif
nph/%: build-nph
echo -e $(FORMAT_MSG) "nph/$*" && \
$(NPH) $*
format:
$(NPH) *.nim
$(NPH) codex/
$(NPH) tests/
$(NPH) library/
clean-nph:
rm -f $(NPH)
# To avoid hardcoding nph binary location in several places
print-nph-path:
echo "$(NPH)"
clean: | clean-nph
################
## C Bindings ##
################
.PHONY: libstorage
STATIC ?= 0
ifneq ($(strip $(STORAGE_LIB_PARAMS)),)
NIM_PARAMS := $(NIM_PARAMS) $(STORAGE_LIB_PARAMS)
endif
libstorage:
$(MAKE) deps
rm -f build/libstorage*
ifeq ($(STATIC), 1)
echo -e $(BUILD_MSG) "build/$@.a" && \
$(ENV_SCRIPT) nim libstorageStatic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=Release\"" codex.nims
else ifeq ($(detected_OS),Windows)
echo -e $(BUILD_MSG) "build/$@.dll" && \
$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-G \\\"MSYS Makefiles\\\" -DCMAKE_BUILD_TYPE=Release\"" codex.nims
else ifeq ($(detected_OS),macOS)
echo -e $(BUILD_MSG) "build/$@.dylib" && \
$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=Release\"" codex.nims
else
echo -e $(BUILD_MSG) "build/$@.so" && \
$(ENV_SCRIPT) nim libstorageDynamic $(NIM_PARAMS) -d:LeopardCmakeFlags="\"-DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=Release\"" codex.nims
endif
endif # "variables.mk" was not included

189
README.md
View File

@ -1,22 +1,22 @@
# Codex Decentralized Durability Engine
# Logos Storage Decentralized Engine
> The Codex project aims to create a decentralized durability engine that allows persisting data in p2p networks. In other words, it allows storing files and data with predictable durability guarantees for later retrieval.
> The Logos Storage project aims to create a decentralized engine that allows persisting data in p2p networks.
> WARNING: This project is under active development and is considered pre-alpha.
[![License: Apache](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
[![Stability: experimental](https://img.shields.io/badge/stability-experimental-orange.svg)](#stability)
[![CI](https://github.com/codex-storage/nim-codex/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/codex-storage/nim-codex/actions/workflows/ci.yml?query=branch%3Amaster)
[![Docker](https://github.com/codex-storage/nim-codex/actions/workflows/docker.yml/badge.svg?branch=master)](https://github.com/codex-storage/nim-codex/actions/workflows/docker.yml?query=branch%3Amaster)
[![Codecov](https://codecov.io/gh/codex-storage/nim-codex/branch/master/graph/badge.svg?token=XFmCyPSNzW)](https://codecov.io/gh/codex-storage/nim-codex)
[![CI](https://github.com/logos-storage/logos-storage-nim/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/logos-storage/logos-storage-nim/actions/workflows/ci.yml?query=branch%3Amaster)
[![Docker](https://github.com/logos-storage/logos-storage-nim/actions/workflows/docker.yml/badge.svg?branch=master)](https://github.com/logos-storage/logos-storage-nim/actions/workflows/docker.yml?query=branch%3Amaster)
[![Codecov](https://codecov.io/gh/logos-storage/logos-storage-nim/branch/master/graph/badge.svg?token=XFmCyPSNzW)](https://codecov.io/gh/logos-storage/logos-storage-nim)
[![Discord](https://img.shields.io/discord/895609329053474826)](https://discord.gg/CaJTh24ddQ)
![Docker Pulls](https://img.shields.io/docker/pulls/codexstorage/nim-codex)
## Build and Run
For detailed instructions on preparing to build nim-codex see [*Building Codex*](BUILDING.md).
For detailed instructions on preparing to build logos-storage-nim see [*Build Logos Storage*](https://docs.codex.storage/learn/build).
To build the project, clone it and run:
@ -29,119 +29,88 @@ The executable will be placed under the `build` directory under the project root
Run the client with:
```bash
build/codex
build/storage
```
## Configuration
It is possible to configure a Codex node in several ways:
It is possible to configure a Logos Storage node in several ways:
1. CLI options
2. Env. variable
3. Config
2. Environment variables
3. Configuration file
The order of priority is the same as above: Cli arguments > Env variables > Config file values.
The order of priority is the same as above: CLI options --> Environment variables --> Configuration file.
### Environment variables
Please check [documentation](https://docs.codex.storage/learn/run#configuration) for more information.
In order to set a configuration option using environment variables, first find the desired CLI option
and then transform it in the following way:
## Guides
1. prepend it with `CODEX_`
2. make it uppercase
3. replace `-` with `_`
For example, to configure `--log-level`, use `CODEX_LOG_LEVEL` as the environment variable name.
### Configuration file
A [TOML](https://toml.io/en/) configuration file can also be used to set configuration values. Configuration option names and corresponding values are placed in the file, separated by `=`. Configuration option names can be obtained from the `codex --help` command, and should not include the `--` prefix. For example, a node's log level (`--log-level`) can be configured using TOML as follows:
```toml
log-level = "trace"
```
The Codex node can then read the configuration from this file using the `--config-file` CLI parameter, like `codex --config-file=/path/to/your/config.toml`.
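A hedged illustration of the precedence order described above (the `STORAGE_` prefix is an assumption based on the `envVarsPrefix = "storage"` change further down in this diff; older builds used `CODEX_`):
```bash
# config.toml contains: log-level = "notice"
export STORAGE_LOG_LEVEL=info                 # assumed env var name after the rename
build/storage --config-file=config.toml --log-level=trace
# The CLI flag wins: the effective log level is "trace".
```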
### CLI Options
```
build/codex --help
Usage:
codex [OPTIONS]... command
The following options are available:
--config-file Loads the configuration from a TOML file [=none].
--log-level Sets the log level [=info].
--metrics Enable the metrics server [=false].
--metrics-address Listening address of the metrics server [=127.0.0.1].
--metrics-port Listening HTTP port of the metrics server [=8008].
-d, --data-dir The directory where codex will store configuration and data.
-i, --listen-addrs Multi Addresses to listen on [=/ip4/0.0.0.0/tcp/0].
-a, --nat IP Addresses to announce behind a NAT [=127.0.0.1].
-e, --disc-ip Discovery listen address [=0.0.0.0].
-u, --disc-port Discovery (UDP) port [=8090].
--net-privkey Source of network (secp256k1) private key file path or name [=key].
-b, --bootstrap-node Specifies one or more bootstrap nodes to use when connecting to the network.
--max-peers The maximum number of peers to connect to [=160].
--agent-string Node agent string which is used as identifier in network [=Codex].
--api-bindaddr The REST API bind address [=127.0.0.1].
-p, --api-port The REST Api port [=8080].
--repo-kind Backend for main repo store (fs, sqlite) [=fs].
-q, --storage-quota The size of the total storage quota dedicated to the node [=8589934592].
-t, --block-ttl Default block timeout in seconds - 0 disables the ttl [=$DefaultBlockTtl].
--block-mi Time interval in seconds - determines frequency of block maintenance cycle: how
often blocks are checked for expiration and cleanup
[=$DefaultBlockMaintenanceInterval].
--block-mn Number of blocks to check every maintenance cycle [=1000].
-c, --cache-size The size of the block cache, 0 disables the cache - might help on slow hard drives
[=0].
Available sub-commands:
codex persistence [OPTIONS]... command
The following options are available:
--eth-provider The URL of the JSON-RPC API of the Ethereum node [=ws://localhost:8545].
--eth-account The Ethereum account that is used for storage contracts.
--eth-private-key File containing Ethereum private key for storage contracts.
--marketplace-address Address of deployed Marketplace contract.
--validator Enables validator, requires an Ethereum node [=false].
--validator-max-slots Maximum number of slots that the validator monitors [=1000].
Available sub-commands:
codex persistence prover [OPTIONS]...
The following options are available:
--circom-r1cs The r1cs file for the storage circuit.
--circom-wasm The wasm file for the storage circuit.
--circom-zkey The zkey file for the storage circuit.
--circom-no-zkey Ignore the zkey file - use only for testing! [=false].
--proof-samples Number of samples to prove [=5].
--max-slot-depth The maximum depth of the slot tree [=32].
--max-dataset-depth The maximum depth of the dataset tree [=8].
--max-block-depth The maximum depth of the network block merkle tree [=5].
--max-cell-elements The maximum number of elements in a cell [=67].
```
#### Logging
Codex uses the [Chronicles](https://github.com/status-im/nim-chronicles) logging library, which allows great flexibility in working with logs.
Chronicles has the concept of topics, which categorize log entries into semantic groups.
Using the `log-level` parameter, you can set the top-level log level like `--log-level="trace"`, but more importantly,
you can set log levels for specific topics like `--log-level="info; trace: marketplace,node; error: blockexchange"`,
which sets the top-level log level to `info` and then for topics `marketplace` and `node` sets the level to `trace` and so on.
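For example, the topic-level syntax above can be passed straight on the command line:
```bash
# Top-level info; trace for the marketplace and node topics; errors only from blockexchange.
build/codex --log-level="info; trace: marketplace,node; error: blockexchange"
```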
### Guides
To get acquainted with Codex, consider:
* running the simple [Codex Two-Client Test](docs/TwoClientTest.md) for a start, and;
* if you are feeling more adventurous, try [Running a Local Codex Network with Marketplace Support](docs/Marketplace.md) using a local blockchain as well.
To get acquainted with Logos Storage, consider:
* running the simple [Logos Storage Two-Client Test](https://docs.codex.storage/learn/local-two-client-test) for a start, and;
* if you are feeling more adventurous, try [Running a Local Logos Storage Network with Marketplace Support](https://docs.codex.storage/learn/local-marketplace) using a local blockchain as well.
## API
The client exposes a REST API that can be used to interact with it. An overview of the API can be found on [api.codex.storage](https://api.codex.storage).
## Bindings
Logos Storage provides a C API that can be wrapped by other languages. The bindings are located in the `library` folder.
Currently, only a Go binding is included.
### Build the C library
```bash
make libstorage
```
This produces the shared library under `build/`.
### Run the Go example
Build the Go example:
```bash
go build -o storage-go examples/golang/storage.go
```
Export the library path:
```bash
export LD_LIBRARY_PATH=build
```
Run the example:
```bash
./storage-go
```
### Static vs Dynamic build
By default, Logos Storage builds a dynamic library (`libstorage.so`), which you can load at runtime.
If you prefer a static library (`libstorage.a`), set the `STATIC` flag:
```bash
# Build dynamic (default)
make libstorage
# Build static
make STATIC=1 libstorage
```
### Limitation
Callbacks must be fast and non-blocking; otherwise, the working thread will hang and prevent other requests from being processed.
## Contributing and development
Feel free to dive in; contributions are welcome! Open an issue or submit PRs.
### Linting and formatting
`logos-storage-nim` uses [nph](https://github.com/arnetheduck/nph) to format its code, and adhering to its styling is required.
On a fresh setup, run `make build-nph` to get `nph`.
To format files, run `make nph/<file or folder you want to format>`.
If you want, you can install a Git pre-commit hook using `make install-nph-hook`, which will format modified files prior to committing them.
If you are using VSCode and the [NimLang](https://marketplace.visualstudio.com/items?itemName=NimLang.nimlang) extension, you can enable "Format On Save" (i.e., the `nim.formatOnSave` property) to format files using `nph`.

View File

@ -10,17 +10,17 @@ nim c -r run_benchmarks
```
By default, all circuit files for each combination of circuit args will be generated in a unique folder named like:
nim-codex/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3
logos-storage-nim/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3
Generating the circuit files often takes longer than running benchmarks, so caching the results allows re-running the benchmark as needed.
You can modify the `CircuitArgs` and `CircuitEnv` objects in `runAllBenchMarks` to suit your needs. See `create_circuits.nim` for their definition.
The runner executes all commands relative to the `nim-codex` repo. This simplifies finding the correct circuit includes paths, etc. `CircuitEnv` sets all of this.
The runner executes all commands relative to the `logos-storage-nim` repo. This simplifies finding the correct circuit includes paths, etc. `CircuitEnv` sets all of this.
## Codex Ark Circom CLI
## Logos Storage Ark Circom CLI
Runs Codex's prover setup with Ark / Circom.
Runs Logos Storage's prover setup with Ark / Circom.
Compile:
```sh

View File

@ -29,10 +29,10 @@ proc findCodexProjectDir(): string =
func default*(tp: typedesc[CircuitEnv]): CircuitEnv =
let codexDir = findCodexProjectDir()
result.nimCircuitCli =
codexDir / "vendor" / "codex-storage-proofs-circuits" / "reference" / "nim" /
codexDir / "vendor" / "logos-storage-proofs-circuits" / "reference" / "nim" /
"proof_input" / "cli"
result.circuitDirIncludes =
codexDir / "vendor" / "codex-storage-proofs-circuits" / "circuit"
codexDir / "vendor" / "logos-storage-proofs-circuits" / "circuit"
result.ptauPath =
codexDir / "benchmarks" / "ceremony" / "powersOfTau28_hez_final_23.ptau"
result.ptauUrl = "https://storage.googleapis.com/zkevm/ptau".parseUri
@ -118,7 +118,7 @@ proc createCircuit*(
##
## All needed circuit files will be generated as needed.
## They will be located in `circBenchDir` which defaults to a folder like:
## `nim-codex/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3`
## `logos-storage-nim/benchmarks/circuit_bench_depth32_maxslots256_cellsize2048_blocksize65536_nsamples9_entropy1234567_seed12345_nslots11_ncells512_index3`
## with all the given CircuitArgs.
##
let circdir = circBenchDir

View File

@ -53,7 +53,6 @@ template printBenchMarkSummaries*(printRegular=true, printTsv=true) =
for k, v in benchRuns:
echo k, "\t", v.avgTimeSec, "\t", v.count
import std/math
func floorLog2*(x: int): int =

View File

@ -1,10 +1,12 @@
mode = ScriptMode.Verbose
import std/os except commandLineParams
### Helper functions
proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
proc buildBinary(srcName: string, outName = os.lastPathPart(srcName), srcDir = "./", params = "", lang = "c") =
if not dirExists "build":
mkDir "build"
# allow something like "nim nimbus --verbosity:0 --hints:off nimbus.nims"
var extra_params = params
when compiles(commandLineParams):
@ -14,39 +16,84 @@ proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
for i in 2 ..< paramCount():
extra_params &= " " & paramStr(i)
let cmd = "nim " & lang & " --out:build/" & name & " " & extra_params & " " & srcDir & name & ".nim"
let
# Place build output in 'build' folder, even if name includes a longer path.
cmd =
"nim " & lang & " --out:build/" & outName & " " & extra_params & " " & srcDir &
srcName & ".nim"
exec(cmd)
proc test(name: string, srcDir = "tests/", params = "", lang = "c") =
buildBinary name, srcDir, params
exec "build/" & name
proc buildLibrary(name: string, srcDir = "./", params = "", `type` = "dynamic") =
if not dirExists "build":
mkDir "build"
task codex, "build codex binary":
buildBinary "codex", params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE"
if `type` == "dynamic":
let lib_name = (
when defined(windows): name & ".dll"
elif defined(macosx): name & ".dylib"
else: name & ".so"
)
exec "nim c" & " --out:build/" & lib_name &
" --threads:on --app:lib --opt:size --noMain --mm:refc --header --d:metrics " &
"--nimMainPrefix:libstorage -d:noSignalHandler " &
"-d:LeopardExtraCompilerFlags=-fPIC " & "-d:chronicles_runtime_filtering " &
"-d:chronicles_log_level=TRACE " & params & " " & srcDir & name & ".nim"
else:
exec "nim c" & " --out:build/" & name &
".a --threads:on --app:staticlib --opt:size --noMain --mm:refc --header --d:metrics " &
"--nimMainPrefix:libstorage -d:noSignalHandler " &
"-d:LeopardExtraCompilerFlags=-fPIC " &
"-d:chronicles_runtime_filtering " &
"-d:chronicles_log_level=TRACE " &
params & " " & srcDir & name & ".nim"
task testCodex, "Build & run Codex tests":
test "testCodex", params = "-d:codex_enable_proof_failures=true"
proc test(name: string, outName = name, srcDir = "tests/", params = "", lang = "c") =
buildBinary name, outName, srcDir, params
exec "build/" & outName
task testContracts, "Build & run Codex Contract tests":
task storage, "build logos storage binary":
buildBinary "codex",
outname = "storage",
params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE"
task toolsCirdl, "build tools/cirdl binary":
buildBinary "tools/cirdl/cirdl"
task testStorage, "Build & run Logos Storage tests":
test "testCodex", outName = "testStorage", params = "-d:storage_enable_proof_failures=true"
task testContracts, "Build & run Logos Storage Contract tests":
test "testContracts"
task testIntegration, "Run integration tests":
buildBinary "codex", params = "-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE -d:codex_enable_proof_failures=true"
buildBinary "codex",
outName = "storage",
params =
"-d:chronicles_runtime_filtering -d:chronicles_log_level=TRACE -d:storage_enable_proof_failures=true"
test "testIntegration"
# use params to enable logging from the integration test executable
# test "testIntegration", params = "-d:chronicles_sinks=textlines[notimestamps,stdout],textlines[dynamic] " &
# "-d:chronicles_enabled_topics:integration:TRACE"
task build, "build codex binary":
codexTask()
task build, "build Logos Storage binary":
storageTask()
task test, "Run tests":
testCodexTask()
testStorageTask()
task testTools, "Run Tools tests":
toolsCirdlTask()
test "testTools"
task testAll, "Run all tests (except for Taiko L2 tests)":
testCodexTask()
testStorageTask()
testContractsTask()
testIntegrationTask()
testToolsTask()
task testTaiko, "Run Taiko L2 tests":
codexTask()
storageTask()
test "testTaiko"
import strutils
@ -72,20 +119,50 @@ task coverage, "generates code coverage report":
var nimSrcs = " "
for f in walkDirRec("codex", {pcFile}):
if f.endswith(".nim"): nimSrcs.add " " & f.absolutePath.quoteShell()
if f.endswith(".nim"):
nimSrcs.add " " & f.absolutePath.quoteShell()
echo "======== Running Tests ======== "
test "coverage", srcDir = "tests/", params = " --nimcache:nimcache/coverage -d:release -d:codex_enable_proof_failures=true"
test "coverage",
srcDir = "tests/",
params =
" --nimcache:nimcache/coverage -d:release -d:storage_enable_proof_failures=true"
exec("rm nimcache/coverage/*.c")
rmDir("coverage"); mkDir("coverage")
rmDir("coverage")
mkDir("coverage")
echo " ======== Running LCOV ======== "
exec("lcov --capture --directory nimcache/coverage --output-file coverage/coverage.info")
exec("lcov --extract coverage/coverage.info --output-file coverage/coverage.f.info " & nimSrcs)
exec(
"lcov --capture --keep-going --directory nimcache/coverage --output-file coverage/coverage.info"
)
exec(
"lcov --extract coverage/coverage.info --keep-going --output-file coverage/coverage.f.info " &
nimSrcs
)
echo " ======== Generating HTML coverage report ======== "
exec("genhtml coverage/coverage.f.info --output-directory coverage/report ")
exec("genhtml coverage/coverage.f.info --keep-going --output-directory coverage/report ")
echo " ======== Coverage report Done ======== "
task showCoverage, "open coverage html":
echo " ======== Opening HTML coverage report in browser... ======== "
if findExe("open") != "":
exec("open coverage/report/index.html")
task libstorageDynamic, "Generate bindings":
var params = ""
when compiles(commandLineParams):
for param in commandLineParams():
if param.len > 0 and param.startsWith("-"):
params.add " " & param
let name = "libstorage"
buildLibrary name, "library/", params, "dynamic"
task libstorageStatic, "Generate bindings":
var params = ""
when compiles(commandLineParams):
for param in commandLineParams():
if param.len > 0 and param.startsWith("-"):
params.add " " & param
let name = "libstorage"
buildLibrary name, "library/", params, "static"

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -28,7 +28,6 @@ import ./codex/codextypes
export codex, conf, libp2p, chronos, logutils
when isMainModule:
import std/sequtils
import std/os
import pkg/confutils/defs
import ./codex/utils/fileutils
@ -39,34 +38,42 @@ when isMainModule:
when defined(posix):
import system/ansi_c
type
CodexStatus {.pure.} = enum
Stopped,
Stopping,
type CodexStatus {.pure.} = enum
Stopped
Stopping
Running
let config = CodexConf.load(
version = codexFullVersion,
envVarsPrefix = "codex",
secondarySources = proc (config: CodexConf, sources: auto) =
envVarsPrefix = "storage",
secondarySources = proc(
config: CodexConf, sources: auto
) {.gcsafe, raises: [ConfigurationError].} =
if configFile =? config.configFile:
sources.addConfigFile(Toml, configFile)
,
)
config.setupLogging()
config.setupMetrics()
if config.nat == ValidIpAddress.init(IPv4_any()):
error "`--nat` cannot be set to the any (`0.0.0.0`) address"
try:
updateLogLevel(config.logLevel)
except ValueError as err:
try:
stderr.write "Invalid value for --log-level. " & err.msg & "\n"
except IOError:
echo "Invalid value for --log-level. " & err.msg
quit QuitFailure
if config.nat == ValidIpAddress.init("127.0.0.1"):
warn "`--nat` is set to loopback, your node wont properly announce over the DHT"
config.setupMetrics()
if not (checkAndCreateDataDir((config.dataDir).string)):
# We are unable to access/create data folder or data folder's
# permissions are insecure.
quit QuitFailure
if config.prover() and not (checkAndCreateDataDir((config.circuitDir).string)):
quit QuitFailure
trace "Data dir initialized", dir = $config.dataDir
if not (checkAndCreateDataDir((config.dataDir / "repo"))):
@ -88,25 +95,28 @@ when isMainModule:
config.dataDir / config.netPrivKeyFile
privateKey = setupKey(keyPath).expect("Should setup private key!")
server = try:
server =
try:
CodexServer.new(config, privateKey)
except Exception as exc:
error "Failed to start Codex", msg = exc.msg
error "Failed to start Logos Storage", msg = exc.msg
quit QuitFailure
## Ctrl+C handling
proc doShutdown() =
shutdown = server.stop()
shutdown = server.shutdown()
state = CodexStatus.Stopping
notice "Stopping Codex"
notice "Stopping Logos Storage"
proc controlCHandler() {.noconv.} =
when defined(windows):
# workaround for https://github.com/nim-lang/Nim/issues/4057
try:
setupForeignThreadGc()
except Exception as exc: raiseAssert exc.msg # shouldn't happen
except Exception as exc:
raiseAssert exc.msg
# shouldn't happen
notice "Shutting down after having received SIGINT"
doShutdown()
@ -128,7 +138,7 @@ when isMainModule:
try:
waitFor server.start()
except CatchableError as error:
error "Codex failed to start", error = error.msg
error "Logos Storage failed to start", error = error.msg
# XXX ideally we'd like to issue a stop instead of quitting cold turkey,
# but this would mean we'd have to fix the implementation of all
# services so they won't crash if we attempt to stop them before they
@ -149,7 +159,7 @@ when isMainModule:
# be assigned before state switches to Stopping
waitFor shutdown
except CatchableError as error:
error "Codex didn't shutdown correctly", error = error.msg
error "Logos Storage didn't shutdown correctly", error = error.msg
quit QuitFailure
notice "Exited codex"
notice "Exited Storage"

View File

@ -1,5 +1,5 @@
version = "0.1.0"
author = "Codex Team"
author = "Logos Storage Team"
description = "p2p data durability engine"
license = "MIT"
binDir = "build"

View File

@ -1,10 +1,5 @@
import ./blockexchange/[
network,
engine,
peers]
import ./blockexchange/[network, engine, peers]
import ./blockexchange/protobuf/[
blockexc,
presence]
import ./blockexchange/protobuf/[blockexc, presence]
export network, engine, blockexc, presence, peers

View File

@ -1,5 +1,6 @@
import ./engine/discovery
import ./engine/advertiser
import ./engine/engine
import ./engine/payments
export discovery, engine, payments
export discovery, advertiser, engine, payments

View File

@ -0,0 +1,181 @@
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import pkg/chronos
import pkg/libp2p/cid
import pkg/libp2p/multicodec
import pkg/metrics
import pkg/questionable
import pkg/questionable/results
import ../protobuf/presence
import ../peers
import ../../utils
import ../../utils/exceptions
import ../../utils/trackedfutures
import ../../discovery
import ../../stores/blockstore
import ../../logutils
import ../../manifest
logScope:
topics = "codex discoveryengine advertiser"
declareGauge(codex_inflight_advertise, "inflight advertise requests")
const
DefaultConcurrentAdvertRequests = 10
DefaultAdvertiseLoopSleep = 30.minutes
type Advertiser* = ref object of RootObj
localStore*: BlockStore # Local block store for this instance
discovery*: Discovery # Discovery interface
advertiserRunning*: bool # Indicates if discovery is running
concurrentAdvReqs: int # Concurrent advertise requests
advertiseLocalStoreLoop*: Future[void].Raising([]) # Advertise loop task handle
advertiseQueue*: AsyncQueue[Cid] # Advertise queue
trackedFutures*: TrackedFutures # Advertise tasks futures
advertiseLocalStoreLoopSleep: Duration # Advertise loop sleep
inFlightAdvReqs*: Table[Cid, Future[void]] # Inflight advertise requests
proc addCidToQueue(b: Advertiser, cid: Cid) {.async: (raises: [CancelledError]).} =
if cid notin b.advertiseQueue:
await b.advertiseQueue.put(cid)
trace "Advertising", cid
proc advertiseBlock(b: Advertiser, cid: Cid) {.async: (raises: [CancelledError]).} =
without isM =? cid.isManifest, err:
warn "Unable to determine if cid is manifest"
return
try:
if isM:
without blk =? await b.localStore.getBlock(cid), err:
error "Error retrieving manifest block", cid, err = err.msg
return
without manifest =? Manifest.decode(blk), err:
error "Unable to decode as manifest", err = err.msg
return
# announce manifest cid and tree cid
await b.addCidToQueue(cid)
await b.addCidToQueue(manifest.treeCid)
except CancelledError as exc:
trace "Cancelled advertise block", cid
raise exc
except CatchableError as e:
error "failed to advertise block", cid, error = e.msgDetail
proc advertiseLocalStoreLoop(b: Advertiser) {.async: (raises: []).} =
try:
while b.advertiserRunning:
if cidsIter =? await b.localStore.listBlocks(blockType = BlockType.Manifest):
trace "Advertiser begins iterating blocks..."
for c in cidsIter:
if cid =? await c:
await b.advertiseBlock(cid)
trace "Advertiser iterating blocks finished."
await sleepAsync(b.advertiseLocalStoreLoopSleep)
except CancelledError:
warn "Cancelled advertise local store loop"
info "Exiting advertise task loop"
proc processQueueLoop(b: Advertiser) {.async: (raises: []).} =
try:
while b.advertiserRunning:
let cid = await b.advertiseQueue.get()
if cid in b.inFlightAdvReqs:
continue
let request = b.discovery.provide(cid)
b.inFlightAdvReqs[cid] = request
codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64)
defer:
b.inFlightAdvReqs.del(cid)
codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64)
await request
except CancelledError:
warn "Cancelled advertise task runner"
info "Exiting advertise task runner"
proc start*(b: Advertiser) {.async: (raises: []).} =
## Start the advertiser
##
trace "Advertiser start"
# The advertiser is expected to be started only once.
if b.advertiserRunning:
raiseAssert "Advertiser can only be started once — this should not happen"
proc onBlock(cid: Cid) {.async: (raises: []).} =
try:
await b.advertiseBlock(cid)
except CancelledError:
trace "Cancelled advertise block", cid
doAssert(b.localStore.onBlockStored.isNone())
b.localStore.onBlockStored = onBlock.some
b.advertiserRunning = true
for i in 0 ..< b.concurrentAdvReqs:
let fut = b.processQueueLoop()
b.trackedFutures.track(fut)
b.advertiseLocalStoreLoop = advertiseLocalStoreLoop(b)
b.trackedFutures.track(b.advertiseLocalStoreLoop)
proc stop*(b: Advertiser) {.async: (raises: []).} =
## Stop the advertiser
##
trace "Advertiser stop"
if not b.advertiserRunning:
warn "Stopping advertiser without starting it"
return
b.advertiserRunning = false
# Stop incoming tasks from callback and localStore loop
b.localStore.onBlockStored = CidCallback.none
trace "Stopping advertise loop and tasks"
await b.trackedFutures.cancelTracked()
trace "Advertiser loop and tasks stopped"
proc new*(
T: type Advertiser,
localStore: BlockStore,
discovery: Discovery,
concurrentAdvReqs = DefaultConcurrentAdvertRequests,
advertiseLocalStoreLoopSleep = DefaultAdvertiseLoopSleep,
): Advertiser =
## Create an advertiser instance
##
Advertiser(
localStore: localStore,
discovery: discovery,
concurrentAdvReqs: concurrentAdvReqs,
advertiseQueue: newAsyncQueue[Cid](concurrentAdvReqs),
trackedFutures: TrackedFutures.new(),
inFlightAdvReqs: initTable[Cid, Future[void]](),
advertiseLocalStoreLoopSleep: advertiseLocalStoreLoopSleep,
)
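
A minimal usage sketch for the new Advertiser, assuming a node that already owns a BlockStore and a Discovery instance (imports and their construction are elided):

proc runAdvertiser(store: BlockStore, disco: Discovery) {.async.} =
  let adv = Advertiser.new(
    store,
    disco,
    concurrentAdvReqs = 4,                     # fewer provide() workers
    advertiseLocalStoreLoopSleep = 10.minutes, # re-announce interval
  )
  await adv.start() # spawns the queue workers and the local-store loop
  # while running, every stored manifest announces its cid and treeCid
  await adv.stop()  # cancels all tracked futures

Note that start() asserts it is only called once, so a restart requires a fresh instance.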

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -8,6 +8,7 @@
## those terms.
import std/sequtils
import std/algorithm
import pkg/chronos
import pkg/libp2p/cid
@ -23,6 +24,7 @@ import ../network
import ../peers
import ../../utils
import ../../utils/trackedfutures
import ../../discovery
import ../../stores/blockstore
import ../../logutils
@ -31,167 +33,107 @@ import ../../manifest
logScope:
topics = "codex discoveryengine"
declareGauge(codexInflightDiscovery, "inflight discovery requests")
declareGauge(codex_inflight_discovery, "inflight discovery requests")
const
DefaultConcurrentDiscRequests = 10
DefaultConcurrentAdvertRequests = 10
DefaultDiscoveryTimeout = 1.minutes
DefaultMinPeersPerBlock = 3
DefaultMaxPeersPerBlock = 8
DefaultDiscoveryLoopSleep = 3.seconds
DefaultAdvertiseLoopSleep = 30.minutes
type
DiscoveryEngine* = ref object of RootObj
type DiscoveryEngine* = ref object of RootObj
localStore*: BlockStore # Local block store for this instance
peers*: PeerCtxStore # Peer context store
network*: BlockExcNetwork # Network interface
discovery*: Discovery # Discovery interface
pendingBlocks*: PendingBlocksManager # Blocks we're awaiting to be resolved
discEngineRunning*: bool # Indicates if discovery is running
concurrentAdvReqs: int # Concurrent advertise requests
concurrentDiscReqs: int # Concurrent discovery requests
advertiseLoop*: Future[void] # Advertise loop task handle
advertiseQueue*: AsyncQueue[Cid] # Advertise queue
advertiseTasks*: seq[Future[void]] # Advertise tasks
discoveryLoop*: Future[void] # Discovery loop task handle
discoveryLoop*: Future[void].Raising([]) # Discovery loop task handle
discoveryQueue*: AsyncQueue[Cid] # Discovery queue
discoveryTasks*: seq[Future[void]] # Discovery tasks
minPeersPerBlock*: int # Max number of peers with block
trackedFutures*: TrackedFutures # Tracked Discovery tasks futures
minPeersPerBlock*: int # Min number of peers with block
maxPeersPerBlock*: int # Max number of peers with block
discoveryLoopSleep: Duration # Discovery loop sleep
advertiseLoopSleep: Duration # Advertise loop sleep
inFlightDiscReqs*: Table[Cid, Future[seq[SignedPeerRecord]]] # Inflight discovery requests
inFlightAdvReqs*: Table[Cid, Future[void]] # Inflight advertise requests
advertiseType*: BlockType # Advertise blocks, manifests or both
inFlightDiscReqs*: Table[Cid, Future[seq[SignedPeerRecord]]]
# Inflight discovery requests
proc discoveryQueueLoop(b: DiscoveryEngine) {.async.} =
proc cleanupExcessPeers(b: DiscoveryEngine, cid: Cid) {.gcsafe, raises: [].} =
var haves = b.peers.peersHave(cid)
let count = haves.len - b.maxPeersPerBlock
if count <= 0:
return
haves.sort(
proc(a, b: BlockExcPeerCtx): int =
cmp(a.lastExchange, b.lastExchange)
)
let toRemove = haves[0 ..< count]
for peer in toRemove:
try:
peer.cleanPresence(BlockAddress.init(cid))
trace "Removed block presence from peer", cid, peer = peer.id
except CatchableError as exc:
error "Failed to clean presence for peer",
cid, peer = peer.id, error = exc.msg, name = exc.name
proc discoveryQueueLoop(b: DiscoveryEngine) {.async: (raises: []).} =
try:
while b.discEngineRunning:
for cid in toSeq(b.pendingBlocks.wantListBlockCids):
try:
await b.discoveryQueue.put(cid)
except CancelledError:
trace "Discovery loop cancelled"
return
except CatchableError as exc:
warn "Exception in discovery loop", exc = exc.msg
logScope:
sleep = b.discoveryLoopSleep
wanted = b.pendingBlocks.len
await sleepAsync(b.discoveryLoopSleep)
proc advertiseBlock(b: DiscoveryEngine, cid: Cid) {.async.} =
without isM =? cid.isManifest, err:
warn "Unable to determine if cid is manifest"
return
if isM:
without blk =? await b.localStore.getBlock(cid), err:
error "Error retrieving manifest block", cid, err = err.msg
return
without manifest =? Manifest.decode(blk), err:
error "Unable to decode as manifest", err = err.msg
return
# announce manifest cid and tree cid
await b.advertiseQueue.put(cid)
await b.advertiseQueue.put(manifest.treeCid)
proc advertiseQueueLoop(b: DiscoveryEngine) {.async.} =
while b.discEngineRunning:
if cids =? await b.localStore.listBlocks(blockType = b.advertiseType):
trace "Begin iterating blocks..."
for c in cids:
if cid =? await c:
b.advertiseBlock(cid)
await sleepAsync(100.millis)
trace "Iterating blocks finished."
await sleepAsync(b.advertiseLoopSleep)
info "Exiting advertise task loop"
proc advertiseTaskLoop(b: DiscoveryEngine) {.async.} =
## Run advertise tasks
##
while b.discEngineRunning:
try:
let
cid = await b.advertiseQueue.get()
if cid in b.inFlightAdvReqs:
continue
try:
let
request = b.discovery.provide(cid)
b.inFlightAdvReqs[cid] = request
codexInflightDiscovery.set(b.inFlightAdvReqs.len.int64)
await request
finally:
b.inFlightAdvReqs.del(cid)
codexInflightDiscovery.set(b.inFlightAdvReqs.len.int64)
except CancelledError:
trace "Advertise task cancelled"
return
except CatchableError as exc:
warn "Exception in advertise task runner", exc = exc.msg
trace "Discovery loop cancelled"
info "Exiting advertise task runner"
proc discoveryTaskLoop(b: DiscoveryEngine) {.async.} =
proc discoveryTaskLoop(b: DiscoveryEngine) {.async: (raises: []).} =
## Run discovery tasks
##
while b.discEngineRunning:
try:
let
cid = await b.discoveryQueue.get()
while b.discEngineRunning:
let cid = await b.discoveryQueue.get()
if cid in b.inFlightDiscReqs:
trace "Discovery request already in progress", cid
continue
let
haves = b.peers.peersHave(cid)
trace "Running discovery task for cid", cid
let haves = b.peers.peersHave(cid)
if haves.len > b.maxPeersPerBlock:
trace "Cleaning up excess peers",
cid, peers = haves.len, max = b.maxPeersPerBlock
b.cleanupExcessPeers(cid)
continue
if haves.len < b.minPeersPerBlock:
try:
let
request = b.discovery
.find(cid)
.wait(DefaultDiscoveryTimeout)
let request = b.discovery.find(cid)
b.inFlightDiscReqs[cid] = request
codexInflightDiscovery.set(b.inFlightAdvReqs.len.int64)
let
peers = await request
codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64)
let
dialed = await allFinished(
peers.mapIt( b.network.dialPeer(it.data) ))
defer:
b.inFlightDiscReqs.del(cid)
codex_inflight_discovery.set(b.inFlightDiscReqs.len.int64)
if (await request.withTimeout(DefaultDiscoveryTimeout)) and
peers =? (await request).catch:
let dialed = await allFinished(peers.mapIt(b.network.dialPeer(it.data)))
for i, f in dialed:
if f.failed:
await b.discovery.removeProvider(peers[i].data.peerId)
finally:
b.inFlightDiscReqs.del(cid)
codexInflightDiscovery.set(b.inFlightAdvReqs.len.int64)
except CancelledError:
trace "Discovery task cancelled"
return
except CatchableError as exc:
warn "Exception in discovery task runner", exc = exc.msg
info "Exiting discovery task runner"
proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) {.inline.} =
proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) =
for cid in cids:
if cid notin b.discoveryQueue:
try:
@ -199,35 +141,27 @@ proc queueFindBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) {.inline.} =
except CatchableError as exc:
warn "Exception queueing discovery request", exc = exc.msg
proc queueProvideBlocksReq*(b: DiscoveryEngine, cids: seq[Cid]) {.inline.} =
for cid in cids:
if cid notin b.advertiseQueue:
try:
b.advertiseQueue.putNoWait(cid)
except CatchableError as exc:
warn "Exception queueing discovery request", exc = exc.msg
proc start*(b: DiscoveryEngine) {.async.} =
proc start*(b: DiscoveryEngine) {.async: (raises: []).} =
## Start the discengine task
##
trace "Discovery engine start"
trace "Discovery engine starting"
if b.discEngineRunning:
warn "Starting discovery engine twice"
return
b.discEngineRunning = true
for i in 0..<b.concurrentAdvReqs:
b.advertiseTasks.add(advertiseTaskLoop(b))
for i in 0 ..< b.concurrentDiscReqs:
b.discoveryTasks.add(discoveryTaskLoop(b))
let fut = b.discoveryTaskLoop()
b.trackedFutures.track(fut)
b.advertiseLoop = advertiseQueueLoop(b)
b.discoveryLoop = discoveryQueueLoop(b)
b.discoveryLoop = b.discoveryQueueLoop()
b.trackedFutures.track(b.discoveryLoop)
proc stop*(b: DiscoveryEngine) {.async.} =
trace "Discovery engine started"
proc stop*(b: DiscoveryEngine) {.async: (raises: []).} =
## Stop the discovery engine
##
@ -237,27 +171,9 @@ proc stop*(b: DiscoveryEngine) {.async.} =
return
b.discEngineRunning = false
for task in b.advertiseTasks:
if not task.finished:
trace "Awaiting advertise task to stop"
await task.cancelAndWait()
trace "Advertise task stopped"
for task in b.discoveryTasks:
if not task.finished:
trace "Awaiting discovery task to stop"
await task.cancelAndWait()
trace "Discovery task stopped"
if not b.advertiseLoop.isNil and not b.advertiseLoop.finished:
trace "Awaiting advertise loop to stop"
await b.advertiseLoop.cancelAndWait()
trace "Advertise loop stopped"
if not b.discoveryLoop.isNil and not b.discoveryLoop.finished:
trace "Awaiting discovery loop to stop"
await b.discoveryLoop.cancelAndWait()
trace "Discovery loop stopped"
trace "Stopping discovery loop and tasks"
await b.trackedFutures.cancelTracked()
trace "Discovery loop and tasks stopped"
trace "Discovery engine stopped"
@ -268,12 +184,10 @@ proc new*(
network: BlockExcNetwork,
discovery: Discovery,
pendingBlocks: PendingBlocksManager,
concurrentAdvReqs = DefaultConcurrentAdvertRequests,
concurrentDiscReqs = DefaultConcurrentDiscRequests,
discoveryLoopSleep = DefaultDiscoveryLoopSleep,
advertiseLoopSleep = DefaultAdvertiseLoopSleep,
minPeersPerBlock = DefaultMinPeersPerBlock,
advertiseType = BlockType.Manifest
maxPeersPerBlock = DefaultMaxPeersPerBlock,
): DiscoveryEngine =
## Create a discovery engine instance for advertising services
##
@ -283,13 +197,11 @@ proc new*(
network: network,
discovery: discovery,
pendingBlocks: pendingBlocks,
concurrentAdvReqs: concurrentAdvReqs,
concurrentDiscReqs: concurrentDiscReqs,
advertiseQueue: newAsyncQueue[Cid](concurrentAdvReqs),
discoveryQueue: newAsyncQueue[Cid](concurrentDiscReqs),
trackedFutures: TrackedFutures.new(),
inFlightDiscReqs: initTable[Cid, Future[seq[SignedPeerRecord]]](),
inFlightAdvReqs: initTable[Cid, Future[void]](),
discoveryLoopSleep: discoveryLoopSleep,
advertiseLoopSleep: advertiseLoopSleep,
minPeersPerBlock: minPeersPerBlock,
advertiseType: advertiseType)
maxPeersPerBlock: maxPeersPerBlock,
)
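
A sketch of how the two thresholds drive the discovery task above; the store, network, and discovery instances are assumed to come from node setup, and the positional argument order follows the constructor:

let discEngine = DiscoveryEngine.new(
  localStore, peerStore, network, discovery, pendingBlocks,
  minPeersPerBlock = 3, # below this, discovery.find(cid) is issued
  maxPeersPerBlock = 8, # above this, cleanupExcessPeers trims the oldest presences
)
await discEngine.start()
discEngine.queueFindBlocksReq(@[manifestCid]) # enqueue a lookup for a wanted block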

File diff suppressed because it is too large

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,6 +7,8 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/math
import pkg/nitro
import pkg/questionable/results
@ -15,15 +17,13 @@ import ../peers
export nitro
export results
push: {.upraises: [].}
const ChainId* = 0.u256 # invalid chain id for now
const Asset* = EthAddress.zero # invalid ERC20 asset address for now
const AmountPerChannel = (10'u64 ^ 18).u256 # 1 asset, ERC20 default is 18 decimals
func openLedgerChannel*(wallet: WalletRef,
hub: EthAddress,
asset: EthAddress): ?!ChannelId =
func openLedgerChannel*(
wallet: WalletRef, hub: EthAddress, asset: EthAddress
): ?!ChannelId =
wallet.openLedgerChannel(hub, ChainId, asset, AmountPerChannel)
func getOrOpenChannel(wallet: WalletRef, peer: BlockExcPeerCtx): ?!ChannelId =
@ -36,9 +36,7 @@ func getOrOpenChannel(wallet: WalletRef, peer: BlockExcPeerCtx): ?!ChannelId =
else:
failure "no account set for peer"
func pay*(wallet: WalletRef,
peer: BlockExcPeerCtx,
amount: UInt256): ?!SignedState =
func pay*(wallet: WalletRef, peer: BlockExcPeerCtx, amount: UInt256): ?!SignedState =
if account =? peer.account:
let asset = Asset
let receiver = account.address
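
The AmountPerChannel constant funds each channel with one whole token: with the ERC20 default of 18 decimals, 10^18 base units equal exactly one asset. A quick standalone check:

import std/math
import pkg/stint

let amountPerChannel = (10'u64 ^ 18).u256
assert amountPerChannel == 1_000_000_000_000_000_000'u64.u256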

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,12 +7,11 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/tables
import std/monotimes
import pkg/upraises
push: {.upraises: [].}
import std/strutils
import pkg/chronos
import pkg/libp2p
@ -25,133 +24,194 @@ import ../../logutils
logScope:
topics = "codex pendingblocks"
declareGauge(codex_block_exchange_pending_block_requests, "codex blockexchange pending block requests")
declareGauge(codex_block_exchange_retrieval_time_us, "codex blockexchange block retrieval time us")
declareGauge(
codex_block_exchange_pending_block_requests,
"codex blockexchange pending block requests",
)
declareGauge(
codex_block_exchange_retrieval_time_us, "codex blockexchange block retrieval time us"
)
const
DefaultBlockTimeout* = 10.minutes
DefaultBlockRetries* = 3000
DefaultRetryInterval* = 2.seconds
type
RetriesExhaustedError* = object of CatchableError
BlockHandle* = Future[Block].Raising([CancelledError, RetriesExhaustedError])
BlockReq* = object
handle*: Future[Block]
inFlight*: bool
handle*: BlockHandle
requested*: ?PeerId
blockRetries*: int
startTime*: int64
PendingBlocksManager* = ref object of RootObj
blockRetries*: int = DefaultBlockRetries
retryInterval*: Duration = DefaultRetryInterval
blocks*: Table[BlockAddress, BlockReq] # pending Block requests
lastInclusion*: Moment # time at which we last included a block into our wantlist
proc updatePendingBlockGauge(p: PendingBlocksManager) =
codex_block_exchange_pending_block_requests.set(p.blocks.len.int64)
proc getWantHandle*(
p: PendingBlocksManager,
address: BlockAddress,
timeout = DefaultBlockTimeout,
inFlight = false): Future[Block] {.async.} =
self: PendingBlocksManager, address: BlockAddress, requested: ?PeerId = PeerId.none
): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} =
## Add an event for a block
##
try:
if address notin p.blocks:
p.blocks[address] = BlockReq(
self.blocks.withValue(address, blk):
return blk[].handle
do:
let blk = BlockReq(
handle: newFuture[Block]("pendingBlocks.getWantHandle"),
inFlight: inFlight,
startTime: getMonoTime().ticks)
requested: requested,
blockRetries: self.blockRetries,
startTime: getMonoTime().ticks,
)
self.blocks[address] = blk
self.lastInclusion = Moment.now()
p.updatePendingBlockGauge()
return await p.blocks[address].handle.wait(timeout)
except CancelledError as exc:
trace "Blocks cancelled", exc = exc.msg, address
raise exc
except CatchableError as exc:
error "Pending WANT failed or expired", exc = exc.msg
# no need to cancel, it is already cancelled by wait()
raise exc
finally:
p.blocks.del(address)
p.updatePendingBlockGauge()
let handle = blk.handle
proc cleanUpBlock(data: pointer) {.raises: [].} =
self.blocks.del(address)
self.updatePendingBlockGauge()
handle.addCallback(cleanUpBlock)
handle.cancelCallback = proc(data: pointer) {.raises: [].} =
if not handle.finished:
handle.removeCallback(cleanUpBlock)
cleanUpBlock(nil)
self.updatePendingBlockGauge()
return handle
proc getWantHandle*(
p: PendingBlocksManager,
cid: Cid,
timeout = DefaultBlockTimeout,
inFlight = false): Future[Block] =
p.getWantHandle(BlockAddress.init(cid), timeout, inFlight)
self: PendingBlocksManager, cid: Cid, requested: ?PeerId = PeerId.none
): Future[Block] {.async: (raw: true, raises: [CancelledError, RetriesExhaustedError]).} =
self.getWantHandle(BlockAddress.init(cid), requested)
proc completeWantHandle*(
self: PendingBlocksManager, address: BlockAddress, blk: Block
) {.raises: [].} =
## Complete a pending want handle
self.blocks.withValue(address, blockReq):
if not blockReq[].handle.finished:
trace "Completing want handle from provided block", address
blockReq[].handle.complete(blk)
else:
trace "Want handle already completed", address
do:
trace "No pending want handle found for address", address
proc resolve*(
p: PendingBlocksManager,
blocksDelivery: seq[BlockDelivery]) {.gcsafe, raises: [].} =
self: PendingBlocksManager, blocksDelivery: seq[BlockDelivery]
) {.gcsafe, raises: [].} =
## Resolve pending blocks
##
for bd in blocksDelivery:
p.blocks.withValue(bd.address, blockReq):
if not blockReq.handle.finished:
self.blocks.withValue(bd.address, blockReq):
if not blockReq[].handle.finished:
trace "Resolving pending block", address = bd.address
let
startTime = blockReq.startTime
startTime = blockReq[].startTime
stopTime = getMonoTime().ticks
retrievalDurationUs = (stopTime - startTime) div 1000
blockReq.handle.complete(bd.blk)
codex_block_exchange_retrieval_time_us.set(retrievalDurationUs)
if retrievalDurationUs > 500000:
warn "High block retrieval time", retrievalDurationUs, address = bd.address
else:
trace "Block handle already finished", address = bd.address
proc setInFlight*(
p: PendingBlocksManager,
address: BlockAddress,
inFlight = true) =
## Set inflight status for a block
func retries*(self: PendingBlocksManager, address: BlockAddress): int =
self.blocks.withValue(address, pending):
result = pending[].blockRetries
do:
result = 0
func decRetries*(self: PendingBlocksManager, address: BlockAddress) =
self.blocks.withValue(address, pending):
pending[].blockRetries -= 1
func retriesExhausted*(self: PendingBlocksManager, address: BlockAddress): bool =
self.blocks.withValue(address, pending):
result = pending[].blockRetries <= 0
func isRequested*(self: PendingBlocksManager, address: BlockAddress): bool =
## Check if a block has been requested to a peer
##
result = false
self.blocks.withValue(address, pending):
result = pending[].requested.isSome
func getRequestPeer*(self: PendingBlocksManager, address: BlockAddress): ?PeerId =
## Returns the peer that requested this block
##
result = PeerId.none
self.blocks.withValue(address, pending):
result = pending[].requested
proc markRequested*(
self: PendingBlocksManager, address: BlockAddress, peer: PeerId
): bool =
## Marks this block as having been requested to a peer
##
p.blocks.withValue(address, pending):
pending[].inFlight = inFlight
if self.isRequested(address):
return false
proc isInFlight*(
p: PendingBlocksManager,
address: BlockAddress): bool =
## Check if a block is in flight
##
self.blocks.withValue(address, pending):
pending[].requested = peer.some
return true
p.blocks.withValue(address, pending):
result = pending[].inFlight
proc clearRequest*(
self: PendingBlocksManager, address: BlockAddress, peer: ?PeerId = PeerId.none
) =
self.blocks.withValue(address, pending):
if peer.isSome:
assert peer == pending[].requested
pending[].requested = PeerId.none
proc contains*(p: PendingBlocksManager, cid: Cid): bool =
BlockAddress.init(cid) in p.blocks
func contains*(self: PendingBlocksManager, cid: Cid): bool =
BlockAddress.init(cid) in self.blocks
proc contains*(p: PendingBlocksManager, address: BlockAddress): bool =
address in p.blocks
func contains*(self: PendingBlocksManager, address: BlockAddress): bool =
address in self.blocks
iterator wantList*(p: PendingBlocksManager): BlockAddress =
for a in p.blocks.keys:
iterator wantList*(self: PendingBlocksManager): BlockAddress =
for a in self.blocks.keys:
yield a
iterator wantListBlockCids*(p: PendingBlocksManager): Cid =
for a in p.blocks.keys:
iterator wantListBlockCids*(self: PendingBlocksManager): Cid =
for a in self.blocks.keys:
if not a.leaf:
yield a.cid
iterator wantListCids*(p: PendingBlocksManager): Cid =
iterator wantListCids*(self: PendingBlocksManager): Cid =
var yieldedCids = initHashSet[Cid]()
for a in p.blocks.keys:
for a in self.blocks.keys:
let cid = a.cidOrTreeCid
if cid notin yieldedCids:
yieldedCids.incl(cid)
yield cid
iterator wantHandles*(p: PendingBlocksManager): Future[Block] =
for v in p.blocks.values:
iterator wantHandles*(self: PendingBlocksManager): Future[Block] =
for v in self.blocks.values:
yield v.handle
proc wantListLen*(p: PendingBlocksManager): int =
p.blocks.len
proc wantListLen*(self: PendingBlocksManager): int =
self.blocks.len
func len*(p: PendingBlocksManager): int =
p.blocks.len
func len*(self: PendingBlocksManager): int =
self.blocks.len
func new*(T: type PendingBlocksManager): PendingBlocksManager =
PendingBlocksManager()
func new*(
T: type PendingBlocksManager,
retries = DefaultBlockRetries,
interval = DefaultRetryInterval,
): PendingBlocksManager =
PendingBlocksManager(blockRetries: retries, retryInterval: interval)
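
A sketch of the retry-aware flow that replaces the old per-request timeout; someCid and delivery are placeholders for values produced elsewhere in the exchange:

let pending = PendingBlocksManager.new(retries = 5, interval = 1.seconds)
let handle = pending.getWantHandle(someCid) # registers, or reuses, a future
# ... when the network delivers the block:
pending.resolve(@[delivery])                # completes the matching handle
let blk = await handle                      # cleanup runs via the attached callbacks

Because cleanup is attached as a future callback rather than a finally block, the entry is removed from the table whether the handle completes, fails, or is cancelled.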

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -21,24 +21,28 @@ import ../../blocktype as bt
import ../../logutils
import ../protobuf/blockexc as pb
import ../protobuf/payments
import ../../utils/trackedfutures
import ./networkpeer
export network, payments
export networkpeer, payments
logScope:
topics = "codex blockexcnetwork"
const
Codec* = "/codex/blockexc/1.0.0"
MaxInflight* = 100
DefaultMaxInflight* = 100
type
WantListHandler* = proc(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.}
BlocksDeliveryHandler* = proc(peer: PeerId, blocks: seq[BlockDelivery]): Future[void] {.gcsafe.}
BlockPresenceHandler* = proc(peer: PeerId, precense: seq[BlockPresence]): Future[void] {.gcsafe.}
AccountHandler* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
PaymentHandler* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}
WantListHandler* = proc(peer: PeerId, wantList: WantList) {.async: (raises: []).}
BlocksDeliveryHandler* =
proc(peer: PeerId, blocks: seq[BlockDelivery]) {.async: (raises: []).}
BlockPresenceHandler* =
proc(peer: PeerId, presence: seq[BlockPresence]) {.async: (raises: []).}
AccountHandler* = proc(peer: PeerId, account: Account) {.async: (raises: []).}
PaymentHandler* = proc(peer: PeerId, payment: SignedState) {.async: (raises: []).}
PeerEventHandler* = proc(peer: PeerId) {.async: (raises: [CancelledError]).}
BlockExcHandlers* = object
onWantList*: WantListHandler
@ -46,6 +50,9 @@ type
onPresence*: BlockPresenceHandler
onAccount*: AccountHandler
onPayment*: PaymentHandler
onPeerJoined*: PeerEventHandler
onPeerDeparted*: PeerEventHandler
onPeerDropped*: PeerEventHandler
WantListSender* = proc(
id: PeerId,
@ -54,12 +61,21 @@ type
cancel: bool = false,
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false): Future[void] {.gcsafe.}
WantCancellationSender* = proc(peer: PeerId, addresses: seq[BlockAddress]): Future[void] {.gcsafe.}
BlocksDeliverySender* = proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.}
PresenceSender* = proc(peer: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.}
AccountSender* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.}
PaymentSender* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.}
sendDontHave: bool = false,
) {.async: (raises: [CancelledError]).}
WantCancellationSender* = proc(peer: PeerId, addresses: seq[BlockAddress]) {.
async: (raises: [CancelledError])
.}
BlocksDeliverySender* = proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]) {.
async: (raises: [CancelledError])
.}
PresenceSender* = proc(peer: PeerId, presence: seq[BlockPresence]) {.
async: (raises: [CancelledError])
.}
AccountSender* =
proc(peer: PeerId, account: Account) {.async: (raises: [CancelledError]).}
PaymentSender* =
proc(peer: PeerId, payment: SignedState) {.async: (raises: [CancelledError]).}
BlockExcRequest* = object
sendWantList*: WantListSender
@ -76,6 +92,8 @@ type
request*: BlockExcRequest
getConn: ConnProvider
inflightSema: AsyncSemaphore
maxInflight: int = DefaultMaxInflight
trackedFutures*: TrackedFutures = TrackedFutures()
proc peerId*(b: BlockExcNetwork): PeerId =
## Return peer id
@ -89,27 +107,31 @@ proc isSelf*(b: BlockExcNetwork, peer: PeerId): bool =
return b.peerId == peer
proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} =
proc send*(
b: BlockExcNetwork, id: PeerId, msg: pb.Message
) {.async: (raises: [CancelledError]).} =
## Send message to peer
##
b.peers.withValue(id, peer):
if not (id in b.peers):
trace "Unable to send, peer not found", peerId = id
return
try:
let peer = b.peers[id]
await b.inflightSema.acquire()
await peer[].send(msg)
await peer.send(msg)
except CancelledError as error:
raise error
except CatchableError as err:
error "Error sending message", peer = id, msg = err.msg
finally:
b.inflightSema.release()
do:
trace "Unable to send, peer not found", peerId = id
proc handleWantList(
b: BlockExcNetwork,
peer: NetworkPeer,
list: WantList) {.async.} =
b: BlockExcNetwork, peer: NetworkPeer, list: WantList
) {.async: (raises: []).} =
## Handle incoming want list
##
@ -124,7 +146,8 @@ proc sendWantList*(
cancel: bool = false,
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false): Future[void] =
sendDontHave: bool = false,
) {.async: (raw: true, raises: [CancelledError]).} =
## Send a want message to peer
##
@ -135,43 +158,41 @@ proc sendWantList*(
priority: priority,
cancel: cancel,
wantType: wantType,
sendDontHave: sendDontHave) ),
full: full)
sendDontHave: sendDontHave,
)
),
full: full,
)
b.send(id, Message(wantlist: msg))
proc sendWantCancellations*(
b: BlockExcNetwork,
id: PeerId,
addresses: seq[BlockAddress]): Future[void] {.async.} =
b: BlockExcNetwork, id: PeerId, addresses: seq[BlockAddress]
): Future[void] {.async: (raises: [CancelledError]).} =
## Informs a remote peer that we're no longer interested in a set of blocks
##
await b.sendWantList(id = id, addresses = addresses, cancel = true)
proc handleBlocksDelivery(
b: BlockExcNetwork,
peer: NetworkPeer,
blocksDelivery: seq[BlockDelivery]) {.async.} =
b: BlockExcNetwork, peer: NetworkPeer, blocksDelivery: seq[BlockDelivery]
) {.async: (raises: []).} =
## Handle incoming blocks
##
if not b.handlers.onBlocksDelivery.isNil:
await b.handlers.onBlocksDelivery(peer.id, blocksDelivery)
proc sendBlocksDelivery*(
b: BlockExcNetwork,
id: PeerId,
blocksDelivery: seq[BlockDelivery]): Future[void] =
b: BlockExcNetwork, id: PeerId, blocksDelivery: seq[BlockDelivery]
) {.async: (raw: true, raises: [CancelledError]).} =
## Send blocks to remote
##
b.send(id, pb.Message(payload: blocksDelivery))
proc handleBlockPresence(
b: BlockExcNetwork,
peer: NetworkPeer,
presence: seq[BlockPresence]) {.async.} =
b: BlockExcNetwork, peer: NetworkPeer, presence: seq[BlockPresence]
) {.async: (raises: []).} =
## Handle block presence
##
@ -179,18 +200,16 @@ proc handleBlockPresence(
await b.handlers.onPresence(peer.id, presence)
proc sendBlockPresence*(
b: BlockExcNetwork,
id: PeerId,
presence: seq[BlockPresence]): Future[void] =
b: BlockExcNetwork, id: PeerId, presence: seq[BlockPresence]
) {.async: (raw: true, raises: [CancelledError]).} =
## Send presence to remote
##
b.send(id, Message(blockPresences: @presence))
proc handleAccount(
network: BlockExcNetwork,
peer: NetworkPeer,
account: Account) {.async.} =
network: BlockExcNetwork, peer: NetworkPeer, account: Account
) {.async: (raises: []).} =
## Handle account info
##
@ -198,27 +217,24 @@ proc handleAccount(
await network.handlers.onAccount(peer.id, account)
proc sendAccount*(
b: BlockExcNetwork,
id: PeerId,
account: Account): Future[void] =
b: BlockExcNetwork, id: PeerId, account: Account
) {.async: (raw: true, raises: [CancelledError]).} =
## Send account info to remote
##
b.send(id, Message(account: AccountMessage.init(account)))
proc sendPayment*(
b: BlockExcNetwork,
id: PeerId,
payment: SignedState): Future[void] =
b: BlockExcNetwork, id: PeerId, payment: SignedState
) {.async: (raw: true, raises: [CancelledError]).} =
## Send payment to remote
##
b.send(id, Message(payment: StateChannelUpdate.init(payment)))
proc handlePayment(
network: BlockExcNetwork,
peer: NetworkPeer,
payment: SignedState) {.async.} =
network: BlockExcNetwork, peer: NetworkPeer, payment: SignedState
) {.async: (raises: []).} =
## Handle payment
##
@ -226,112 +242,150 @@ proc handlePayment(
await network.handlers.onPayment(peer.id, payment)
proc rpcHandler(
b: BlockExcNetwork,
peer: NetworkPeer,
msg: Message) {.raises: [].} =
self: BlockExcNetwork, peer: NetworkPeer, msg: Message
) {.async: (raises: []).} =
## handle rpc messages
##
if msg.wantList.entries.len > 0:
asyncSpawn b.handleWantList(peer, msg.wantList)
self.trackedFutures.track(self.handleWantList(peer, msg.wantList))
if msg.payload.len > 0:
asyncSpawn b.handleBlocksDelivery(peer, msg.payload)
self.trackedFutures.track(self.handleBlocksDelivery(peer, msg.payload))
if msg.blockPresences.len > 0:
asyncSpawn b.handleBlockPresence(peer, msg.blockPresences)
self.trackedFutures.track(self.handleBlockPresence(peer, msg.blockPresences))
if account =? Account.init(msg.account):
asyncSpawn b.handleAccount(peer, account)
self.trackedFutures.track(self.handleAccount(peer, account))
if payment =? SignedState.init(msg.payment):
asyncSpawn b.handlePayment(peer, payment)
self.trackedFutures.track(self.handlePayment(peer, payment))
proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer =
proc getOrCreatePeer(self: BlockExcNetwork, peer: PeerId): NetworkPeer =
## Creates or retrieves a BlockExcNetwork Peer
##
if peer in b.peers:
return b.peers.getOrDefault(peer, nil)
if peer in self.peers:
return self.peers.getOrDefault(peer, nil)
var getConn: ConnProvider = proc(): Future[Connection] {.async, gcsafe, closure.} =
var getConn: ConnProvider = proc(): Future[Connection] {.
async: (raises: [CancelledError])
.} =
try:
return await b.switch.dial(peer, Codec)
trace "Getting new connection stream", peer
return await self.switch.dial(peer, Codec)
except CancelledError as error:
raise error
except CatchableError as exc:
trace "Unable to connect to blockexc peer", exc = exc.msg
if not isNil(b.getConn):
getConn = b.getConn
if not isNil(self.getConn):
getConn = self.getConn
let rpcHandler = proc (p: NetworkPeer, msg: Message) {.async.} =
b.rpcHandler(p, msg)
let rpcHandler = proc(p: NetworkPeer, msg: Message) {.async: (raises: []).} =
await self.rpcHandler(p, msg)
# create new pubsub peer
let blockExcPeer = NetworkPeer.new(peer, getConn, rpcHandler)
debug "Created new blockexc peer", peer
b.peers[peer] = blockExcPeer
self.peers[peer] = blockExcPeer
return blockExcPeer
proc setupPeer*(b: BlockExcNetwork, peer: PeerId) =
## Perform initial setup, such as want
## list exchange
##
discard b.getOrCreatePeer(peer)
proc dialPeer*(b: BlockExcNetwork, peer: PeerRecord) {.async.} =
proc dialPeer*(self: BlockExcNetwork, peer: PeerRecord) {.async.} =
## Dial a peer
##
if b.isSelf(peer.peerId):
if self.isSelf(peer.peerId):
trace "Skipping dialing self", peer = peer.peerId
return
await b.switch.connect(peer.peerId, peer.addresses.mapIt(it.address))
if peer.peerId in self.peers:
trace "Already connected to peer", peer = peer.peerId
return
proc dropPeer*(b: BlockExcNetwork, peer: PeerId) =
await self.switch.connect(peer.peerId, peer.addresses.mapIt(it.address))
proc dropPeer*(
self: BlockExcNetwork, peer: PeerId
) {.async: (raises: [CancelledError]).} =
trace "Dropping peer", peer
try:
if not self.switch.isNil:
await self.switch.disconnect(peer)
except CatchableError as error:
warn "Error attempting to disconnect from peer", peer = peer, error = error.msg
if not self.handlers.onPeerDropped.isNil:
await self.handlers.onPeerDropped(peer)
proc handlePeerJoined*(
self: BlockExcNetwork, peer: PeerId
) {.async: (raises: [CancelledError]).} =
discard self.getOrCreatePeer(peer)
if not self.handlers.onPeerJoined.isNil:
await self.handlers.onPeerJoined(peer)
proc handlePeerDeparted*(
self: BlockExcNetwork, peer: PeerId
) {.async: (raises: [CancelledError]).} =
## Cleanup disconnected peer
##
b.peers.del(peer)
trace "Cleaning up departed peer", peer
self.peers.del(peer)
if not self.handlers.onPeerDeparted.isNil:
await self.handlers.onPeerDeparted(peer)
method init*(b: BlockExcNetwork) =
method init*(self: BlockExcNetwork) {.raises: [].} =
## Perform protocol initialization
##
proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} =
proc peerEventHandler(
peerId: PeerId, event: PeerEvent
): Future[void] {.async: (raises: [CancelledError]).} =
if event.kind == PeerEventKind.Joined:
b.setupPeer(peerId)
await self.handlePeerJoined(peerId)
elif event.kind == PeerEventKind.Left:
await self.handlePeerDeparted(peerId)
else:
b.dropPeer(peerId)
warn "Unknown peer event", event
b.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined)
b.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left)
self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined)
self.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left)
proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
proc handler(
conn: Connection, proto: string
): Future[void] {.async: (raises: [CancelledError]).} =
let peerId = conn.peerId
let blockexcPeer = b.getOrCreatePeer(peerId)
let blockexcPeer = self.getOrCreatePeer(peerId)
await blockexcPeer.readLoop(conn) # attach read loop
b.handler = handle
b.codec = Codec
self.handler = handler
self.codec = Codec
proc stop*(self: BlockExcNetwork) {.async: (raises: []).} =
await self.trackedFutures.cancelTracked()
proc new*(
T: type BlockExcNetwork,
switch: Switch,
connProvider: ConnProvider = nil,
maxInflight = MaxInflight): BlockExcNetwork =
maxInflight = DefaultMaxInflight,
): BlockExcNetwork =
## Create a new BlockExcNetwork instance
##
let
self = BlockExcNetwork(
let self = BlockExcNetwork(
switch: switch,
getConn: connProvider,
inflightSema: newAsyncSemaphore(maxInflight))
inflightSema: newAsyncSemaphore(maxInflight),
maxInflight: maxInflight,
)
self.maxIncomingStreams = self.maxInflight
proc sendWantList(
id: PeerId,
@ -340,24 +394,33 @@ proc new*(
cancel: bool = false,
wantType: WantType = WantType.WantHave,
full: bool = false,
sendDontHave: bool = false): Future[void] {.gcsafe.} =
self.sendWantList(
id, cids, priority, cancel,
wantType, full, sendDontHave)
sendDontHave: bool = false,
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendWantList(id, cids, priority, cancel, wantType, full, sendDontHave)
proc sendWantCancellations(id: PeerId, addresses: seq[BlockAddress]): Future[void] {.gcsafe.} =
proc sendWantCancellations(
id: PeerId, addresses: seq[BlockAddress]
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendWantCancellations(id, addresses)
proc sendBlocksDelivery(id: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.} =
proc sendBlocksDelivery(
id: PeerId, blocksDelivery: seq[BlockDelivery]
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendBlocksDelivery(id, blocksDelivery)
proc sendPresence(id: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.} =
proc sendPresence(
id: PeerId, presence: seq[BlockPresence]
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendBlockPresence(id, presence)
proc sendAccount(id: PeerId, account: Account): Future[void] {.gcsafe.} =
proc sendAccount(
id: PeerId, account: Account
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendAccount(id, account)
proc sendPayment(id: PeerId, payment: SignedState): Future[void] {.gcsafe.} =
proc sendPayment(
id: PeerId, payment: SignedState
): Future[void] {.async: (raw: true, raises: [CancelledError]).} =
self.sendPayment(id, payment)
self.request = BlockExcRequest(
@ -366,7 +429,8 @@ proc new*(
sendBlocksDelivery: sendBlocksDelivery,
sendPresence: sendPresence,
sendAccount: sendAccount,
sendPayment: sendPayment)
sendPayment: sendPayment,
)
self.init()
return self
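
A sketch of installing one of the new typed callbacks; the switch instance is an assumption, and assignment is shown directly on the handlers field, which the block exchange engine does internally in the real code base:

proc onPresence(peer: PeerId, presence: seq[BlockPresence]) {.async: (raises: []).} =
  echo "presence entries from ", peer, ": ", presence.len

let network = BlockExcNetwork.new(switch, maxInflight = 50)
network.handlers.onPresence = onPresence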

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,8 +7,7 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
push: {.upraises: [].}
{.push raises: [].}
import pkg/chronos
import pkg/libp2p
@ -17,78 +16,98 @@ import ../protobuf/blockexc
import ../protobuf/message
import ../../errors
import ../../logutils
import ../../utils/trackedfutures
logScope:
topics = "codex blockexcnetworkpeer"
type
ConnProvider* = proc(): Future[Connection] {.gcsafe, closure.}
const DefaultYieldInterval = 50.millis
RPCHandler* = proc(peer: NetworkPeer, msg: Message): Future[void] {.gcsafe.}
type
ConnProvider* = proc(): Future[Connection] {.async: (raises: [CancelledError]).}
RPCHandler* = proc(peer: NetworkPeer, msg: Message) {.async: (raises: []).}
NetworkPeer* = ref object of RootObj
id*: PeerId
handler*: RPCHandler
sendConn: Connection
getConn: ConnProvider
yieldInterval*: Duration = DefaultYieldInterval
trackedFutures: TrackedFutures
proc connected*(b: NetworkPeer): bool =
not(isNil(b.sendConn)) and
not(b.sendConn.closed or b.sendConn.atEof)
proc connected*(self: NetworkPeer): bool =
not (isNil(self.sendConn)) and not (self.sendConn.closed or self.sendConn.atEof)
proc readLoop*(b: NetworkPeer, conn: Connection) {.async.} =
proc readLoop*(self: NetworkPeer, conn: Connection) {.async: (raises: []).} =
if isNil(conn):
trace "No connection to read from", peer = self.id
return
trace "Attaching read loop", peer = self.id, connId = conn.oid
try:
var nextYield = Moment.now() + self.yieldInterval
while not conn.atEof or not conn.closed:
if Moment.now() > nextYield:
nextYield = Moment.now() + self.yieldInterval
trace "Yielding in read loop",
peer = self.id, nextYield = nextYield, interval = self.yieldInterval
await sleepAsync(10.millis)
let
data = await conn.readLp(MaxMessageSize.int)
msg = Message.protobufDecode(data).mapFailure().tryGet()
await b.handler(b, msg)
trace "Received message", peer = self.id, connId = conn.oid
await self.handler(self, msg)
except CancelledError:
trace "Read loop cancelled"
except CatchableError as err:
warn "Exception in blockexc read loop", msg = err.msg
finally:
warn "Detaching read loop", peer = self.id, connId = conn.oid
if self.sendConn == conn:
self.sendConn = nil
await conn.close()
proc connect*(b: NetworkPeer): Future[Connection] {.async.} =
if b.connected:
return b.sendConn
proc connect*(
self: NetworkPeer
): Future[Connection] {.async: (raises: [CancelledError]).} =
if self.connected:
trace "Already connected", peer = self.id, connId = self.sendConn.oid
return self.sendConn
b.sendConn = await b.getConn()
asyncSpawn b.readLoop(b.sendConn)
return b.sendConn
self.sendConn = await self.getConn()
self.trackedFutures.track(self.readLoop(self.sendConn))
return self.sendConn
proc send*(b: NetworkPeer, msg: Message) {.async.} =
let conn = await b.connect()
proc send*(
self: NetworkPeer, msg: Message
) {.async: (raises: [CancelledError, LPStreamError]).} =
let conn = await self.connect()
if isNil(conn):
warn "Unable to get send connection for peer message not sent", peer = b.id
warn "Unable to get send connection for peer message not sent", peer = self.id
return
await conn.writeLp(protobufEncode(msg))
proc broadcast*(b: NetworkPeer, msg: Message) =
proc sendAwaiter() {.async.} =
trace "Sending message", peer = self.id, connId = conn.oid
try:
await b.send(msg)
except CatchableError as exc:
warn "Exception broadcasting message to peer", peer = b.id, exc = exc.msg
asyncSpawn sendAwaiter()
await conn.writeLp(protobufEncode(msg))
except CatchableError as err:
if self.sendConn == conn:
self.sendConn = nil
raise newException(LPStreamError, "Failed to send message: " & err.msg)
func new*(
T: type NetworkPeer,
peer: PeerId,
connProvider: ConnProvider,
rpcHandler: RPCHandler): NetworkPeer =
doAssert(not isNil(connProvider),
"should supply connection provider")
rpcHandler: RPCHandler,
): NetworkPeer =
doAssert(not isNil(connProvider), "should supply connection provider")
NetworkPeer(
id: peer,
getConn: connProvider,
handler: rpcHandler)
handler: rpcHandler,
trackedFutures: TrackedFutures(),
)
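
A sketch of wiring a NetworkPeer by hand, mirroring the ConnProvider shape above; switch, remote, and Codec are assumed to be in scope:

proc getConn(): Future[Connection] {.async: (raises: [CancelledError]).} =
  try:
    return await switch.dial(remote, Codec)
  except CancelledError as exc:
    raise exc
  except CatchableError:
    return nil # send() warns and bails on a nil connection

proc onMsg(peer: NetworkPeer, msg: Message) {.async: (raises: []).} =
  echo "rpc from ", peer.id

let netPeer = NetworkPeer.new(remote, getConn, onMsg)
discard await netPeer.connect() # dials and attaches the tracked read loop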

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -25,29 +25,77 @@ import ../../logutils
export payments, nitro
type
BlockExcPeerCtx* = ref object of RootObj
const
MinRefreshInterval = 1.seconds
MaxRefreshBackoff = 36 # 36 seconds
MaxWantListBatchSize* = 1024 # Maximum blocks to send per WantList message
type BlockExcPeerCtx* = ref object of RootObj
id*: PeerId
blocks*: Table[BlockAddress, Presence] # remote peer have list including price
peerWants*: seq[WantListEntry] # remote peers want lists
wantedBlocks*: HashSet[BlockAddress] # blocks that the peer wants
exchanged*: int # times peer has exchanged with us
lastExchange*: Moment # last time peer has exchanged with us
refreshInProgress*: bool # indicates if a refresh is in progress
lastRefresh*: Moment # last time we refreshed our knowledge of the blocks this peer has
refreshBackoff*: int = 1 # backoff factor for refresh requests
account*: ?Account # ethereum account of this peer
paymentChannel*: ?ChannelId # payment channel id
blocksSent*: HashSet[BlockAddress] # blocks sent to peer
blocksRequested*: HashSet[BlockAddress] # pending block requests to this peer
lastExchange*: Moment # last time peer has sent us a block
activityTimeout*: Duration
lastSentWants*: HashSet[BlockAddress]
# track what wantList we last sent for delta updates
proc peerHave*(self: BlockExcPeerCtx): seq[BlockAddress] =
toSeq(self.blocks.keys)
proc isKnowledgeStale*(self: BlockExcPeerCtx): bool =
let staleness =
self.lastRefresh + self.refreshBackoff * MinRefreshInterval < Moment.now()
proc peerHaveCids*(self: BlockExcPeerCtx): HashSet[Cid] =
self.blocks.keys.toSeq.mapIt(it.cidOrTreeCid).toHashSet
if staleness and self.refreshInProgress:
trace "Cleaning up refresh state", peer = self.id
self.refreshInProgress = false
self.refreshBackoff = 1
proc peerWantsCids*(self: BlockExcPeerCtx): HashSet[Cid] =
self.peerWants.mapIt(it.address.cidOrTreeCid).toHashSet
staleness
proc isBlockSent*(self: BlockExcPeerCtx, address: BlockAddress): bool =
address in self.blocksSent
proc markBlockAsSent*(self: BlockExcPeerCtx, address: BlockAddress) =
self.blocksSent.incl(address)
proc markBlockAsNotSent*(self: BlockExcPeerCtx, address: BlockAddress) =
self.blocksSent.excl(address)
proc refreshRequested*(self: BlockExcPeerCtx) =
trace "Refresh requested for peer", peer = self.id, backoff = self.refreshBackoff
self.refreshInProgress = true
self.lastRefresh = Moment.now()
proc refreshReplied*(self: BlockExcPeerCtx) =
self.refreshInProgress = false
self.lastRefresh = Moment.now()
self.refreshBackoff = min(self.refreshBackoff * 2, MaxRefreshBackoff)
proc havesUpdated(self: BlockExcPeerCtx) =
self.refreshBackoff = 1
proc wantsUpdated*(self: BlockExcPeerCtx) =
self.refreshBackoff = 1
proc peerHave*(self: BlockExcPeerCtx): HashSet[BlockAddress] =
# XXX: this is ugly and inefficient, but since these will typically
# be used in "joins", it's better to pay the price here and have
# a linear join than to not do it and have a quadratic join.
toHashSet(self.blocks.keys.toSeq)
proc contains*(self: BlockExcPeerCtx, address: BlockAddress): bool =
address in self.blocks
func setPresence*(self: BlockExcPeerCtx, presence: Presence) =
if presence.address notin self.blocks:
self.havesUpdated()
self.blocks[presence.address] = presence
func cleanPresence*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]) =
@ -64,3 +112,36 @@ func price*(self: BlockExcPeerCtx, addresses: seq[BlockAddress]): UInt256 =
price += precense[].price
price
proc blockRequestScheduled*(self: BlockExcPeerCtx, address: BlockAddress) =
## Adds a block to the set of blocks that have been requested to this peer
## (its request schedule).
if self.blocksRequested.len == 0:
self.lastExchange = Moment.now()
self.blocksRequested.incl(address)
proc blockRequestCancelled*(self: BlockExcPeerCtx, address: BlockAddress) =
## Removes a block from the set of blocks that have been requested to this peer
## (its request schedule).
self.blocksRequested.excl(address)
proc blockReceived*(self: BlockExcPeerCtx, address: BlockAddress): bool =
let wasRequested = address in self.blocksRequested
self.blocksRequested.excl(address)
self.lastExchange = Moment.now()
wasRequested
proc activityTimer*(
self: BlockExcPeerCtx
): Future[void] {.async: (raises: [CancelledError]).} =
## This is called by the block exchange when a block is scheduled for this peer.
## If the peer sends no blocks for a while, it is considered inactive/uncooperative
## and the peer is dropped. Note that ANY block that the peer sends will reset this
## timer for all blocks.
##
while true:
let idleTime = Moment.now() - self.lastExchange
if idleTime > self.activityTimeout:
return
await sleepAsync(self.activityTimeout - idleTime)

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,13 +7,12 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/sequtils
import std/tables
import std/algorithm
import pkg/upraises
push: {.upraises: [].}
import std/sequtils
import pkg/chronos
import pkg/libp2p
@ -22,7 +21,6 @@ import ../protobuf/blockexc
import ../../blocktype
import ../../logutils
import ./peercontext
export peercontext
@ -33,6 +31,8 @@ type
PeerCtxStore* = ref object of RootObj
peers*: OrderedTable[PeerId, BlockExcPeerCtx]
PeersForBlock* = tuple[with: seq[BlockExcPeerCtx], without: seq[BlockExcPeerCtx]]
iterator items*(self: PeerCtxStore): BlockExcPeerCtx =
for p in self.peers.values:
yield p
@ -43,6 +43,9 @@ proc contains*(a: openArray[BlockExcPeerCtx], b: PeerId): bool =
a.anyIt(it.id == b)
func peerIds*(self: PeerCtxStore): seq[PeerId] =
toSeq(self.peers.keys)
func contains*(self: PeerCtxStore, peerId: PeerId): bool =
peerId in self.peers
@ -59,43 +62,27 @@ func len*(self: PeerCtxStore): int =
self.peers.len
func peersHave*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt( it.peerHave.anyIt( it == address ) )
toSeq(self.peers.values).filterIt(address in it.peerHave)
func peersHave*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
# FIXME: this is much slower and can lead to unexpected performance loss.
toSeq(self.peers.values).filterIt(it.peerHave.anyIt(it.cidOrTreeCid == cid))
func peersWant*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt( it.peerWants.anyIt( it == address ) )
toSeq(self.peers.values).filterIt(address in it.wantedBlocks)
func peersWant*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] =
toSeq(self.peers.values).filterIt( it.peerWants.anyIt( it.address.cidOrTreeCid == cid ) )
# FIXME: this is much slower and can lead to unexpected performance loss.
toSeq(self.peers.values).filterIt(it.wantedBlocks.anyIt(it.cidOrTreeCid == cid))
func selectCheapest*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] =
# assume that the price for all leaves in a tree is the same
let rootAddress = BlockAddress(leaf: false, cid: address.cidOrTreeCid)
var peers = self.peersHave(rootAddress)
func cmp(a, b: BlockExcPeerCtx): int =
var
priceA = 0.u256
priceB = 0.u256
a.blocks.withValue(rootAddress, precense):
priceA = precense[].price
b.blocks.withValue(rootAddress, precense):
priceB = precense[].price
if priceA == priceB:
0
elif priceA > priceB:
1
proc getPeersForBlock*(self: PeerCtxStore, address: BlockAddress): PeersForBlock =
var res: PeersForBlock = (@[], @[])
for peer in self:
if address in peer:
res.with.add(peer)
else:
-1
peers.sort(cmp)
trace "Selected cheapest peers", peers = peers.len
return peers
res.without.add(peer)
res
proc new*(T: type PeerCtxStore): PeerCtxStore =
## create new instance of a peer context store
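
A sketch of the new PeersForBlock split in use; address is a placeholder BlockAddress:

let store = PeerCtxStore.new()
# ... contexts are added as peers join ...
let res = store.getPeersForBlock(address)
echo "can serve: ", res.with.len, ", worth querying: ", res.without.len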

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,7 +9,6 @@
import std/hashes
import std/sequtils
import pkg/stew/endians2
import message
@ -20,13 +19,6 @@ export Wantlist, WantType, WantListEntry
export BlockDelivery, BlockPresenceType, BlockPresence
export AccountMessage, StateChannelUpdate
proc hash*(a: BlockAddress): Hash =
if a.leaf:
let data = a.treeCid.data.buffer & @(a.index.uint64.toBytesBE)
hash(data)
else:
hash(a.cid.data.buffer)
proc hash*(e: WantListEntry): Hash =
hash(e.address)
@ -42,7 +34,6 @@ proc `==`*(a: WantListEntry, b: BlockAddress): bool =
proc `<`*(a, b: WantListEntry): bool =
a.priority < b.priority
proc `==`*(a: BlockPresence, b: BlockAddress): bool =
return a.address == b

View File

@ -1,4 +1,4 @@
# Protocol of data exchange between Codex nodes
# Protocol of data exchange between Logos Storage nodes
# and Protobuf encoder/decoder for these messages.
#
# Eventually all this code should be auto-generated from message.proto.
@ -20,16 +20,20 @@ const
type
WantType* = enum
WantBlock = 0,
WantBlock = 0
WantHave = 1
WantListEntry* = object
address*: BlockAddress
# XXX: I think explicit priority is pointless as the peer will request
# the blocks in the order it wants to receive them, and all we have to
# do is process those in the same order as we send them back. It also
# complicates things for no reason at the moment, as the priority is
# always set to 0.
priority*: int32 # The priority (normalized). default to 1
cancel*: bool # Whether this revokes an entry
wantType*: WantType # Note: defaults to enum 0, ie Block
sendDontHave*: bool # Note: defaults to false
inFlight*: bool # Whether block sending is in progress. Not serialized.
WantList* = object
entries*: seq[WantListEntry] # A list of wantList entries
@ -41,7 +45,7 @@ type
proof*: ?CodexProof # Present only if `address.leaf` is true
BlockPresenceType* = enum
Have = 0,
Have = 0
DontHave = 1
BlockPresence* = object
@ -97,7 +101,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: WantList) =
pb.write(field, ipb)
proc write*(pb: var ProtoBuffer, field: int, value: BlockDelivery) =
var ipb = initProtoBuffer(maxSize = MaxBlockSize)
var ipb = initProtoBuffer()
ipb.write(1, value.blk.cid.data.buffer)
ipb.write(2, value.blk.data)
ipb.write(3, value.address)
@ -128,7 +132,7 @@ proc write*(pb: var ProtoBuffer, field: int, value: StateChannelUpdate) =
pb.write(field, ipb)
proc protobufEncode*(value: Message): seq[byte] =
var ipb = initProtoBuffer(maxSize = MaxMessageSize)
var ipb = initProtoBuffer()
ipb.write(1, value.wantList)
for v in value.payload:
ipb.write(3, v)
@ -140,7 +144,6 @@ proc protobufEncode*(value: Message): seq[byte] =
ipb.finish()
ipb.buffer
#
# Decoding Message from seq[byte] in Protobuf format
#
@ -211,7 +214,8 @@ proc decode*(_: type BlockDelivery, pb: ProtoBuffer): ProtoResult[BlockDelivery]
if ?pb.getField(1, cidBuf):
cid = ?Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob)
if ?pb.getField(2, dataBuf):
value.blk = ? Block.new(cid, dataBuf, verify = true).mapErr(x => ProtoError.IncorrectBlob)
value.blk =
?Block.new(cid, dataBuf, verify = true).mapErr(x => ProtoError.IncorrectBlob)
if ?pb.getField(3, ipb):
value.address = ?BlockAddress.decode(ipb)
@ -240,28 +244,28 @@ proc decode*(_: type BlockPresence, pb: ProtoBuffer): ProtoResult[BlockPresence]
ok(value)
proc decode*(_: type AccountMessage, pb: ProtoBuffer): ProtoResult[AccountMessage] =
var
value = AccountMessage()
var value = AccountMessage()
discard ?pb.getField(1, value.address)
ok(value)
proc decode*(_: type StateChannelUpdate, pb: ProtoBuffer): ProtoResult[StateChannelUpdate] =
var
value = StateChannelUpdate()
proc decode*(
_: type StateChannelUpdate, pb: ProtoBuffer
): ProtoResult[StateChannelUpdate] =
var value = StateChannelUpdate()
discard ?pb.getField(1, value.update)
ok(value)
proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] =
var
value = Message()
pb = initProtoBuffer(msg, maxSize = MaxMessageSize)
pb = initProtoBuffer(msg)
ipb: ProtoBuffer
sublist: seq[seq[byte]]
if ?pb.getField(1, ipb):
value.wantList = ?WantList.decode(ipb)
if ?pb.getRepeatedField(3, sublist):
for item in sublist:
value.payload.add(? BlockDelivery.decode(initProtoBuffer(item, maxSize = MaxBlockSize)))
value.payload.add(?BlockDelivery.decode(initProtoBuffer(item)))
if ?pb.getRepeatedField(4, sublist):
for item in sublist:
value.blockPresences.add(?BlockPresence.decode(initProtoBuffer(item)))
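
A round-trip sketch for the encoder and decoder above; the message content is a placeholder:

let msg = Message(wantList: WantList(full: true))
let bytes = protobufEncode(msg)
let decoded = Message.protobufDecode(bytes).tryGet() # ProtoResult[Message]
assert decoded.wantList.full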

View File

@ -1,4 +1,4 @@
// Protocol of data exchange between Codex nodes.
// Protocol of data exchange between Logos Storage nodes.
// Extended version of https://github.com/ipfs/specs/blob/main/BITSWAP.md
syntax = "proto3";

View File

@ -1,8 +1,9 @@
{.push raises: [].}
import pkg/stew/byteutils
import pkg/stint
import pkg/nitro
import pkg/questionable
import pkg/upraises
import ./blockexc
export AccountMessage
@ -11,10 +12,7 @@ export StateChannelUpdate
export stint
export nitro
push: {.upraises: [].}
type
Account* = object
type Account* = object
address*: EthAddress
func init*(_: type AccountMessage, account: Account): AccountMessage =

View File

@ -1,8 +1,9 @@
{.push raises: [].}
import libp2p
import pkg/stint
import pkg/questionable
import pkg/questionable/results
import pkg/upraises
import ./blockexc
import ../../blocktype
@ -11,8 +12,6 @@ export questionable
export stint
export BlockPresenceType
upraises.push: {.upraises: [].}
type
PresenceMessage* = blockexc.BlockPresence
Presence* = object
@ -32,15 +31,12 @@ func init*(_: type Presence, message: PresenceMessage): ?Presence =
some Presence(
address: message.address,
have: message.`type` == BlockPresenceType.Have,
price: price
price: price,
)
func init*(_: type PresenceMessage, presence: Presence): PresenceMessage =
PresenceMessage(
address: presence.address,
`type`: if presence.have:
BlockPresenceType.Have
else:
BlockPresenceType.DontHave,
price: @(presence.price.toBytesBE)
`type`: if presence.have: BlockPresenceType.Have else: BlockPresenceType.DontHave,
price: @(presence.price.toBytesBE),
)
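
Since `price` travels as big-endian bytes, here is a small stint round-trip sketch (assuming `UInt256.fromBytesBE` is the inverse applied on the decoding side):

```nim
import pkg/stint

let price = 42.u256
let wire = @(price.toBytesBE)   # what PresenceMessage carries
doAssert UInt256.fromBytesBE(wire) == price
```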

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,15 +9,14 @@
import std/tables
import std/sugar
import std/hashes
export tables
import pkg/upraises
push: {.upraises: [].}
{.push raises: [], gcsafe.}
import pkg/libp2p/[cid, multicodec, multihash]
import pkg/stew/byteutils
import pkg/stew/[byteutils, endians2]
import pkg/questionable
import pkg/questionable/results
@ -31,7 +30,7 @@ import ./codextypes
export errors, logutils, units, codextypes
type
Block* = object of RootObj
Block* = ref object of RootObj
cid*: Cid
data*: seq[byte]
@ -49,11 +48,11 @@ logutils.formatIt(LogFormat.textLines, BlockAddress):
else:
"cid: " & shortLog($it.cid)
logutils.formatIt(LogFormat.json, BlockAddress): %it
logutils.formatIt(LogFormat.json, BlockAddress):
%it
proc `==`*(a, b: BlockAddress): bool =
a.leaf == b.leaf and
(
a.leaf == b.leaf and (
if a.leaf:
a.treeCid == b.treeCid and a.index == b.index
else:
@ -66,11 +65,15 @@ proc `$`*(a: BlockAddress): string =
else:
"cid: " & $a.cid
proc cidOrTreeCid*(a: BlockAddress): Cid =
proc hash*(a: BlockAddress): Hash =
if a.leaf:
a.treeCid
let data = a.treeCid.data.buffer & @(a.index.uint64.toBytesBE)
hash(data)
else:
a.cid
hash(a.cid.data.buffer)
proc cidOrTreeCid*(a: BlockAddress): Cid =
if a.leaf: a.treeCid else: a.cid
proc address*(b: Block): BlockAddress =
BlockAddress(leaf: false, cid: b.cid)
@ -90,7 +93,8 @@ func new*(
data: openArray[byte] = [],
version = CIDv1,
mcodec = Sha256HashCodec,
codec = BlockCodec): ?!Block =
codec = BlockCodec,
): ?!Block =
## creates a new block for both storage and network IO
##
@ -100,15 +104,11 @@ func new*(
# TODO: If the hash is `>=` to the data,
# use the Cid as a container!
Block(
cid: cid,
data: @data).success
Block(cid: cid, data: @data).success
proc new*(
T: type Block,
cid: Cid,
data: openArray[byte],
verify: bool = true
T: type Block, cid: Cid, data: openArray[byte], verify: bool = true
): ?!Block =
## creates a new block for both storage and network IO
##
@ -121,22 +121,23 @@ proc new*(
if computedCid != cid:
return "Cid doesn't match the data".failure
return Block(
cid: cid,
data: @data
).success
return Block(cid: cid, data: @data).success
proc emptyBlock*(version: CidVersion, hcodec: MultiCodec): ?!Block =
emptyCid(version, hcodec, BlockCodec)
.flatMap((cid: Cid) => Block.new(cid = cid, data = @[]))
emptyCid(version, hcodec, BlockCodec).flatMap(
(cid: Cid) => Block.new(cid = cid, data = @[])
)
proc emptyBlock*(cid: Cid): ?!Block =
cid.mhash.mapFailure.flatMap((mhash: MultiHash) =>
emptyBlock(cid.cidver, mhash.mcodec))
cid.mhash.mapFailure.flatMap(
(mhash: MultiHash) => emptyBlock(cid.cidver, mhash.mcodec)
)
proc isEmpty*(cid: Cid): bool =
success(cid) == cid.mhash.mapFailure.flatMap((mhash: MultiHash) =>
emptyCid(cid.cidver, mhash.mcodec, cid.mcodec))
success(cid) ==
cid.mhash.mapFailure.flatMap(
(mhash: MultiHash) => emptyCid(cid.cidver, mhash.mcodec, cid.mcodec)
)
proc isEmpty*(blk: Block): bool =
blk.cid.isEmpty
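
With `hash` and `==` now defined for `BlockAddress`, addresses can key standard tables. A hedged sketch using the constructors above (`tryGet` from questionable/results assumed in scope):

```nim
import std/tables

let blk = Block.new(data = @[1'u8, 2, 3]).tryGet()
doAssert Block.new(blk.cid, blk.data, verify = true).isOk
var seen: Table[BlockAddress, bool]
seen[blk.address] = true
doAssert seen.hasKey(blk.address)
```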

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,9 +9,7 @@
# TODO: This is super inefficient and needs a rewrite, but it'll do for now
import pkg/upraises
push: {.upraises: [].}
{.push raises: [], gcsafe.}
import pkg/questionable
import pkg/questionable/results
@ -23,13 +21,15 @@ import ./logutils
export blocktype
const
DefaultChunkSize* = DefaultBlockSize
const DefaultChunkSize* = DefaultBlockSize
type
# default reader type
ChunkerError* = object of CatchableError
ChunkBuffer* = ptr UncheckedArray[byte]
Reader* = proc(data: ChunkBuffer, len: int): Future[int] {.gcsafe, raises: [Defect].}
Reader* = proc(data: ChunkBuffer, len: int): Future[int] {.
async: (raises: [ChunkerError, CancelledError])
.}
# Reader that splits input data into fixed-size chunks
Chunker* = ref object
@ -60,30 +60,21 @@ proc getBytes*(c: Chunker): Future[seq[byte]] {.async.} =
return move buff
proc new*(
T: type Chunker,
reader: Reader,
chunkSize = DefaultChunkSize,
pad = true
T: type Chunker, reader: Reader, chunkSize = DefaultChunkSize, pad = true
): Chunker =
## create a new Chunker instance
##
Chunker(
reader: reader,
offset: 0,
chunkSize: chunkSize,
pad: pad)
Chunker(reader: reader, offset: 0, chunkSize: chunkSize, pad: pad)
proc new*(
T: type LPStreamChunker,
stream: LPStream,
chunkSize = DefaultChunkSize,
pad = true
T: type LPStreamChunker, stream: LPStream, chunkSize = DefaultChunkSize, pad = true
): LPStreamChunker =
## create the default LPStream chunker
##
proc reader(data: ChunkBuffer, len: int): Future[int]
{.gcsafe, async, raises: [Defect].} =
proc reader(
data: ChunkBuffer, len: int
): Future[int] {.async: (raises: [ChunkerError, CancelledError]).} =
var res = 0
try:
while res < len:
@ -92,28 +83,26 @@ proc new*(
trace "LPStreamChunker stream Eof", exc = exc.msg
except CancelledError as error:
raise error
except LPStreamError as error:
error "LPStream error", err = error.msg
raise newException(ChunkerError, "LPStream error", error)
except CatchableError as exc:
trace "CatchableError exception", exc = exc.msg
error "CatchableError exception", exc = exc.msg
raise newException(Defect, exc.msg)
return res
LPStreamChunker.new(
reader = reader,
chunkSize = chunkSize,
pad = pad)
LPStreamChunker.new(reader = reader, chunkSize = chunkSize, pad = pad)
proc new*(
T: type FileChunker,
file: File,
chunkSize = DefaultChunkSize,
pad = true
T: type FileChunker, file: File, chunkSize = DefaultChunkSize, pad = true
): FileChunker =
## create the default File chunker
##
proc reader(data: ChunkBuffer, len: int): Future[int]
{.gcsafe, async, raises: [Defect].} =
proc reader(
data: ChunkBuffer, len: int
): Future[int] {.async: (raises: [ChunkerError, CancelledError]).} =
var total = 0
try:
while total < len:
@ -127,12 +116,9 @@ proc new*(
except CancelledError as error:
raise error
except CatchableError as exc:
trace "CatchableError exception", exc = exc.msg
error "CatchableError exception", exc = exc.msg
raise newException(Defect, exc.msg)
return total
FileChunker.new(
reader = reader,
chunkSize = chunkSize,
pad = pad)
FileChunker.new(reader = reader, chunkSize = chunkSize, pad = pad)
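
A hedged usage sketch for the chunkers above; the input path is hypothetical and the default `chunkSize` is used:

```nim
import pkg/chronos

proc chunkFile(path: string) {.async.} =
  let file = open(path)   # hypothetical input file
  defer: file.close()
  let chunker = FileChunker.new(file = file, pad = false)
  while (let chunk = await chunker.getBytes(); chunk.len > 0):
    echo "read chunk of ", chunk.len, " bytes"
```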

View File

@ -1,6 +1,7 @@
{.push raises: [].}
import pkg/chronos
import pkg/stew/endians2
import pkg/upraises
import pkg/stint
type
@ -8,10 +9,12 @@ type
SecondsSince1970* = int64
Timeout* = object of CatchableError
method now*(clock: Clock): SecondsSince1970 {.base, upraises: [].} =
method now*(clock: Clock): SecondsSince1970 {.base, gcsafe, raises: [].} =
raiseAssert "not implemented"
method waitUntil*(clock: Clock, time: SecondsSince1970) {.base, async.} =
method waitUntil*(
clock: Clock, time: SecondsSince1970
) {.base, async: (raises: [CancelledError]).} =
raiseAssert "not implemented"
method start*(clock: Clock) {.base, async.} =
@ -20,9 +23,9 @@ method start*(clock: Clock) {.base, async.} =
method stop*(clock: Clock) {.base, async.} =
discard
proc withTimeout*(future: Future[void],
clock: Clock,
expiry: SecondsSince1970) {.async.} =
proc withTimeout*(
future: Future[void], clock: Clock, expiry: SecondsSince1970
) {.async.} =
let timeout = clock.waitUntil(expiry)
try:
await future or timeout
@ -40,5 +43,8 @@ proc toSecondsSince1970*(bytes: seq[byte]): SecondsSince1970 =
let asUint = uint64.fromBytes(bytes)
cast[int64](asUint)
proc toSecondsSince1970*(num: uint64): SecondsSince1970 =
cast[int64](num)
proc toSecondsSince1970*(bigint: UInt256): SecondsSince1970 =
bigint.truncate(int64)
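
Note that the `uint64` overload above is a raw `cast`, so values beyond `int64.high` wrap to negative rather than raise; a quick sketch:

```nim
doAssert toSecondsSince1970(42'u64) == 42'i64
doAssert toSecondsSince1970(uint64.high) == -1'i64  # cast wraps; no range check
```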

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -12,23 +12,23 @@ import std/strutils
import std/os
import std/tables
import std/cpuinfo
import std/net
import pkg/chronos
import pkg/taskpools
import pkg/presto
import pkg/libp2p
import pkg/confutils
import pkg/confutils/defs
import pkg/nitro
import pkg/stew/io2
import pkg/stew/shims/net as stewnet
import pkg/datastore
import pkg/ethers except Rng
import pkg/stew/io2
import pkg/taskpools
import ./node
import ./conf
import ./rng
import ./rng as random
import ./rest/api
import ./stores
import ./slots
@ -44,6 +44,7 @@ import ./utils/addrutils
import ./namespaces
import ./codextypes
import ./logutils
import ./nat
logScope:
topics = "codex node"
@ -56,10 +57,20 @@ type
repoStore: RepoStore
maintenance: BlockMaintainer
taskpool: Taskpool
isStarted: bool
CodexPrivateKey* = libp2p.PrivateKey # alias
EthWallet = ethers.Wallet
func config*(self: CodexServer): CodexConf =
return self.config
func node*(self: CodexServer): CodexNodeRef =
return self.codexNode
func repoStore*(self: CodexServer): RepoStore =
return self.repoStore
proc waitForSync(provider: Provider): Future[void] {.async.} =
var sleepTime = 1
trace "Checking sync state of Ethereum provider..."
@ -70,8 +81,7 @@ proc waitForSync(provider: Provider): Future[void] {.async.} =
inc sleepTime
trace "Ethereum provider is synced."
proc bootstrapInteractions(
s: CodexServer): Future[void] {.async.} =
proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} =
## bootstrap interactions and return contracts
## using clients, hosts, validators pairings
##
@ -84,7 +94,9 @@ proc bootstrapInteractions(
error "Persistence enabled, but no Ethereum account was set"
quit QuitFailure
let provider = JsonRpcProvider.new(config.ethProvider)
let provider = JsonRpcProvider.new(
config.ethProvider, maxPriorityFeePerGas = config.maxPriorityFeePerGas.u256
)
await waitForSync(provider)
var signer: Signer
if account =? config.ethAccount:
@ -104,13 +116,15 @@ proc bootstrapInteractions(
quit QuitFailure
signer = wallet
let deploy = Deployment.new(provider, config)
let deploy = Deployment.new(provider, config.marketplaceAddress)
without marketplaceAddress =? await deploy.address(Marketplace):
error "No Marketplace address was specified or there is no known address for the current network"
quit QuitFailure
let marketplace = Marketplace.new(marketplaceAddress, signer)
let market = OnChainMarket.new(marketplace)
let market = OnChainMarket.new(
marketplace, config.rewardRecipient, config.marketplaceRequestCacheSize
)
let clock = OnChainClock.new(provider)
var client: ?ClientInteractions
@ -122,10 +136,9 @@ proc bootstrapInteractions(
else:
s.codexNode.clock = SystemClock()
if config.persistence:
# This is used for simulation purposes. Normal nodes won't be compiled with this flag
# and hence the proof failure will always be 0.
when codex_enable_proof_failures:
when storage_enable_proof_failures:
let proofFailures = config.simulateProofFailures
if proofFailures > 0:
warn "Enabling proof failure simulation!"
@ -134,189 +147,232 @@ proc bootstrapInteractions(
if config.simulateProofFailures > 0:
warn "Proof failure simulation is not enabled for this build! Configuration ignored"
if error =? (await market.loadConfig()).errorOption:
fatal "Cannot load market configuration", error = error.msg
quit QuitFailure
let purchasing = Purchasing.new(market, clock)
let sales = Sales.new(market, clock, repo, proofFailures)
client = some ClientInteractions.new(clock, purchasing)
host = some HostInteractions.new(clock, sales)
if config.validator:
let validation = Validation.new(clock, market, config.validatorMaxSlots)
without validationConfig =?
ValidationConfig.init(
config.validatorMaxSlots, config.validatorGroups, config.validatorGroupIndex
), err:
error "Invalid validation parameters", err = err.msg
quit QuitFailure
let validation = Validation.new(clock, market, validationConfig)
validator = some ValidatorInteractions.new(clock, validation)
s.codexNode.contracts = (client, host, validator)
proc start*(s: CodexServer) {.async.} =
trace "Starting codex node", config = $s.config
if s.isStarted:
warn "Storage server already started, skipping"
return
trace "Starting Storage node", config = $s.config
await s.repoStore.start()
s.maintenance.start()
await s.codexNode.switch.start()
let
# TODO: Can't define these as constants, pity
natIpPart = MultiAddress.init("/ip4/" & $s.config.nat & "/")
.expect("Should create multiaddress")
anyAddrIp = MultiAddress.init("/ip4/0.0.0.0/")
.expect("Should create multiaddress")
loopBackAddrIp = MultiAddress.init("/ip4/127.0.0.1/")
.expect("Should create multiaddress")
# announce addresses should be set to bound addresses,
# but the IP should be mapped to the provided nat ip
announceAddrs = s.codexNode.switch.peerInfo.addrs.mapIt:
block:
let
listenIPPart = it[multiCodec("ip4")].expect("Should get IP")
if listenIPPart == anyAddrIp or
(listenIPPart == loopBackAddrIp and natIpPart != loopBackAddrIp):
it.remapAddr(s.config.nat.some)
else:
it
let (announceAddrs, discoveryAddrs) = nattedAddress(
s.config.nat, s.codexNode.switch.peerInfo.addrs, s.config.discoveryPort
)
s.codexNode.discovery.updateAnnounceRecord(announceAddrs)
s.codexNode.discovery.updateDhtRecord(s.config.nat, s.config.discoveryPort)
s.codexNode.discovery.updateDhtRecord(discoveryAddrs)
await s.bootstrapInteractions()
await s.codexNode.start()
if s.restServer != nil:
s.restServer.start()
s.isStarted = true
proc stop*(s: CodexServer) {.async.} =
notice "Stopping codex node"
if not s.isStarted:
warn "Storage is not started"
return
notice "Stopping Storage node"
s.taskpool.syncAll()
s.taskpool.shutdown()
await allFuturesThrowing(
s.restServer.stop(),
var futures =
@[
s.codexNode.switch.stop(),
s.codexNode.stop(),
s.repoStore.stop(),
s.maintenance.stop())
s.maintenance.stop(),
]
if s.restServer != nil:
futures.add(s.restServer.stop())
let res = await noCancel allFinishedFailed[void](futures)
if res.failure.len > 0:
error "Failed to stop Storage node", failures = res.failure.len
raiseAssert "Failed to stop Storage node"
proc close*(s: CodexServer) {.async.} =
var futures = @[s.codexNode.close(), s.repoStore.close()]
let res = await noCancel allFinishedFailed[void](futures)
if not s.taskpool.isNil:
try:
s.taskpool.shutdown()
except Exception as exc:
error "Failed to stop the taskpool", failures = res.failure.len
raiseAssert("Failure in taskpool shutdown:" & exc.msg)
if res.failure.len > 0:
error "Failed to close Storage node", failures = res.failure.len
raiseAssert "Failed to close Storage node"
proc shutdown*(server: CodexServer) {.async.} =
await server.stop()
await server.close()
proc new*(
T: type CodexServer,
config: CodexConf,
privateKey: CodexPrivateKey): CodexServer =
T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey
): CodexServer =
## create CodexServer including setting up datastore, repostore, etc
let
switch = SwitchBuilder
let switch = SwitchBuilder
.new()
.withPrivateKey(privateKey)
.withAddresses(config.listenAddrs)
.withRng(Rng.instance())
.withRng(random.Rng.instance())
.withNoise()
.withMplex(5.minutes, 5.minutes)
.withMaxConnections(config.maxPeers)
.withAgentVersion(config.agentString)
.withSignedPeerRecord(true)
.withTcpTransport({ServerFlags.ReuseAddr})
.withTcpTransport({ServerFlags.ReuseAddr, ServerFlags.TcpNoDelay})
.build()
var
cache: CacheStore = nil
taskpool: Taskpool
try:
if config.numThreads == ThreadCount(0):
taskpool = Taskpool.new(numThreads = min(countProcessors(), 16))
else:
taskpool = Taskpool.new(numThreads = int(config.numThreads))
info "Threadpool started", numThreads = taskpool.numThreads
except CatchableError as exc:
raiseAssert("Failure in taskpool initialization:" & exc.msg)
if config.cacheSize > 0'nb:
cache = CacheStore.new(cacheSize = config.cacheSize)
## Is unused?
let
discoveryDir = config.dataDir / CodexDhtNamespace
let discoveryDir = config.dataDir / CodexDhtNamespace
if io2.createPath(discoveryDir).isErr:
trace "Unable to create discovery directory for block store", discoveryDir = discoveryDir
trace "Unable to create discovery directory for block store",
discoveryDir = discoveryDir
raise (ref Defect)(
msg: "Unable to create discovery directory for block store: " & discoveryDir)
msg: "Unable to create discovery directory for block store: " & discoveryDir
)
let
discoveryStore = Datastore(
LevelDbDatastore.new(config.dataDir / CodexDhtProvidersNamespace)
.expect("Should create discovery datastore!"))
LevelDbDatastore.new(config.dataDir / CodexDhtProvidersNamespace).expect(
"Should create discovery datastore!"
)
)
discovery = Discovery.new(
switch.peerInfo.privateKey,
announceAddrs = config.listenAddrs,
bindIp = config.discoveryIp,
bindPort = config.discoveryPort,
bootstrapNodes = config.bootstrapNodes,
store = discoveryStore)
store = discoveryStore,
)
wallet = WalletRef.new(EthPrivateKey.random())
network = BlockExcNetwork.new(switch)
repoData = case config.repoKind
of repoFS: Datastore(FSDatastore.new($config.dataDir, depth = 5)
.expect("Should create repo file data store!"))
of repoSQLite: Datastore(SQLiteDatastore.new($config.dataDir)
.expect("Should create repo SQLite data store!"))
of repoLevelDb: Datastore(LevelDbDatastore.new($config.dataDir)
.expect("Should create repo LevelDB data store!"))
repoData =
case config.repoKind
of repoFS:
Datastore(
FSDatastore.new($config.dataDir, depth = 5).expect(
"Should create repo file data store!"
)
)
of repoSQLite:
Datastore(
SQLiteDatastore.new($config.dataDir).expect(
"Should create repo SQLite data store!"
)
)
of repoLevelDb:
Datastore(
LevelDbDatastore.new($config.dataDir).expect(
"Should create repo LevelDB data store!"
)
)
repoStore = RepoStore.new(
repoDs = repoData,
metaDs = LevelDbDatastore.new(config.dataDir / CodexMetaNamespace)
.expect("Should create metadata store!"),
quotaMaxBytes = config.storageQuota.uint,
blockTtl = config.blockTtl)
metaDs = LevelDbDatastore.new(config.dataDir / CodexMetaNamespace).expect(
"Should create metadata store!"
),
quotaMaxBytes = config.storageQuota,
blockTtl = config.blockTtl,
)
maintenance = BlockMaintainer.new(
repoStore,
interval = config.blockMaintenanceInterval,
numberOfBlocksPerInterval = config.blockMaintenanceNumberOfBlocks)
numberOfBlocksPerInterval = config.blockMaintenanceNumberOfBlocks,
)
peerStore = PeerCtxStore.new()
pendingBlocks = PendingBlocksManager.new()
blockDiscovery = DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks)
engine = BlockExcEngine.new(repoStore, wallet, network, blockDiscovery, peerStore, pendingBlocks)
pendingBlocks = PendingBlocksManager.new(retries = config.blockRetries)
advertiser = Advertiser.new(repoStore, discovery)
blockDiscovery =
DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks)
engine = BlockExcEngine.new(
repoStore, wallet, network, blockDiscovery, advertiser, peerStore, pendingBlocks
)
store = NetworkStore.new(engine, repoStore)
prover = if config.prover:
if not fileAccessible($config.circomR1cs, {AccessFlags.Read}) and
endsWith($config.circomR1cs, ".r1cs"):
error "Circom R1CS file not accessible"
raise (ref Defect)(
msg: "r1cs file not readable, doesn't exist or wrong extension (.r1cs)")
if not fileAccessible($config.circomWasm, {AccessFlags.Read}) and
endsWith($config.circomWasm, ".wasm"):
error "Circom wasm file not accessible"
raise (ref Defect)(
msg: "wasm file not readable, doesn't exist or wrong extension (.wasm)")
let zkey = if not config.circomNoZkey:
if not fileAccessible($config.circomZkey, {AccessFlags.Read}) and
endsWith($config.circomZkey, ".zkey"):
error "Circom zkey file not accessible"
raise (ref Defect)(
msg: "zkey file not readable, doesn't exist or wrong extension (.zkey)")
$config.circomZkey
else: ""
some Prover.new(
store,
CircomCompat.init($config.circomR1cs, $config.circomWasm, zkey),
config.numProofSamples)
prover =
if config.prover:
let backend =
config.initializeBackend().expect("Unable to create prover backend.")
some Prover.new(store, backend, config.numProofSamples)
else:
none Prover
taskpool = Taskpool.new(num_threads = countProcessors())
codexNode = CodexNodeRef.new(
switch = switch,
networkStore = store,
engine = engine,
prover = prover,
discovery = discovery,
taskpool = taskpool)
prover = prover,
taskPool = taskpool,
)
restServer = RestServerRef.new(
var restServer: RestServerRef = nil
if config.apiBindAddress.isSome:
restServer = RestServerRef
.new(
codexNode.initRestApi(config, repoStore, config.apiCorsAllowedOrigin),
initTAddress(config.apiBindAddress , config.apiPort),
initTAddress(config.apiBindAddress.get(), config.apiPort),
bufferSize = (1024 * 64),
maxRequestBodySize = int.high)
.expect("Should start rest server!")
maxRequestBodySize = int.high,
)
.expect("Should create rest server!")
switch.mount(network)
@ -326,4 +382,5 @@ proc new*(
restServer: restServer,
repoStore: repoStore,
maintenance: maintenance,
taskpool: taskpool)
taskpool: taskpool,
)
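
A hedged lifecycle sketch tying together `new`, `start` and the new `shutdown` helper above; building `config` and `key` is assumed to happen elsewhere:

```nim
proc runNode(config: CodexConf, key: CodexPrivateKey) {.async.} =
  let server = CodexServer.new(config, key)
  await server.start()     # a second start() is a no-op thanks to isStarted
  # ... node is serving ...
  await server.shutdown()  # stop() followed by close(), as defined above
```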

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -48,18 +48,10 @@ const
SlotProvingRootCodec* = multiCodec("codex-proving-root")
CodexSlotCellCodec* = multiCodec("codex-slot-cell")
CodexHashesCodecs* = [
Sha256HashCodec,
Pos2Bn128SpngCodec,
Pos2Bn128MrklCodec
]
CodexHashesCodecs* = [Sha256HashCodec, Pos2Bn128SpngCodec, Pos2Bn128MrklCodec]
CodexPrimitivesCodecs* = [
ManifestCodec,
DatasetRootCodec,
BlockCodec,
SlotRootCodec,
SlotProvingRootCodec,
ManifestCodec, DatasetRootCodec, BlockCodec, SlotRootCodec, SlotProvingRootCodec,
CodexSlotCellCodec,
]
@ -78,24 +70,19 @@ proc initEmptyCidTable(): ?!Table[(CidVersion, MultiCodec, MultiCodec), Cid] =
Sha512HashCodec: ?MultiHash.digest($Sha512HashCodec, emptyData).mapFailure,
}.toTable
var
table = initTable[(CidVersion, MultiCodec, MultiCodec), Cid]()
var table = initTable[(CidVersion, MultiCodec, MultiCodec), Cid]()
for hcodec, mhash in PadHashes.pairs:
table[(CIDv1, hcodec, BlockCodec)] = ?Cid.init(CIDv1, BlockCodec, mhash).mapFailure
success table
proc emptyCid*(
version: CidVersion,
hcodec: MultiCodec,
dcodec: MultiCodec): ?!Cid =
proc emptyCid*(version: CidVersion, hcodec: MultiCodec, dcodec: MultiCodec): ?!Cid =
## Returns cid representing empty content,
## given cid version, hash codec and data codec
##
var
table {.global, threadvar.}: Table[(CidVersion, MultiCodec, MultiCodec), Cid]
var table {.global, threadvar.}: Table[(CidVersion, MultiCodec, MultiCodec), Cid]
once:
table = ?initEmptyCidTable()
@ -103,11 +90,10 @@ proc emptyCid*(
table[(version, hcodec, dcodec)].catch
proc emptyDigest*(
version: CidVersion,
hcodec: MultiCodec,
dcodec: MultiCodec): ?!MultiHash =
version: CidVersion, hcodec: MultiCodec, dcodec: MultiCodec
): ?!MultiHash =
## Returns hash representing empty content,
## given cid version, hash codec and data codec
##
emptyCid(version, hcodec, dcodec)
.flatMap((cid: Cid) => cid.mhash.mapFailure)
emptyCid(version, hcodec, dcodec).flatMap((cid: Cid) => cid.mhash.mapFailure)
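
A small sketch of the cached lookup above, using codec constants from this module; the table is built once per thread, so repeated calls are cheap:

```nim
doAssert emptyCid(CIDv1, Sha256HashCodec, BlockCodec).isOk
doAssert emptyDigest(CIDv1, Sha256HashCodec, BlockCodec).isOk
```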

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -10,10 +10,16 @@
{.push raises: [].}
import std/os
import std/terminal
{.push warning[UnusedImport]: on.}
import std/terminal # Is not used in tests
{.pop.}
import std/options
import std/parseutils
import std/strutils
import std/typetraits
import std/net
import pkg/chronos
import pkg/chronicles/helpers
@ -23,13 +29,12 @@ import pkg/confutils/std/net
import pkg/toml_serialization
import pkg/metrics
import pkg/metrics/chronos_httpserver
import pkg/stew/shims/net as stewnet
import pkg/stew/shims/parseutils
import pkg/stew/byteutils
import pkg/libp2p
import pkg/ethers
import pkg/questionable
import pkg/questionable/results
import pkg/stew/base64
import ./codextypes
import ./discovery
@ -37,31 +42,41 @@ import ./logutils
import ./stores
import ./units
import ./utils
import ./nat
import ./utils/natutils
export units, net, codextypes, logutils
from ./contracts/config import DefaultRequestCacheSize, DefaultMaxPriorityFeePerGas
from ./validationconfig import MaxSlots, ValidationGroups
from ./blockexchange/engine/pendingblocks import DefaultBlockRetries
export units, net, codextypes, logutils, completeCmdArg, parseCmdArg, NatConfig
export ValidationGroups, MaxSlots
export
DefaultQuotaBytes,
DefaultBlockTtl,
DefaultBlockMaintenanceInterval,
DefaultNumberOfBlocksToMaintainPerInterval
DefaultQuotaBytes, DefaultBlockTtl, DefaultBlockInterval, DefaultNumBlocksPerInterval,
DefaultRequestCacheSize, DefaultMaxPriorityFeePerGas, DefaultBlockRetries
type ThreadCount* = distinct Natural
proc `==`*(a, b: ThreadCount): bool {.borrow.}
proc defaultDataDir*(): string =
let dataDir = when defined(windows):
"AppData" / "Roaming" / "Codex"
let dataDir =
when defined(windows):
"AppData" / "Roaming" / "Storage"
elif defined(macosx):
"Library" / "Application Support" / "Codex"
"Library" / "Application Support" / "Storage"
else:
".cache" / "codex"
".cache" / "storage"
getHomeDir() / dataDir
const
codex_enable_api_debug_peers* {.booldefine.} = false
codex_enable_proof_failures* {.booldefine.} = false
codex_enable_log_counter* {.booldefine.} = false
storage_enable_api_debug_peers* {.booldefine.} = false
storage_enable_proof_failures* {.booldefine.} = false
storage_enable_log_counter* {.booldefine.} = false
DefaultDataDir* = defaultDataDir()
DefaultThreadCount* = ThreadCount(0)
type
StartUpCmd* {.pure.} = enum
@ -86,285 +101,382 @@ type
CodexConf* = object
configFile* {.
desc: "Loads the configuration from a TOML file"
defaultValueDesc: "none"
defaultValue: InputFile.none
name: "config-file" }: Option[InputFile]
desc: "Loads the configuration from a TOML file",
defaultValueDesc: "none",
defaultValue: InputFile.none,
name: "config-file"
.}: Option[InputFile]
logLevel* {.
defaultValue: "info"
desc: "Sets the log level",
name: "log-level" }: string
logLevel* {.defaultValue: "info", desc: "Sets the log level", name: "log-level".}:
string
logFormat* {.
hidden
desc: "Specifies what kind of logs should be written to stdout (auto, colors, nocolors, json)"
defaultValueDesc: "auto"
defaultValue: LogKind.Auto
name: "log-format" }: LogKind
desc:
"Specifies what kind of logs should be written to stdout (auto, " &
"colors, nocolors, json)",
defaultValueDesc: "auto",
defaultValue: LogKind.Auto,
name: "log-format"
.}: LogKind
metricsEnabled* {.
desc: "Enable the metrics server"
defaultValue: false
name: "metrics" }: bool
desc: "Enable the metrics server", defaultValue: false, name: "metrics"
.}: bool
metricsAddress* {.
desc: "Listening address of the metrics server"
defaultValue: ValidIpAddress.init("127.0.0.1")
defaultValueDesc: "127.0.0.1"
name: "metrics-address" }: ValidIpAddress
desc: "Listening address of the metrics server",
defaultValue: defaultAddress(config),
defaultValueDesc: "127.0.0.1",
name: "metrics-address"
.}: IpAddress
metricsPort* {.
desc: "Listening HTTP port of the metrics server"
defaultValue: 8008
name: "metrics-port" }: Port
desc: "Listening HTTP port of the metrics server",
defaultValue: 8008,
name: "metrics-port"
.}: Port
dataDir* {.
desc: "The directory where codex will store configuration and data"
defaultValue: DefaultDataDir
defaultValueDesc: $DefaultDataDir
abbr: "d"
name: "data-dir" }: OutDir
desc: "The directory where Storage will store configuration and data",
defaultValue: defaultDataDir(),
defaultValueDesc: "",
abbr: "d",
name: "data-dir"
.}: OutDir
listenAddrs* {.
desc: "Multi Addresses to listen on"
defaultValue: @[
MultiAddress.init("/ip4/0.0.0.0/tcp/0")
.expect("Should init multiaddress")]
defaultValueDesc: "/ip4/0.0.0.0/tcp/0"
abbr: "i"
name: "listen-addrs" }: seq[MultiAddress]
desc: "Multi Addresses to listen on",
defaultValue:
@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").expect("Should init multiaddress")],
defaultValueDesc: "/ip4/0.0.0.0/tcp/0",
abbr: "i",
name: "listen-addrs"
.}: seq[MultiAddress]
# TODO: change this once we integrate nat support
nat* {.
desc: "IP Addresses to announce behind a NAT"
defaultValue: ValidIpAddress.init("127.0.0.1")
defaultValueDesc: "127.0.0.1"
abbr: "a"
name: "nat" }: ValidIpAddress
discoveryIp* {.
desc: "Discovery listen address"
defaultValue: ValidIpAddress.init(IPv4_any())
defaultValueDesc: "0.0.0.0"
abbr: "e"
name: "disc-ip" }: ValidIpAddress
desc:
"Specify method to use for determining public address. " &
"Must be one of: any, none, upnp, pmp, extip:<IP>",
defaultValue: defaultNatConfig(),
defaultValueDesc: "any",
name: "nat"
.}: NatConfig
discoveryPort* {.
desc: "Discovery (UDP) port"
defaultValue: 8090.Port
defaultValueDesc: "8090"
abbr: "u"
name: "disc-port" }: Port
desc: "Discovery (UDP) port",
defaultValue: 8090.Port,
defaultValueDesc: "8090",
abbr: "u",
name: "disc-port"
.}: Port
netPrivKeyFile* {.
desc: "Source of network (secp256k1) private key file path or name"
defaultValue: "key"
name: "net-privkey" }: string
desc: "Source of network (secp256k1) private key file path or name",
defaultValue: "key",
name: "net-privkey"
.}: string
bootstrapNodes* {.
desc: "Specifies one or more bootstrap nodes to use when connecting to the network"
abbr: "b"
name: "bootstrap-node" }: seq[SignedPeerRecord]
desc:
"Specifies one or more bootstrap nodes to use when " &
"connecting to the network",
abbr: "b",
name: "bootstrap-node"
.}: seq[SignedPeerRecord]
maxPeers* {.
desc: "The maximum number of peers to connect to"
defaultValue: 160
name: "max-peers" }: int
desc: "The maximum number of peers to connect to",
defaultValue: 160,
name: "max-peers"
.}: int
numThreads* {.
desc:
"Number of worker threads (\"0\" = use as many threads as there are CPU cores available)",
defaultValue: DefaultThreadCount,
name: "num-threads"
.}: ThreadCount
agentString* {.
defaultValue: "Codex"
desc: "Node agent string which is used as identifier in network"
name: "agent-string" }: string
defaultValue: "Logos Storage",
desc: "Node agent string which is used as identifier in network",
name: "agent-string"
.}: string
apiBindAddress* {.
desc: "The REST API bind address"
defaultValue: "127.0.0.1"
desc: "The REST API bind address",
defaultValue: "127.0.0.1".some,
name: "api-bindaddr"
}: string
.}: Option[string]
apiPort* {.
desc: "The REST Api port",
defaultValue: 8080.Port
defaultValueDesc: "8080"
name: "api-port"
abbr: "p" }: Port
defaultValue: 8080.Port,
defaultValueDesc: "8080",
name: "api-port",
abbr: "p"
.}: Port
apiCorsAllowedOrigin* {.
desc: "The REST Api CORS allowed origin for downloading data. '*' will allow all origins, '' will allow none.",
defaultValue: string.none
defaultValueDesc: "Disallow all cross origin requests to download data"
name: "api-cors-origin" }: Option[string]
repoKind* {.
desc: "Backend for main repo store (fs, sqlite, leveldb)"
defaultValueDesc: "fs"
defaultValue: repoFS
name: "repo-kind" }: RepoKind
storageQuota* {.
desc: "The size of the total storage quota dedicated to the node"
defaultValue: DefaultQuotaBytes
defaultValueDesc: $DefaultQuotaBytes
name: "storage-quota"
abbr: "q" }: NBytes
blockTtl* {.
desc: "Default block timeout in seconds - 0 disables the ttl"
defaultValue: DefaultBlockTtl
defaultValueDesc: $DefaultBlockTtl
name: "block-ttl"
abbr: "t" }: Duration
blockMaintenanceInterval* {.
desc: "Time interval in seconds - determines frequency of block maintenance cycle: how often blocks are checked for expiration and cleanup"
defaultValue: DefaultBlockMaintenanceInterval
defaultValueDesc: $DefaultBlockMaintenanceInterval
name: "block-mi" }: Duration
blockMaintenanceNumberOfBlocks* {.
desc: "Number of blocks to check every maintenance cycle"
defaultValue: DefaultNumberOfBlocksToMaintainPerInterval
defaultValueDesc: $DefaultNumberOfBlocksToMaintainPerInterval
name: "block-mn" }: int
cacheSize* {.
desc: "The size of the block cache, 0 disables the cache - might help on slow hardrives"
defaultValue: 0
defaultValueDesc: "0"
name: "cache-size"
abbr: "c" }: NBytes
logFile* {.
desc: "Logs to file"
defaultValue: string.none
name: "log-file"
hidden
desc:
"The REST Api CORS allowed origin for downloading data. " &
"'*' will allow all origins, '' will allow none.",
defaultValue: string.none,
defaultValueDesc: "Disallow all cross origin requests to download data",
name: "api-cors-origin"
.}: Option[string]
case cmd* {.
defaultValue: noCmd
command }: StartUpCmd
repoKind* {.
desc: "Backend for main repo store (fs, sqlite, leveldb)",
defaultValueDesc: "fs",
defaultValue: repoFS,
name: "repo-kind"
.}: RepoKind
storageQuota* {.
desc: "The size of the total storage quota dedicated to the node",
defaultValue: DefaultQuotaBytes,
defaultValueDesc: $DefaultQuotaBytes,
name: "storage-quota",
abbr: "q"
.}: NBytes
blockTtl* {.
desc: "Default block timeout in seconds - 0 disables the ttl",
defaultValue: DefaultBlockTtl,
defaultValueDesc: $DefaultBlockTtl,
name: "block-ttl",
abbr: "t"
.}: Duration
blockMaintenanceInterval* {.
desc:
"Time interval in seconds - determines frequency of block " &
"maintenance cycle: how often blocks are checked " & "for expiration and cleanup",
defaultValue: DefaultBlockInterval,
defaultValueDesc: $DefaultBlockInterval,
name: "block-mi"
.}: Duration
blockMaintenanceNumberOfBlocks* {.
desc: "Number of blocks to check every maintenance cycle",
defaultValue: DefaultNumBlocksPerInterval,
defaultValueDesc: $DefaultNumBlocksPerInterval,
name: "block-mn"
.}: int
blockRetries* {.
desc: "Number of times to retry fetching a block before giving up",
defaultValue: DefaultBlockRetries,
defaultValueDesc: $DefaultBlockRetries,
name: "block-retries"
.}: int
cacheSize* {.
desc:
"The size of the block cache, 0 disables the cache - " &
"might help on slow hardrives",
defaultValue: 0,
defaultValueDesc: "0",
name: "cache-size",
abbr: "c"
.}: NBytes
logFile* {.
desc: "Logs to file", defaultValue: string.none, name: "log-file", hidden
.}: Option[string]
case cmd* {.defaultValue: noCmd, command.}: StartUpCmd
of persistence:
ethProvider* {.
desc: "The URL of the JSON-RPC API of the Ethereum node"
defaultValue: "ws://localhost:8545"
desc: "The URL of the JSON-RPC API of the Ethereum node",
defaultValue: "ws://localhost:8545",
name: "eth-provider"
.}: string
ethAccount* {.
desc: "The Ethereum account that is used for storage contracts"
defaultValue: EthAddress.none
defaultValueDesc: ""
desc: "The Ethereum account that is used for storage contracts",
defaultValue: EthAddress.none,
defaultValueDesc: "",
name: "eth-account"
.}: Option[EthAddress]
ethPrivateKey* {.
desc: "File containing Ethereum private key for storage contracts"
defaultValue: string.none
defaultValueDesc: ""
desc: "File containing Ethereum private key for storage contracts",
defaultValue: string.none,
defaultValueDesc: "",
name: "eth-private-key"
.}: Option[string]
marketplaceAddress* {.
desc: "Address of deployed Marketplace contract"
defaultValue: EthAddress.none
defaultValueDesc: ""
desc: "Address of deployed Marketplace contract",
defaultValue: EthAddress.none,
defaultValueDesc: "",
name: "marketplace-address"
.}: Option[EthAddress]
# TODO: should go behind a feature flag
simulateProofFailures* {.
desc: "Simulates proof failures once every N proofs. 0 = disabled."
defaultValue: 0
name: "simulate-proof-failures"
desc: "Simulates proof failures once every N proofs. 0 = disabled.",
defaultValue: 0,
name: "simulate-proof-failures",
hidden
.}: int
validator* {.
desc: "Enables validator, requires an Ethereum node"
defaultValue: false
desc: "Enables validator, requires an Ethereum node",
defaultValue: false,
name: "validator"
.}: bool
validatorMaxSlots* {.
desc: "Maximum number of slots that the validator monitors"
defaultValue: 1000
desc: "Maximum number of slots that the validator monitors",
longDesc:
"If set to 0, the validator will not limit " &
"the maximum number of slots it monitors",
defaultValue: 1000,
name: "validator-max-slots"
.}: int
.}: MaxSlots
case persistenceCmd* {.
defaultValue: noCmd
command }: PersistenceCmd
validatorGroups* {.
desc: "Slot validation groups",
longDesc:
"A number indicating total number of groups into " &
"which the whole slot id space will be divided. " &
"The value must be in the range [2, 65535]. " &
"If not provided, the validator will observe " &
"the whole slot id space and the value of " &
"the --validator-group-index parameter will be ignored. " &
"Powers of twos are advised for even distribution",
defaultValue: ValidationGroups.none,
name: "validator-groups"
.}: Option[ValidationGroups]
validatorGroupIndex* {.
desc: "Slot validation group index",
longDesc:
"The value provided must be in the range " &
"[0, validatorGroups). Ignored when --validator-groups " &
"is not provided. Only slot ids satisfying condition " &
"[(slotId mod validationGroups) == groupIndex] will be " &
"observed by the validator",
defaultValue: 0,
name: "validator-group-index"
.}: uint16
rewardRecipient* {.
desc: "Address to send payouts to (eg rewards and refunds)",
name: "reward-recipient"
.}: Option[EthAddress]
marketplaceRequestCacheSize* {.
desc:
"Maximum number of StorageRequests kept in memory." &
"Reduces fetching of StorageRequest data from the contract.",
defaultValue: DefaultRequestCacheSize,
defaultValueDesc: $DefaultRequestCacheSize,
name: "request-cache-size",
hidden
.}: uint16
maxPriorityFeePerGas* {.
desc:
"Sets the default maximum priority fee per gas for Ethereum EIP-1559 transactions, in wei, when not provided by the network.",
defaultValue: DefaultMaxPriorityFeePerGas,
defaultValueDesc: $DefaultMaxPriorityFeePerGas,
name: "max-priority-fee-per-gas",
hidden
.}: uint64
case persistenceCmd* {.defaultValue: noCmd, command.}: PersistenceCmd
of PersistenceCmd.prover:
circuitDir* {.
desc: "Directory where Storage will store proof circuit data",
defaultValue: defaultDataDir() / "circuits",
defaultValueDesc: "data/circuits",
abbr: "cd",
name: "circuit-dir"
.}: OutDir
circomR1cs* {.
desc: "The r1cs file for the storage circuit"
defaultValue: $DefaultDataDir / "circuits" / "proof_main.r1cs"
defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.r1cs"
desc: "The r1cs file for the storage circuit",
defaultValue: defaultDataDir() / "circuits" / "proof_main.r1cs",
defaultValueDesc: "data/circuits/proof_main.r1cs",
name: "circom-r1cs"
.}: InputFile
circomWasm* {.
desc: "The wasm file for the storage circuit"
defaultValue: $DefaultDataDir / "circuits" / "proof_main.wasm"
defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.wasm"
desc: "The wasm file for the storage circuit",
defaultValue: defaultDataDir() / "circuits" / "proof_main.wasm",
defaultValueDesc: "data/circuits/proof_main.wasm",
name: "circom-wasm"
.}: InputFile
circomZkey* {.
desc: "The zkey file for the storage circuit"
defaultValue: $DefaultDataDir / "circuits" / "proof_main.zkey"
defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.zkey"
desc: "The zkey file for the storage circuit",
defaultValue: defaultDataDir() / "circuits" / "proof_main.zkey",
defaultValueDesc: "data/circuits/proof_main.zkey",
name: "circom-zkey"
.}: InputFile
# TODO: should probably be hidden and behind a feature flag
circomNoZkey* {.
desc: "Ignore the zkey file - use only for testing!"
defaultValue: false
desc: "Ignore the zkey file - use only for testing!",
defaultValue: false,
name: "circom-no-zkey"
.}: bool
numProofSamples* {.
desc: "Number of samples to prove"
defaultValue: DefaultSamplesNum
defaultValueDesc: $DefaultSamplesNum
name: "proof-samples" }: int
desc: "Number of samples to prove",
defaultValue: DefaultSamplesNum,
defaultValueDesc: $DefaultSamplesNum,
name: "proof-samples"
.}: int
maxSlotDepth* {.
desc: "The maximum depth of the slot tree"
defaultValue: DefaultMaxSlotDepth
defaultValueDesc: $DefaultMaxSlotDepth
name: "max-slot-depth" }: int
desc: "The maximum depth of the slot tree",
defaultValue: DefaultMaxSlotDepth,
defaultValueDesc: $DefaultMaxSlotDepth,
name: "max-slot-depth"
.}: int
maxDatasetDepth* {.
desc: "The maximum depth of the dataset tree"
defaultValue: DefaultMaxDatasetDepth
defaultValueDesc: $DefaultMaxDatasetDepth
name: "max-dataset-depth" }: int
desc: "The maximum depth of the dataset tree",
defaultValue: DefaultMaxDatasetDepth,
defaultValueDesc: $DefaultMaxDatasetDepth,
name: "max-dataset-depth"
.}: int
maxBlockDepth* {.
desc: "The maximum depth of the network block merkle tree"
defaultValue: DefaultBlockDepth
defaultValueDesc: $DefaultBlockDepth
name: "max-block-depth" }: int
desc: "The maximum depth of the network block merkle tree",
defaultValue: DefaultBlockDepth,
defaultValueDesc: $DefaultBlockDepth,
name: "max-block-depth"
.}: int
maxCellElms* {.
desc: "The maximum number of elements in a cell"
defaultValue: DefaultCellElms
defaultValueDesc: $DefaultCellElms
name: "max-cell-elements" }: int
desc: "The maximum number of elements in a cell",
defaultValue: DefaultCellElms,
defaultValueDesc: $DefaultCellElms,
name: "max-cell-elements"
.}: int
of PersistenceCmd.noCmd:
discard
of StartUpCmd.noCmd:
discard # end of persistence
EthAddress* = ethers.Address
logutils.formatIt(LogFormat.textLines, EthAddress): it.short0xHexLog
logutils.formatIt(LogFormat.json, EthAddress): %it
logutils.formatIt(LogFormat.textLines, EthAddress):
it.short0xHexLog
logutils.formatIt(LogFormat.json, EthAddress):
%it
func defaultAddress*(conf: CodexConf): IpAddress =
result = static parseIpAddress("127.0.0.1")
func defaultNatConfig*(): NatConfig =
result = NatConfig(hasExtIp: false, nat: NatStrategy.NatAny)
func persistence*(self: CodexConf): bool =
self.cmd == StartUpCmd.persistence
@ -373,7 +485,7 @@ func prover*(self: CodexConf): bool =
self.persistence and self.persistenceCmd == PersistenceCmd.prover
proc getCodexVersion(): string =
let tag = strip(staticExec("git tag"))
let tag = strip(staticExec("git describe --tags --abbrev=0"))
if tag.isEmptyOrWhitespace:
return "untagged build"
return tag
@ -383,63 +495,132 @@ proc getCodexRevision(): string =
var res = strip(staticExec("git rev-parse --short HEAD"))
return res
proc getCodexContractsRevision(): string =
let res =
strip(staticExec("git rev-parse --short HEAD:vendor/logos-storage-contracts-eth"))
return res
proc getNimBanner(): string =
staticExec("nim --version | grep Version")
const
codexVersion* = getCodexVersion()
codexRevision* = getCodexRevision()
codexContractsRevision* = getCodexContractsRevision()
nimBanner* = getNimBanner()
codexFullVersion* =
"Codex version: " & codexVersion & "\p" &
"Codex revision: " & codexRevision & "\p" &
nimBanner
"Storage version: " & codexVersion & "\p" & "Storage revision: " & codexRevision &
"\p" & "Storage contracts revision: " & codexContractsRevision & "\p" & nimBanner
proc parseCmdArg*(T: typedesc[MultiAddress],
input: string): MultiAddress
{.upraises: [ValueError, LPError].} =
proc parseCmdArg*(
T: typedesc[MultiAddress], input: string
): MultiAddress {.raises: [ValueError].} =
var ma: MultiAddress
try:
let res = MultiAddress.init(input)
if res.isOk:
ma = res.get()
else:
warn "Invalid MultiAddress", input=input, error = res.error()
fatal "Invalid MultiAddress", input = input, error = res.error()
quit QuitFailure
except LPError as exc:
fatal "Invalid MultiAddress uri", uri = input, error = exc.msg
quit QuitFailure
ma
proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T =
proc parse*(T: type ThreadCount, p: string): Result[ThreadCount, string] =
try:
let count = parseInt(p)
if count != 0 and count < 2:
return err("Invalid number of threads: " & p)
return ok(ThreadCount(count))
except ValueError as e:
return err("Invalid number of threads: " & p & ", error=" & e.msg)
proc parseCmdArg*(T: type ThreadCount, input: string): T =
let val = ThreadCount.parse(input)
if val.isErr:
fatal "Cannot parse the thread count.", input = input, error = val.error()
quit QuitFailure
return val.get()
proc parse*(T: type SignedPeerRecord, p: string): Result[SignedPeerRecord, string] =
var res: SignedPeerRecord
try:
if not res.fromURI(uri):
warn "Invalid SignedPeerRecord uri", uri = uri
if not res.fromURI(p):
return err("The uri is not a valid SignedPeerRecord: " & p)
return ok(res)
except LPError, Base64Error:
let e = getCurrentException()
return err(e.msg)
proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T =
let res = SignedPeerRecord.parse(uri)
if res.isErr:
fatal "Cannot parse the signed peer.", error = res.error(), input = uri
quit QuitFailure
except CatchableError as exc:
warn "Invalid SignedPeerRecord uri", uri = uri, error = exc.msg
return res.get()
func parse*(T: type NatConfig, p: string): Result[NatConfig, string] =
case p.toLowerAscii
of "any":
return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatAny))
of "none":
return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatNone))
of "upnp":
return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatUpnp))
of "pmp":
return ok(NatConfig(hasExtIp: false, nat: NatStrategy.NatPmp))
else:
if p.startsWith("extip:"):
try:
let ip = parseIpAddress(p[6 ..^ 1])
return ok(NatConfig(hasExtIp: true, extIp: ip))
except ValueError:
let error = "Not a valid IP address: " & p[6 ..^ 1]
return err(error)
else:
return err("Not a valid NAT option: " & p)
proc parseCmdArg*(T: type NatConfig, p: string): T =
let res = NatConfig.parse(p)
if res.isErr:
fatal "Cannot parse the NAT config.", error = res.error(), input = p
quit QuitFailure
res
return res.get()
proc completeCmdArg*(T: type NatConfig, val: string): seq[string] =
return @[]
proc parseCmdArg*(T: type EthAddress, address: string): T =
EthAddress.init($address).get()
proc parseCmdArg*(T: type NBytes, val: string): T =
func parse*(T: type NBytes, p: string): Result[NBytes, string] =
var num = 0'i64
let count = parseSize(val, num, alwaysBin = true)
let count = parseSize(p, num, alwaysBin = true)
if count == 0:
warn "Invalid number of bytes", nbytes = val
return err("Invalid number of bytes: " & p)
return ok(NBytes(num))
proc parseCmdArg*(T: type NBytes, val: string): T =
let res = NBytes.parse(val)
if res.isErr:
fatal "Cannot parse NBytes.", error = res.error(), input = val
quit QuitFailure
NBytes(num)
return res.get()
proc parseCmdArg*(T: type Duration, val: string): T =
var dur: Duration
let count = parseDuration(val, dur)
if count == 0:
warn "Cannot parse duration", dur = dur
fatal "Cannot parse duration", dur = dur
quit QuitFailure
dur
proc readValue*(r: var TomlReader, val: var EthAddress)
{.upraises: [SerializationError, IOError].} =
proc readValue*(
r: var TomlReader, val: var EthAddress
) {.raises: [SerializationError, IOError].} =
val = EthAddress.init(r.readValue(string)).get()
proc readValue*(r: var TomlReader, val: var SignedPeerRecord) =
@ -447,7 +628,11 @@ proc readValue*(r: var TomlReader, val: var SignedPeerRecord) =
error "invalid SignedPeerRecord configuration value", error = err.msg
quit QuitFailure
try:
val = SignedPeerRecord.parseCmdArg(uri)
except LPError as err:
fatal "Invalid SignedPeerRecord uri", uri = uri, error = err.msg
quit QuitFailure
proc readValue*(r: var TomlReader, val: var MultiAddress) =
without input =? r.readValue(string).catch, err:
@ -458,11 +643,12 @@ proc readValue*(r: var TomlReader, val: var MultiAddress) =
if res.isOk:
val = res.get()
else:
warn "Invalid MultiAddress", input=input, error=res.error()
fatal "Invalid MultiAddress", input = input, error = res.error()
quit QuitFailure
proc readValue*(r: var TomlReader, val: var NBytes)
{.upraises: [SerializationError, IOError].} =
proc readValue*(
r: var TomlReader, val: var NBytes
) {.raises: [SerializationError, IOError].} =
var value = 0'i64
var str = r.readValue(string)
let count = parseSize(str, value, alwaysBin = true)
@ -471,8 +657,18 @@ proc readValue*(r: var TomlReader, val: var NBytes)
quit QuitFailure
val = NBytes(value)
proc readValue*(r: var TomlReader, val: var Duration)
{.upraises: [SerializationError, IOError].} =
proc readValue*(
r: var TomlReader, val: var ThreadCount
) {.raises: [SerializationError, IOError].} =
var str = r.readValue(string)
try:
val = parseCmdArg(ThreadCount, str)
except CatchableError as err:
raise newException(SerializationError, err.msg)
proc readValue*(
r: var TomlReader, val: var Duration
) {.raises: [SerializationError, IOError].} =
var str = r.readValue(string)
var dur: Duration
let count = parseDuration(str, dur)
@ -481,14 +677,26 @@ proc readValue*(r: var TomlReader, val: var Duration)
quit QuitFailure
val = dur
proc readValue*(
r: var TomlReader, val: var NatConfig
) {.raises: [SerializationError].} =
val =
try:
parseCmdArg(NatConfig, r.readValue(string))
except CatchableError as err:
raise newException(SerializationError, err.msg)
# no idea why confutils needs this:
proc completeCmdArg*(T: type EthAddress; val: string): seq[string] =
proc completeCmdArg*(T: type EthAddress, val: string): seq[string] =
discard
proc completeCmdArg*(T: type NBytes; val: string): seq[string] =
proc completeCmdArg*(T: type NBytes, val: string): seq[string] =
discard
proc completeCmdArg*(T: type Duration; val: string): seq[string] =
proc completeCmdArg*(T: type Duration, val: string): seq[string] =
discard
proc completeCmdArg*(T: type ThreadCount, val: string): seq[string] =
discard
# silly chronicles, colors is a compile-time property
@ -527,13 +735,16 @@ proc stripAnsi*(v: string): string =
res
proc updateLogLevel*(logLevel: string) {.upraises: [ValueError].} =
proc updateLogLevel*(logLevel: string) {.raises: [ValueError].} =
# Updates log levels (without clearing old ones)
let directives = logLevel.split(";")
try:
setLogLevel(parseEnum[LogLevel](directives[0].toUpperAscii))
except ValueError:
raise (ref ValueError)(msg: "Please specify one of: trace, debug, info, notice, warn, error or fatal")
raise (ref ValueError)(
msg:
"Please specify one of: trace, debug, " & "info, notice, warn, error or fatal"
)
if directives.len > 1:
for topicName, settings in parseTopicDirectives(directives[1 ..^ 1]):
@ -545,7 +756,9 @@ proc setupLogging*(conf: CodexConf) =
warn "Logging configuration options not enabled in the current build"
else:
var logFile: ?IoHandle
proc noOutput(logLevel: LogLevel, msg: LogOutputStr) = discard
proc noOutput(logLevel: LogLevel, msg: LogOutputStr) =
discard
proc writeAndFlush(f: File, msg: LogOutputStr) =
try:
f.write(msg)
@ -566,14 +779,11 @@ proc setupLogging*(conf: CodexConf) =
defaultChroniclesStream.outputs[2].writer = noOutput
if logFilePath =? conf.logFile and logFilePath.len > 0:
let logFileHandle = openFile(
logFilePath,
{OpenFlags.Write, OpenFlags.Create, OpenFlags.Truncate}
)
let logFileHandle =
openFile(logFilePath, {OpenFlags.Write, OpenFlags.Create, OpenFlags.Truncate})
if logFileHandle.isErr:
error "failed to open log file",
path = logFilePath,
errorCode = $logFileHandle.error
path = logFilePath, errorCode = $logFileHandle.error
else:
logFile = logFileHandle.option
defaultChroniclesStream.outputs[2].writer = fileFlush
@ -581,39 +791,30 @@ proc setupLogging*(conf: CodexConf) =
defaultChroniclesStream.outputs[1].writer = noOutput
let writer =
case conf.logFormat:
case conf.logFormat
of LogKind.Auto:
if isatty(stdout):
if isatty(stdout): stdoutFlush else: noColorsFlush
of LogKind.Colors:
stdoutFlush
else:
of LogKind.NoColors:
noColorsFlush
of LogKind.Colors: stdoutFlush
of LogKind.NoColors: noColorsFlush
of LogKind.Json:
defaultChroniclesStream.outputs[1].writer = stdoutFlush
noOutput
of LogKind.None:
noOutput
when codex_enable_log_counter:
when storage_enable_log_counter:
var counter = 0.uint64
proc numberedWriter(logLevel: LogLevel, msg: LogOutputStr) =
inc(counter)
let withoutNewLine = msg[0 ..^ 2]
writer(logLevel, withoutNewLine & " count=" & $counter & "\n")
defaultChroniclesStream.outputs[0].writer = numberedWriter
else:
defaultChroniclesStream.outputs[0].writer = writer
try:
updateLogLevel(conf.logLevel)
except ValueError as err:
try:
stderr.write "Invalid value for --log-level. " & err.msg & "\n"
except IOError:
echo "Invalid value for --log-level. " & err.msg
quit QuitFailure
proc setupMetrics*(config: CodexConf) =
if config.metricsEnabled:
let metricsAddress = config.metricsAddress

View File

@ -0,0 +1,8 @@
const ContentIdsExts = [
multiCodec("codex-root"),
multiCodec("codex-manifest"),
multiCodec("codex-block"),
multiCodec("codex-slot-root"),
multiCodec("codex-proving-root"),
multiCodec("codex-slot-cell"),
]

View File

@ -2,8 +2,10 @@ import contracts/requests
import contracts/marketplace
import contracts/market
import contracts/interactions
import contracts/provider
export requests
export marketplace
export market
export interactions
export provider

View File

@ -1,13 +1,13 @@
Codex Contracts in Nim
Logos Storage Contracts in Nim
=======================
Nim API for the [Codex smart contracts][1].
Nim API for the [Logos Storage smart contracts][1].
Usage
-----
For a global overview of the steps involved in starting and fulfilling a
storage contract, see [Codex Contracts][1].
storage contract, see [Logos Storage Contracts][1].
Smart contract
--------------
@ -144,5 +144,5 @@ await storage
.markProofAsMissing(id, period)
```
[1]: https://github.com/status-im/codex-contracts-eth/
[2]: https://github.com/status-im/codex-research/blob/main/design/storage-proof-timing.md
[1]: https://github.com/logos-storage/logos-storage-contracts-eth/
[2]: https://github.com/logos-storage/logos-storage-research/blob/master/design/storage-proof-timing.md

View File

@ -1,26 +1,32 @@
{.push raises: [].}
import std/times
import pkg/ethers
import pkg/questionable
import pkg/chronos
import pkg/stint
import ../clock
import ../conf
import ../utils/trackedfutures
export clock
logScope:
topics = "contracts clock"
type
OnChainClock* = ref object of Clock
type OnChainClock* = ref object of Clock
provider: Provider
subscription: Subscription
offset: times.Duration
blockNumber: UInt256
started: bool
newBlock: AsyncEvent
trackedFutures: TrackedFutures
proc new*(_: type OnChainClock, provider: Provider): OnChainClock =
OnChainClock(provider: provider, newBlock: newAsyncEvent())
OnChainClock(
provider: provider, newBlock: newAsyncEvent(), trackedFutures: TrackedFutures()
)
proc update(clock: OnChainClock, blck: Block) =
if number =? blck.number and number > clock.blockNumber:
@ -28,26 +34,28 @@ proc update(clock: OnChainClock, blck: Block) =
let computerTime = getTime()
clock.offset = blockTime - computerTime
clock.blockNumber = number
trace "updated clock", blockTime=blck.timestamp, blockNumber=number, offset=clock.offset
trace "updated clock",
blockTime = blck.timestamp, blockNumber = number, offset = clock.offset
clock.newBlock.fire()
proc update(clock: OnChainClock) {.async.} =
proc update(clock: OnChainClock) {.async: (raises: []).} =
try:
if latest =? (await clock.provider.getBlock(BlockTag.latest)):
clock.update(latest)
except CancelledError as error:
raise error
except CatchableError as error:
debug "error updating clock: ", error = error.msg
discard
method start*(clock: OnChainClock) {.async.} =
if clock.started:
return
proc onBlock(_: Block) =
proc onBlock(blckResult: ?!Block) =
if eventError =? blckResult.errorOption:
error "There was an error in block subscription", msg = eventError.msg
return
# ignore block parameter; hardhat may call this with pending blocks
asyncSpawn clock.update()
clock.trackedFutures.track(clock.update())
await clock.update()
@ -59,13 +67,16 @@ method stop*(clock: OnChainClock) {.async.} =
return
await clock.subscription.unsubscribe()
await clock.trackedFutures.cancelTracked()
clock.started = false
method now*(clock: OnChainClock): SecondsSince1970 =
doAssert clock.started, "clock should be started before calling now()"
return toUnix(getTime() + clock.offset)
method waitUntil*(clock: OnChainClock, time: SecondsSince1970) {.async.} =
method waitUntil*(
clock: OnChainClock, time: SecondsSince1970
) {.async: (raises: [CancelledError]).} =
while (let difference = time - clock.now(); difference > 0):
clock.newBlock.clear()
discard await clock.newBlock.wait().withTimeout(chronos.seconds(difference))

View File

@ -1,47 +1,71 @@
import pkg/contractabi
import pkg/ethers/fields
import pkg/ethers/contracts/fields
import pkg/questionable/results
export contractabi
const DefaultRequestCacheSize* = 128.uint16
const DefaultMaxPriorityFeePerGas* = 1_000_000_000.uint64
type
MarketplaceConfig* = object
collateral*: CollateralConfig
proofs*: ProofConfig
CollateralConfig* = object
repairRewardPercentage*: uint8 # percentage of the remaining collateral a slot keeps after it has been freed
maxNumberOfSlashes*: uint8 # frees slot when the number of slashes reaches this value
slashCriterion*: uint16 # number of proofs missed that leads to slashing
slashPercentage*: uint8 # percentage of the collateral that is slashed
ProofConfig* = object
period*: UInt256 # proofs requirements are calculated per period (in seconds)
timeout*: UInt256 # mark proofs as missing before the timeout (in seconds)
downtime*: uint8 # ignore this many recent blocks for proof requirements
zkeyHash*: string # hash of the zkey file which is linked to the verifier
reservations*: SlotReservationsConfig
requestDurationLimit*: uint64
CollateralConfig* = object
repairRewardPercentage*: uint8
# percentage of the remaining collateral a slot keeps after it has been freed
maxNumberOfSlashes*: uint8 # frees slot when the number of slashes reaches this value
slashPercentage*: uint8 # percentage of the collateral that is slashed
validatorRewardPercentage*: uint8
# percentage of the slashed amount going to the validators
ProofConfig* = object
period*: uint64 # proofs requirements are calculated per period (in seconds)
timeout*: uint64 # mark proofs as missing before the timeout (in seconds)
downtime*: uint8 # ignore this many recent blocks for proof requirements
downtimeProduct*: uint8
# Ensures the pointer does not remain in downtime for many consecutive
# periods. For each period increase, move the pointer `downtimeProduct`
# blocks. Should be a prime number to ensure there are no cycles.
zkeyHash*: string # hash of the zkey file which is linked to the verifier
SlotReservationsConfig* = object
maxReservations*: uint8
func fromTuple(_: type ProofConfig, tupl: tuple): ProofConfig =
ProofConfig(
period: tupl[0],
timeout: tupl[1],
downtime: tupl[2],
zkeyHash: tupl[3]
downtimeProduct: tupl[3],
zkeyHash: tupl[4],
)
func fromTuple(_: type SlotReservationsConfig, tupl: tuple): SlotReservationsConfig =
SlotReservationsConfig(maxReservations: tupl[0])
func fromTuple(_: type CollateralConfig, tupl: tuple): CollateralConfig =
CollateralConfig(
repairRewardPercentage: tupl[0],
maxNumberOfSlashes: tupl[1],
slashCriterion: tupl[2],
slashPercentage: tupl[3]
slashPercentage: tupl[2],
validatorRewardPercentage: tupl[3],
)
func fromTuple(_: type MarketplaceConfig, tupl: tuple): MarketplaceConfig =
MarketplaceConfig(
collateral: tupl[0],
proofs: tupl[1]
proofs: tupl[1],
reservations: tupl[2],
requestDurationLimit: tupl[3],
)
func solidityType*(_: type SlotReservationsConfig): string =
solidityType(SlotReservationsConfig.fieldTypes)
func solidityType*(_: type ProofConfig): string =
solidityType(ProofConfig.fieldTypes)
@ -49,7 +73,10 @@ func solidityType*(_: type CollateralConfig): string =
solidityType(CollateralConfig.fieldTypes)
func solidityType*(_: type MarketplaceConfig): string =
solidityType(CollateralConfig.fieldTypes)
solidityType(MarketplaceConfig.fieldTypes)
func encode*(encoder: var AbiEncoder, slot: SlotReservationsConfig) =
encoder.write(slot.fieldValues)
func encode*(encoder: var AbiEncoder, slot: ProofConfig) =
encoder.write(slot.fieldValues)
@ -64,6 +91,10 @@ func decode*(decoder: var AbiDecoder, T: type ProofConfig): ?!T =
let tupl = ?decoder.read(ProofConfig.fieldTypes)
success ProofConfig.fromTuple(tupl)
func decode*(decoder: var AbiDecoder, T: type SlotReservationsConfig): ?!T =
let tupl = ?decoder.read(SlotReservationsConfig.fieldTypes)
success SlotReservationsConfig.fromTuple(tupl)
func decode*(decoder: var AbiDecoder, T: type CollateralConfig): ?!T =
let tupl = ?decoder.read(CollateralConfig.fieldTypes)
success CollateralConfig.fromTuple(tupl)
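
The `fromTuple` helpers above rely on positional matching with the Solidity struct, so adding or removing a field shifts every index after it. A hypothetical sketch of the new ProofConfig layout, with made-up values for illustration:

```
# Solidity field order: (period, timeout, downtime, downtimeProduct, zkeyHash)
let tupl = (90'u64, 30'u64, 64'u8, 67'u8, "abc123")
let config = ProofConfig(
  period: tupl[0],
  timeout: tupl[1],
  downtime: tupl[2],
  downtimeProduct: tupl[3], # new field: shifts zkeyHash from index 3 to 4
  zkeyHash: tupl[4],
)
```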

View File

@ -9,17 +9,21 @@ import ./marketplace
type Deployment* = ref object
provider: Provider
config: CodexConf
marketplaceAddressOverride: ?Address
const knownAddresses = {
# Hardhat localhost network
"31337": {
"Marketplace": Address.init("0x322813Fd9A801c5507c9de605d63CEA4f2CE6c44"),
}.toTable,
"31337":
{"Marketplace": Address.init("0x322813Fd9A801c5507c9de605d63CEA4f2CE6c44")}.toTable,
# Taiko Alpha-3 Testnet
"167005": {
"Marketplace": Address.init("0x948CF9291b77Bd7ad84781b9047129Addf1b894F")
}.toTable
"167005":
{"Marketplace": Address.init("0x948CF9291b77Bd7ad84781b9047129Addf1b894F")}.toTable,
# Codex Testnet - Jun 19 2025 13:11:56 PM (+00:00 UTC)
"789987":
{"Marketplace": Address.init("0x5378a4EA5dA2a548ce22630A3AE74b052000C62D")}.toTable,
# Linea (Status)
"1660990954":
{"Marketplace": Address.init("0x34F606C65869277f236ce07aBe9af0B8c88F486B")}.toTable,
}.toTable
proc getKnownAddress(T: type, chainId: UInt256): ?Address =
@ -31,12 +35,16 @@ proc getKnownAddress(T: type, chainId: UInt256): ?Address =
return knownAddresses[id].getOrDefault($T, Address.none)
proc new*(_: type Deployment, provider: Provider, config: CodexConf): Deployment =
Deployment(provider: provider, config: config)
proc new*(
_: type Deployment,
provider: Provider,
marketplaceAddressOverride: ?Address = none Address,
): Deployment =
Deployment(provider: provider, marketplaceAddressOverride: marketplaceAddressOverride)
proc address*(deployment: Deployment, contract: type): Future[?Address] {.async.} =
when contract is Marketplace:
if address =? deployment.config.marketplaceAddress:
if address =? deployment.marketplaceAddressOverride:
return some address
let chainId = await deployment.provider.getChainId()
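
Resolution order is: explicit override first, otherwise the `knownAddresses` entry for the connected chain id. A hedged usage sketch, assuming a connected `provider` and an async context:

```
# Hypothetical wiring: no override, so the address comes from the
# knownAddresses table keyed by the provider's chain id.
let deployment = Deployment.new(provider, marketplaceAddressOverride = none Address)
without marketplaceAddress =? await deployment.address(Marketplace):
  raiseAssert "no known Marketplace deployment for this chain"
```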

View File

@ -9,13 +9,12 @@ import ./interactions
export purchasing
export logutils
type
ClientInteractions* = ref object of ContractInteractions
type ClientInteractions* = ref object of ContractInteractions
purchasing*: Purchasing
proc new*(_: type ClientInteractions,
clock: OnChainClock,
purchasing: Purchasing): ClientInteractions =
proc new*(
_: type ClientInteractions, clock: OnChainClock, purchasing: Purchasing
): ClientInteractions =
ClientInteractions(clock: clock, purchasing: purchasing)
proc start*(self: ClientInteractions) {.async.} =

View File

@ -7,15 +7,10 @@ import ./interactions
export sales
export logutils
type
HostInteractions* = ref object of ContractInteractions
type HostInteractions* = ref object of ContractInteractions
sales*: Sales
proc new*(
_: type HostInteractions,
clock: Clock,
sales: Sales
): HostInteractions =
proc new*(_: type HostInteractions, clock: Clock, sales: Sales): HostInteractions =
## Create a new HostInteractions instance
##
HostInteractions(clock: clock, sales: sales)

View File

@ -5,8 +5,7 @@ import ../market
export clock
type
ContractInteractions* = ref object of RootObj
type ContractInteractions* = ref object of RootObj
clock*: Clock
method start*(self: ContractInteractions) {.async, base.} =

View File

@ -3,13 +3,12 @@ import ../../validation
export validation
type
ValidatorInteractions* = ref object of ContractInteractions
type ValidatorInteractions* = ref object of ContractInteractions
validation: Validation
proc new*(_: type ValidatorInteractions,
clock: OnChainClock,
validation: Validation): ValidatorInteractions =
proc new*(
_: type ValidatorInteractions, clock: OnChainClock, validation: Validation
): ValidatorInteractions =
ValidatorInteractions(clock: clock, validation: validation)
proc start*(self: ValidatorInteractions) {.async.} =

View File

@ -1,14 +1,14 @@
import std/sequtils
import std/strformat
import std/strutils
import std/sugar
import pkg/ethers
import pkg/upraises
import pkg/questionable
import pkg/lrucache
import ../utils/exceptions
import ../logutils
import ../market
import ./marketplace
import ./proofs
import ./provider
export market
@ -19,122 +19,226 @@ type
OnChainMarket* = ref object of Market
contract: Marketplace
signer: Signer
rewardRecipient: ?Address
configuration: ?MarketplaceConfig
requestCache: LruCache[string, StorageRequest]
allowanceLock: AsyncLock
MarketSubscription = market.Subscription
EventSubscription = ethers.Subscription
OnChainMarketSubscription = ref object of MarketSubscription
eventSubscription: EventSubscription
func new*(_: type OnChainMarket, contract: Marketplace): OnChainMarket =
func new*(
_: type OnChainMarket,
contract: Marketplace,
rewardRecipient = Address.none,
requestCacheSize: uint16 = DefaultRequestCacheSize,
): OnChainMarket =
without signer =? contract.signer:
raiseAssert("Marketplace contract should have a signer")
var requestCache = newLruCache[string, StorageRequest](int(requestCacheSize))
OnChainMarket(
contract: contract,
signer: signer,
rewardRecipient: rewardRecipient,
requestCache: requestCache,
)
proc raiseMarketError(message: string) {.raises: [MarketError].} =
raise newException(MarketError, message)
template convertEthersError(body) =
func prefixWith(suffix, prefix: string, separator = ": "): string =
if prefix.len > 0:
return &"{prefix}{separator}{suffix}"
else:
return suffix
template convertEthersError(msg: string = "", body) =
try:
body
except EthersError as error:
raiseMarketError(error.msgDetail)
raiseMarketError(error.msgDetail.prefixWith(msg))
proc approveFunds(market: OnChainMarket, amount: UInt256) {.async.} =
proc config(
market: OnChainMarket
): Future[MarketplaceConfig] {.async: (raises: [CancelledError, MarketError]).} =
without resolvedConfig =? market.configuration:
if err =? (await market.loadConfig()).errorOption:
raiseMarketError(err.msg)
without config =? market.configuration:
raiseMarketError("Failed to access to config from the Marketplace contract")
return config
return resolvedConfig
template withAllowanceLock*(market: OnChainMarket, body: untyped) =
if market.allowanceLock.isNil:
market.allowanceLock = newAsyncLock()
await market.allowanceLock.acquire()
try:
body
finally:
try:
market.allowanceLock.release()
except AsyncLockError as error:
raise newException(Defect, error.msg, error)
proc approveFunds(
market: OnChainMarket, amount: UInt256
) {.async: (raises: [CancelledError, MarketError]).} =
debug "Approving tokens", amount
convertEthersError:
convertEthersError("Failed to approve funds"):
let tokenAddress = await market.contract.token()
let token = Erc20Token.new(tokenAddress, market.signer)
discard await token.increaseAllowance(market.contract.address(), amount).confirm(0)
let owner = await market.signer.getAddress()
let spender = market.contract.address
market.withAllowanceLock:
let allowance = await token.allowance(owner, spender)
discard await token.approve(spender, allowance + amount).confirm(1)
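
Note the shift from `increaseAllowance` to an absolute `approve(allowance + amount)`: the read-modify-write is only safe because `withAllowanceLock` serializes it; otherwise two concurrent approvals could each read the same allowance and one increase would be lost. A self-contained sketch of the race being prevented, using stand-in variables rather than the real token calls:

```
import pkg/stint

# Stand-in for token.allowance(owner, spender) on chain.
var allowance = 100.u256

# Stand-in for the locked read-modify-write in approveFunds:
# approve(spender, allowance + amount) is an absolute overwrite.
proc approveMore(amount: UInt256) =
  allowance = allowance + amount

approveMore(50.u256)
approveMore(50.u256)
echo allowance # 200 - without serialization, both could have written 150
```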
method getZkeyHash*(market: OnChainMarket): Future[?string] {.async.} =
let config = await market.contract.config()
method loadConfig*(
market: OnChainMarket
): Future[?!void] {.async: (raises: [CancelledError]).} =
try:
without config =? market.configuration:
let fetchedConfig = await market.contract.configuration()
market.configuration = some fetchedConfig
return success()
except EthersError as err:
return failure newException(
MarketError,
"Failed to fetch the config from the Marketplace contract: " & err.msg,
)
method getZkeyHash*(
market: OnChainMarket
): Future[?string] {.async: (raises: [CancelledError, MarketError]).} =
let config = await market.config()
return some config.proofs.zkeyHash
method getSigner*(market: OnChainMarket): Future[Address] {.async.} =
convertEthersError:
method getSigner*(
market: OnChainMarket
): Future[Address] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get signer address"):
return await market.signer.getAddress()
method periodicity*(market: OnChainMarket): Future[Periodicity] {.async.} =
convertEthersError:
let config = await market.contract.config()
method periodicity*(
market: OnChainMarket
): Future[Periodicity] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
let period = config.proofs.period
return Periodicity(seconds: period)
method proofTimeout*(market: OnChainMarket): Future[UInt256] {.async.} =
convertEthersError:
let config = await market.contract.config()
method proofTimeout*(
market: OnChainMarket
): Future[uint64] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
return config.proofs.timeout
method proofDowntime*(market: OnChainMarket): Future[uint8] {.async.} =
convertEthersError:
let config = await market.contract.config()
method repairRewardPercentage*(
market: OnChainMarket
): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
return config.collateral.repairRewardPercentage
method requestDurationLimit*(market: OnChainMarket): Future[uint64] {.async.} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
return config.requestDurationLimit
method proofDowntime*(
market: OnChainMarket
): Future[uint8] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get Marketplace config"):
let config = await market.config()
return config.proofs.downtime
method getPointer*(market: OnChainMarket, slotId: SlotId): Future[uint8] {.async.} =
convertEthersError:
convertEthersError("Failed to get slot pointer"):
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.getPointer(slotId, overrides)
method myRequests*(market: OnChainMarket): Future[seq[RequestId]] {.async.} =
convertEthersError:
convertEthersError("Failed to get my requests"):
return await market.contract.myRequests
method mySlots*(market: OnChainMarket): Future[seq[SlotId]] {.async.} =
convertEthersError:
convertEthersError("Failed to get my slots"):
let slots = await market.contract.mySlots()
debug "Fetched my slots", numSlots = len(slots)
return slots
method requestStorage(market: OnChainMarket, request: StorageRequest){.async.} =
convertEthersError:
method requestStorage(
market: OnChainMarket, request: StorageRequest
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to request storage"):
debug "Requesting storage"
await market.approveFunds(request.price())
discard await market.contract.requestStorage(request).confirm(0)
await market.approveFunds(request.totalPrice())
discard await market.contract.requestStorage(request).confirm(1)
method getRequest(market: OnChainMarket,
id: RequestId): Future[?StorageRequest] {.async.} =
convertEthersError:
method getRequest*(
market: OnChainMarket, id: RequestId
): Future[?StorageRequest] {.async: (raises: [CancelledError]).} =
try:
return some await market.contract.getRequest(id)
except ProviderError as e:
if e.msgDetail.contains("Unknown request"):
return none StorageRequest
raise e
let key = $id
method requestState*(market: OnChainMarket,
requestId: RequestId): Future[?RequestState] {.async.} =
convertEthersError:
if key in market.requestCache:
return some market.requestCache[key]
let request = await market.contract.getRequest(id)
market.requestCache[key] = request
return some request
except Marketplace_UnknownRequest, KeyError:
warn "Cannot retrieve the request", error = getCurrentExceptionMsg()
return none StorageRequest
except EthersError as e:
error "Cannot retrieve the request", error = e.msg
return none StorageRequest
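
Storage requests appear immutable once created, which is presumably what makes the bounded LRU above safe: a cache hit can never be stale. A minimal cache-aside sketch mirroring the same lrucache calls, with a hypothetical `fetch` standing in for the contract call:

```
import pkg/lrucache

var cache = newLruCache[string, string](128)

proc fetch(id: string): string =
  "request-" & id # stand-in for market.contract.getRequest(id)

proc lookup(id: string): string =
  if id in cache: # hit: no RPC round-trip
    return cache[id]
  result = fetch(id)
  cache[id] = result # miss: fetch once, then serve from memory
```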
method requestState*(
market: OnChainMarket, requestId: RequestId
): Future[?RequestState] {.async.} =
convertEthersError("Failed to get request state"):
try:
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return some await market.contract.requestState(requestId, overrides)
except ProviderError as e:
if e.msgDetail.contains("Unknown request"):
except Marketplace_UnknownRequest:
return none RequestState
raise e
method slotState*(market: OnChainMarket,
slotId: SlotId): Future[SlotState] {.async.} =
convertEthersError:
method slotState*(
market: OnChainMarket, slotId: SlotId
): Future[SlotState] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to fetch the slot state from the Marketplace contract"):
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.slotState(slotId, overrides)
method getRequestEnd*(market: OnChainMarket,
id: RequestId): Future[SecondsSince1970] {.async.} =
convertEthersError:
method getRequestEnd*(
market: OnChainMarket, id: RequestId
): Future[SecondsSince1970] {.async.} =
convertEthersError("Failed to get request end"):
return await market.contract.requestEnd(id)
method requestExpiresAt*(market: OnChainMarket,
id: RequestId): Future[SecondsSince1970] {.async.} =
convertEthersError:
method requestExpiresAt*(
market: OnChainMarket, id: RequestId
): Future[SecondsSince1970] {.async.} =
convertEthersError("Failed to get request expiry"):
return await market.contract.requestExpiry(id)
method getHost(market: OnChainMarket,
requestId: RequestId,
slotIndex: UInt256): Future[?Address] {.async.} =
convertEthersError:
method getHost(
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
): Future[?Address] {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to get slot's host"):
let slotId = slotId(requestId, slotIndex)
let address = await market.contract.getHost(slotId)
if address != Address.default:
@ -142,228 +246,435 @@ method getHost(market: OnChainMarket,
else:
return none Address
method getActiveSlot*(market: OnChainMarket,
slotId: SlotId): Future[?Slot] {.async.} =
convertEthersError:
method currentCollateral*(
market: OnChainMarket, slotId: SlotId
): Future[UInt256] {.async: (raises: [MarketError, CancelledError]).} =
convertEthersError("Failed to get slot's current collateral"):
return await market.contract.currentCollateral(slotId)
method getActiveSlot*(market: OnChainMarket, slotId: SlotId): Future[?Slot] {.async.} =
convertEthersError("Failed to get active slot"):
try:
return some await market.contract.getActiveSlot(slotId)
except ProviderError as e:
if e.msgDetail.contains("Slot is free"):
except Marketplace_SlotIsFree:
return none Slot
raise e
method fillSlot(market: OnChainMarket,
method fillSlot(
market: OnChainMarket,
requestId: RequestId,
slotIndex: UInt256,
slotIndex: uint64,
proof: Groth16Proof,
collateral: UInt256) {.async.} =
convertEthersError:
collateral: UInt256,
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to fill slot"):
logScope:
requestId
slotIndex
try:
await market.approveFunds(collateral)
discard await market.contract.fillSlot(requestId, slotIndex, proof).confirm(0)
method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} =
convertEthersError:
discard await market.contract.freeSlot(slotId).confirm(0)
# Add 10% to gas estimate to deal with different evm code flow when we
# happen to be the last one to fill a slot in this request
trace "estimating gas for fillSlot"
let gas = await market.contract.estimateGas.fillSlot(requestId, slotIndex, proof)
let gasLimit = (gas * 110) div 100
let overrides = TransactionOverrides(gasLimit: some gasLimit)
method withdrawFunds(market: OnChainMarket,
requestId: RequestId) {.async.} =
convertEthersError:
discard await market.contract.withdrawFunds(requestId).confirm(0)
trace "calling fillSlot on contract", estimatedGas = gas, gasLimit = gasLimit
discard await market.contract
.fillSlot(requestId, slotIndex, proof, overrides)
.confirm(1)
trace "fillSlot transaction completed"
except Marketplace_SlotNotFree as parent:
raise newException(
SlotStateMismatchError, "Failed to fill slot because the slot is not free",
parent,
)
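
The gas headroom applied here and in the neighbouring methods is plain integer arithmetic on the node-side estimate; a self-contained sketch with an assumed estimate of 180k gas:

```
import pkg/stint

let gas = 180_000.u256 # assumed estimateGas result

echo (gas * 110) div 100 # 198000 - fillSlot adds 10%
echo (gas * 125) div 100 # 225000 - reserveSlot adds 25%
echo (gas * 150) div 100 # 270000 - markProofAsMissing adds 50%
echo gas * 3.u256        # 540000 - freeSlot triples the estimate (+200%)
```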
method isProofRequired*(market: OnChainMarket,
id: SlotId): Future[bool] {.async.} =
convertEthersError:
method freeSlot*(
market: OnChainMarket, slotId: SlotId
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to free slot"):
try:
var freeSlot: Future[Confirmable]
if rewardRecipient =? market.rewardRecipient:
# If --reward-recipient specified, use it as the reward recipient, and use
# the SP's address as the collateral recipient
let collateralRecipient = await market.getSigner()
# Add 200% to gas estimate to deal with different evm code flow when we
# happen to be the one to make the request fail
let gas = await market.contract.estimateGas.freeSlot(
slotId, rewardRecipient, collateralRecipient
)
let gasLimit = gas * 3
let overrides = TransactionOverrides(gasLimit: some gasLimit)
trace "calling freeSlot on contract", estimatedGas = gas, gasLimit = gasLimit
freeSlot = market.contract.freeSlot(
slotId,
rewardRecipient, # --reward-recipient
collateralRecipient, # SP's address
overrides,
)
else:
# Otherwise, use the SP's address as both the reward and collateral
# recipient (the contract will use msg.sender for both)
# Add 200% to gas estimate to deal with different evm code flow when we
# happen to be the one to make the request fail
let gas = await market.contract.estimateGas.freeSlot(slotId)
let gasLimit = gas * 3
let overrides = TransactionOverrides(gasLimit: some (gasLimit))
trace "calling freeSlot on contract", estimatedGas = gas, gasLimit = gasLimit
freeSlot = market.contract.freeSlot(slotId, overrides)
discard await freeSlot.confirm(1)
except Marketplace_SlotIsFree as parent:
raise newException(
SlotStateMismatchError, "Failed to free slot, slot is already free", parent
)
method withdrawFunds(
market: OnChainMarket, requestId: RequestId
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to withdraw funds"):
discard await market.contract.withdrawFunds(requestId).confirm(1)
method isProofRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} =
convertEthersError("Failed to get proof requirement"):
try:
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.isProofRequired(id, overrides)
except ProviderError as e:
if e.msgDetail.contains("Slot is free"):
except Marketplace_SlotIsFree:
return false
raise e
method willProofBeRequired*(market: OnChainMarket,
id: SlotId): Future[bool] {.async.} =
convertEthersError:
method willProofBeRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} =
convertEthersError("Failed to get future proof requirement"):
try:
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.willProofBeRequired(id, overrides)
except ProviderError as e:
if e.msgDetail.contains("Slot is free"):
except Marketplace_SlotIsFree:
return false
raise e
method getChallenge*(market: OnChainMarket, id: SlotId): Future[ProofChallenge] {.async.} =
convertEthersError:
method getChallenge*(
market: OnChainMarket, id: SlotId
): Future[ProofChallenge] {.async.} =
convertEthersError("Failed to get proof challenge"):
let overrides = CallOverrides(blockTag: some BlockTag.pending)
return await market.contract.getChallenge(id, overrides)
method submitProof*(market: OnChainMarket,
id: SlotId,
proof: Groth16Proof) {.async.} =
convertEthersError:
discard await market.contract.submitProof(id, proof).confirm(0)
method markProofAsMissing*(market: OnChainMarket,
id: SlotId,
period: Period) {.async.} =
convertEthersError:
discard await market.contract.markProofAsMissing(id, period).confirm(0)
method canProofBeMarkedAsMissing*(
market: OnChainMarket,
id: SlotId,
period: Period
): Future[bool] {.async.} =
let provider = market.contract.provider
let contractWithoutSigner = market.contract.connect(provider)
let overrides = CallOverrides(blockTag: some BlockTag.pending)
method submitProof*(
market: OnChainMarket, id: SlotId, proof: Groth16Proof
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to submit proof"):
try:
discard await contractWithoutSigner.markProofAsMissing(id, period, overrides)
discard await market.contract.submitProof(id, proof).confirm(1)
except Proofs_InvalidProof as parent:
raise newException(
ProofInvalidError, "Failed to submit proof because the proof is invalid", parent
)
method markProofAsMissing*(
market: OnChainMarket, id: SlotId, period: Period
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to mark proof as missing"):
# Add 50% to gas estimate to deal with different evm code flow when we
# happen to be the one to make the request fail
let gas = await market.contract.estimateGas.markProofAsMissing(id, period)
let gasLimit = (gas * 150) div 100
let overrides = TransactionOverrides(gasLimit: some gasLimit)
trace "calling markProofAsMissing on contract",
estimatedGas = gas, gasLimit = gasLimit
discard await market.contract.markProofAsMissing(id, period, overrides).confirm(1)
method canMarkProofAsMissing*(
market: OnChainMarket, id: SlotId, period: Period
): Future[bool] {.async: (raises: [CancelledError]).} =
try:
let overrides = CallOverrides(blockTag: some BlockTag.pending)
discard await market.contract.canMarkProofAsMissing(id, period, overrides)
return true
except EthersError as e:
trace "Proof cannot be marked as missing", msg = e.msg
return false
method subscribeRequests*(market: OnChainMarket,
callback: OnRequest):
Future[MarketSubscription] {.async.} =
proc onEvent(event: StorageRequested) {.upraises:[].} =
callback(event.requestId,
event.ask,
event.expiry)
method reserveSlot*(
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
) {.async: (raises: [CancelledError, MarketError]).} =
convertEthersError("Failed to reserve slot"):
try:
# Add 25% to gas estimate to deal with different evm code flow when we
# happen to be the last one that is allowed to reserve the slot
let gas = await market.contract.estimateGas.reserveSlot(requestId, slotIndex)
let gasLimit = (gas * 125) div 100
let overrides = TransactionOverrides(gasLimit: some gasLimit)
convertEthersError:
trace "calling reserveSlot on contract", estimatedGas = gas, gasLimit = gasLimit
discard
await market.contract.reserveSlot(requestId, slotIndex, overrides).confirm(1)
except SlotReservations_ReservationNotAllowed:
raise newException(
SlotReservationNotAllowedError,
"Failed to reserve slot because reservation is not allowed",
)
method canReserveSlot*(
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
): Future[bool] {.async.} =
convertEthersError("Unable to determine if slot can be reserved"):
return await market.contract.canReserveSlot(requestId, slotIndex)
method subscribeRequests*(
market: OnChainMarket, callback: OnRequest
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!StorageRequested) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in Request subscription", msg = eventErr.msg
return
callback(event.requestId, event.ask, event.expiry)
convertEthersError("Failed to subscribe to StorageRequested events"):
let subscription = await market.contract.subscribe(StorageRequested, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeSlotFilled*(market: OnChainMarket,
callback: OnSlotFilled):
Future[MarketSubscription] {.async.} =
proc onEvent(event: SlotFilled) {.upraises:[].} =
method subscribeSlotFilled*(
market: OnChainMarket, callback: OnSlotFilled
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!SlotFilled) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in SlotFilled subscription", msg = eventErr.msg
return
callback(event.requestId, event.slotIndex)
convertEthersError:
convertEthersError("Failed to subscribe to SlotFilled events"):
let subscription = await market.contract.subscribe(SlotFilled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeSlotFilled*(market: OnChainMarket,
method subscribeSlotFilled*(
market: OnChainMarket,
requestId: RequestId,
slotIndex: UInt256,
callback: OnSlotFilled):
Future[MarketSubscription] {.async.} =
proc onSlotFilled(eventRequestId: RequestId, eventSlotIndex: UInt256) =
slotIndex: uint64,
callback: OnSlotFilled,
): Future[MarketSubscription] {.async.} =
proc onSlotFilled(eventRequestId: RequestId, eventSlotIndex: uint64) =
if eventRequestId == requestId and eventSlotIndex == slotIndex:
callback(requestId, slotIndex)
convertEthersError:
convertEthersError("Failed to subscribe to SlotFilled events"):
return await market.subscribeSlotFilled(onSlotFilled)
method subscribeSlotFreed*(market: OnChainMarket,
callback: OnSlotFreed):
Future[MarketSubscription] {.async.} =
proc onEvent(event: SlotFreed) {.upraises:[].} =
method subscribeSlotFreed*(
market: OnChainMarket, callback: OnSlotFreed
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!SlotFreed) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in SlotFreed subscription", msg = eventErr.msg
return
callback(event.requestId, event.slotIndex)
convertEthersError:
convertEthersError("Failed to subscribe to SlotFreed events"):
let subscription = await market.contract.subscribe(SlotFreed, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeFulfillment(market: OnChainMarket,
callback: OnFulfillment):
Future[MarketSubscription] {.async.} =
proc onEvent(event: RequestFulfilled) {.upraises:[].} =
method subscribeSlotReservationsFull*(
market: OnChainMarket, callback: OnSlotReservationsFull
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!SlotReservationsFull) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in SlotReservationsFull subscription",
msg = eventErr.msg
return
callback(event.requestId, event.slotIndex)
convertEthersError("Failed to subscribe to SlotReservationsFull events"):
let subscription = await market.contract.subscribe(SlotReservationsFull, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeFulfillment(
market: OnChainMarket, callback: OnFulfillment
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFulfilled) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestFulfillment subscription", msg = eventErr.msg
return
callback(event.requestId)
convertEthersError:
convertEthersError("Failed to subscribe to RequestFulfilled events"):
let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeFulfillment(market: OnChainMarket,
requestId: RequestId,
callback: OnFulfillment):
Future[MarketSubscription] {.async.} =
proc onEvent(event: RequestFulfilled) {.upraises:[].} =
method subscribeFulfillment(
market: OnChainMarket, requestId: RequestId, callback: OnFulfillment
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFulfilled) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestFulfillment subscription", msg = eventErr.msg
return
if event.requestId == requestId:
callback(event.requestId)
convertEthersError:
convertEthersError("Failed to subscribe to RequestFulfilled events"):
let subscription = await market.contract.subscribe(RequestFulfilled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeRequestCancelled*(market: OnChainMarket,
callback: OnRequestCancelled):
Future[MarketSubscription] {.async.} =
proc onEvent(event: RequestCancelled) {.upraises:[].} =
method subscribeRequestCancelled*(
market: OnChainMarket, callback: OnRequestCancelled
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestCancelled) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestCancelled subscription", msg = eventErr.msg
return
callback(event.requestId)
convertEthersError:
convertEthersError("Failed to subscribe to RequestCancelled events"):
let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeRequestCancelled*(market: OnChainMarket,
requestId: RequestId,
callback: OnRequestCancelled):
Future[MarketSubscription] {.async.} =
proc onEvent(event: RequestCancelled) {.upraises:[].} =
method subscribeRequestCancelled*(
market: OnChainMarket, requestId: RequestId, callback: OnRequestCancelled
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestCancelled) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestCancelled subscription", msg = eventErr.msg
return
if event.requestId == requestId:
callback(event.requestId)
convertEthersError:
convertEthersError("Failed to subscribe to RequestCancelled events"):
let subscription = await market.contract.subscribe(RequestCancelled, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeRequestFailed*(market: OnChainMarket,
callback: OnRequestFailed):
Future[MarketSubscription] {.async.} =
proc onEvent(event: RequestFailed) {.upraises:[]} =
method subscribeRequestFailed*(
market: OnChainMarket, callback: OnRequestFailed
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFailed) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestFailed subscription", msg = eventErr.msg
return
callback(event.requestId)
convertEthersError:
convertEthersError("Failed to subscribe to RequestFailed events"):
let subscription = await market.contract.subscribe(RequestFailed, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeRequestFailed*(market: OnChainMarket,
requestId: RequestId,
callback: OnRequestFailed):
Future[MarketSubscription] {.async.} =
proc onEvent(event: RequestFailed) {.upraises:[]} =
method subscribeRequestFailed*(
market: OnChainMarket, requestId: RequestId, callback: OnRequestFailed
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!RequestFailed) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in RequestFailed subscription", msg = eventErr.msg
return
if event.requestId == requestId:
callback(event.requestId)
convertEthersError:
convertEthersError("Failed to subscribe to RequestFailed events"):
let subscription = await market.contract.subscribe(RequestFailed, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method subscribeProofSubmission*(market: OnChainMarket,
callback: OnProofSubmitted):
Future[MarketSubscription] {.async.} =
proc onEvent(event: ProofSubmitted) {.upraises: [].} =
method subscribeProofSubmission*(
market: OnChainMarket, callback: OnProofSubmitted
): Future[MarketSubscription] {.async.} =
proc onEvent(eventResult: ?!ProofSubmitted) {.raises: [].} =
without event =? eventResult, eventErr:
error "There was an error in ProofSubmitted subscription", msg = eventErr.msg
return
callback(event.id)
convertEthersError:
convertEthersError("Failed to subscribe to ProofSubmitted events"):
let subscription = await market.contract.subscribe(ProofSubmitted, onEvent)
return OnChainMarketSubscription(eventSubscription: subscription)
method unsubscribe*(subscription: OnChainMarketSubscription) {.async.} =
await subscription.eventSubscription.unsubscribe()
method queryPastStorageRequests*(market: OnChainMarket,
blocksAgo: int):
Future[seq[PastStorageRequest]] {.async.} =
convertEthersError:
let contract = market.contract
let provider = contract.provider
method queryPastSlotFilledEvents*(
market: OnChainMarket, fromBlock: BlockTag
): Future[seq[SlotFilled]] {.async.} =
convertEthersError("Failed to get past SlotFilled events from block"):
return await market.contract.queryFilter(SlotFilled, fromBlock, BlockTag.latest)
let head = await provider.getBlockNumber()
let fromBlock = BlockTag.init(head - blocksAgo.abs.u256)
method queryPastSlotFilledEvents*(
market: OnChainMarket, blocksAgo: int
): Future[seq[SlotFilled]] {.async.} =
convertEthersError("Failed to get past SlotFilled events"):
let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo)
let events = await contract.queryFilter(StorageRequested,
fromBlock,
BlockTag.latest)
return events.map(event =>
PastStorageRequest(
requestId: event.requestId,
ask: event.ask,
expiry: event.expiry
return await market.queryPastSlotFilledEvents(fromBlock)
method queryPastSlotFilledEvents*(
market: OnChainMarket, fromTime: SecondsSince1970
): Future[seq[SlotFilled]] {.async.} =
convertEthersError("Failed to get past SlotFilled events from time"):
let fromBlock = await market.contract.provider.blockNumberForEpoch(fromTime)
return await market.queryPastSlotFilledEvents(BlockTag.init(fromBlock))
method queryPastStorageRequestedEvents*(
market: OnChainMarket, fromBlock: BlockTag
): Future[seq[StorageRequested]] {.async.} =
convertEthersError("Failed to get past StorageRequested events from block"):
return
await market.contract.queryFilter(StorageRequested, fromBlock, BlockTag.latest)
method queryPastStorageRequestedEvents*(
market: OnChainMarket, blocksAgo: int
): Future[seq[StorageRequested]] {.async.} =
convertEthersError("Failed to get past StorageRequested events"):
let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo)
return await market.queryPastStorageRequestedEvents(fromBlock)
method slotCollateral*(
market: OnChainMarket, requestId: RequestId, slotIndex: uint64
): Future[?!UInt256] {.async: (raises: [CancelledError]).} =
let slotid = slotId(requestId, slotIndex)
try:
let slotState = await market.slotState(slotid)
without request =? await market.getRequest(requestId):
return failure newException(
MarketError, "Failure calculating the slotCollateral, cannot get the request"
)
return market.slotCollateral(request.ask.collateralPerSlot, slotState)
except MarketError as error:
error "Error when trying to calculate the slotCollateral", error = error.msg
return failure error
method slotCollateral*(
market: OnChainMarket, collateralPerSlot: UInt256, slotState: SlotState
): ?!UInt256 {.raises: [].} =
if slotState == SlotState.Repair:
without repairRewardPercentage =?
market.configuration .? collateral .? repairRewardPercentage:
return failure newException(
MarketError,
"Failure calculating the slotCollateral, cannot get the reward percentage",
)
return success (
collateralPerSlot - (collateralPerSlot * repairRewardPercentage.u256).div(
100.u256
)
)
return success(collateralPerSlot)
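
Concretely, filling a slot in `Repair` state requires `collateralPerSlot` reduced by `repairRewardPercentage`; a worked sketch with assumed numbers:

```
import pkg/stint

let collateralPerSlot = 1_000.u256 # assumed ask.collateralPerSlot
let repairRewardPercentage = 10'u8 # assumed config value

# Same formula as slotCollateral for SlotState.Repair:
let discounted =
  collateralPerSlot - (collateralPerSlot * repairRewardPercentage.u256).div(100.u256)
echo discounted # 900
```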

View File

@ -16,55 +16,183 @@ export requests
type
Marketplace* = ref object of Contract
StorageRequested* = object of Event
requestId*: RequestId
ask*: StorageAsk
expiry*: UInt256
SlotFilled* = object of Event
requestId* {.indexed.}: RequestId
slotIndex*: UInt256
SlotFreed* = object of Event
requestId* {.indexed.}: RequestId
slotIndex*: UInt256
RequestFulfilled* = object of Event
requestId* {.indexed.}: RequestId
RequestCancelled* = object of Event
requestId* {.indexed.}: RequestId
RequestFailed* = object of Event
requestId* {.indexed.}: RequestId
ProofSubmitted* = object of Event
id*: SlotId
Marketplace_RepairRewardPercentageTooHigh* = object of SolidityError
Marketplace_SlashPercentageTooHigh* = object of SolidityError
Marketplace_MaximumSlashingTooHigh* = object of SolidityError
Marketplace_InvalidExpiry* = object of SolidityError
Marketplace_InvalidMaxSlotLoss* = object of SolidityError
Marketplace_InsufficientSlots* = object of SolidityError
Marketplace_InvalidClientAddress* = object of SolidityError
Marketplace_RequestAlreadyExists* = object of SolidityError
Marketplace_InvalidSlot* = object of SolidityError
Marketplace_SlotNotFree* = object of SolidityError
Marketplace_InvalidSlotHost* = object of SolidityError
Marketplace_AlreadyPaid* = object of SolidityError
Marketplace_TransferFailed* = object of SolidityError
Marketplace_UnknownRequest* = object of SolidityError
Marketplace_InvalidState* = object of SolidityError
Marketplace_StartNotBeforeExpiry* = object of SolidityError
Marketplace_SlotNotAcceptingProofs* = object of SolidityError
Marketplace_SlotIsFree* = object of SolidityError
Marketplace_ReservationRequired* = object of SolidityError
Marketplace_NothingToWithdraw* = object of SolidityError
Marketplace_InsufficientDuration* = object of SolidityError
Marketplace_InsufficientProofProbability* = object of SolidityError
Marketplace_InsufficientCollateral* = object of SolidityError
Marketplace_InsufficientReward* = object of SolidityError
Marketplace_InvalidCid* = object of SolidityError
Marketplace_DurationExceedsLimit* = object of SolidityError
Proofs_InsufficientBlockHeight* = object of SolidityError
Proofs_InvalidProof* = object of SolidityError
Proofs_ProofAlreadySubmitted* = object of SolidityError
Proofs_PeriodNotEnded* = object of SolidityError
Proofs_ValidationTimedOut* = object of SolidityError
Proofs_ProofNotMissing* = object of SolidityError
Proofs_ProofNotRequired* = object of SolidityError
Proofs_ProofAlreadyMarkedMissing* = object of SolidityError
Periods_InvalidSecondsPerPeriod* = object of SolidityError
SlotReservations_ReservationNotAllowed* = object of SolidityError
proc config*(marketplace: Marketplace): MarketplaceConfig {.contract, view.}
proc configuration*(marketplace: Marketplace): MarketplaceConfig {.contract, view.}
proc token*(marketplace: Marketplace): Address {.contract, view.}
proc slashMisses*(marketplace: Marketplace): UInt256 {.contract, view.}
proc slashPercentage*(marketplace: Marketplace): UInt256 {.contract, view.}
proc minCollateralThreshold*(marketplace: Marketplace): UInt256 {.contract, view.}
proc currentCollateral*(
marketplace: Marketplace, id: SlotId
): UInt256 {.contract, view.}
proc requestStorage*(
marketplace: Marketplace, request: StorageRequest
): Confirmable {.
contract,
errors: [
Marketplace_InvalidClientAddress, Marketplace_RequestAlreadyExists,
Marketplace_InvalidExpiry, Marketplace_InsufficientSlots,
Marketplace_InvalidMaxSlotLoss, Marketplace_InsufficientDuration,
Marketplace_InsufficientProofProbability, Marketplace_InsufficientCollateral,
Marketplace_InsufficientReward, Marketplace_InvalidCid,
]
.}
proc fillSlot*(
marketplace: Marketplace, requestId: RequestId, slotIndex: uint64, proof: Groth16Proof
): Confirmable {.
contract,
errors: [
Marketplace_InvalidSlot, Marketplace_ReservationRequired, Marketplace_SlotNotFree,
Marketplace_StartNotBeforeExpiry, Marketplace_UnknownRequest,
]
.}
proc withdrawFunds*(
marketplace: Marketplace, requestId: RequestId
): Confirmable {.
contract,
errors: [
Marketplace_InvalidClientAddress, Marketplace_InvalidState,
Marketplace_NothingToWithdraw, Marketplace_UnknownRequest,
]
.}
proc withdrawFunds*(
marketplace: Marketplace, requestId: RequestId, withdrawAddress: Address
): Confirmable {.
contract,
errors: [
Marketplace_InvalidClientAddress, Marketplace_InvalidState,
Marketplace_NothingToWithdraw, Marketplace_UnknownRequest,
]
.}
proc freeSlot*(
marketplace: Marketplace, id: SlotId
): Confirmable {.
contract,
errors: [
Marketplace_InvalidSlotHost, Marketplace_AlreadyPaid,
Marketplace_StartNotBeforeExpiry, Marketplace_UnknownRequest, Marketplace_SlotIsFree,
]
.}
proc freeSlot*(
marketplace: Marketplace,
id: SlotId,
rewardRecipient: Address,
collateralRecipient: Address,
): Confirmable {.
contract,
errors: [
Marketplace_InvalidSlotHost, Marketplace_AlreadyPaid,
Marketplace_StartNotBeforeExpiry, Marketplace_UnknownRequest, Marketplace_SlotIsFree,
]
.}
proc getRequest*(
marketplace: Marketplace, id: RequestId
): StorageRequest {.contract, view, errors: [Marketplace_UnknownRequest].}
proc requestStorage*(marketplace: Marketplace, request: StorageRequest): ?TransactionResponse {.contract.}
proc fillSlot*(marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256, proof: Groth16Proof): ?TransactionResponse {.contract.}
proc withdrawFunds*(marketplace: Marketplace, requestId: RequestId): ?TransactionResponse {.contract.}
proc freeSlot*(marketplace: Marketplace, id: SlotId): ?TransactionResponse {.contract.}
proc getRequest*(marketplace: Marketplace, id: RequestId): StorageRequest {.contract, view.}
proc getHost*(marketplace: Marketplace, id: SlotId): Address {.contract, view.}
proc getActiveSlot*(marketplace: Marketplace, id: SlotId): Slot {.contract, view.}
proc getActiveSlot*(
marketplace: Marketplace, id: SlotId
): Slot {.contract, view, errors: [Marketplace_SlotIsFree].}
proc myRequests*(marketplace: Marketplace): seq[RequestId] {.contract, view.}
proc mySlots*(marketplace: Marketplace): seq[SlotId] {.contract, view.}
proc requestState*(marketplace: Marketplace, requestId: RequestId): RequestState {.contract, view.}
proc requestState*(
marketplace: Marketplace, requestId: RequestId
): RequestState {.contract, view, errors: [Marketplace_UnknownRequest].}
proc slotState*(marketplace: Marketplace, slotId: SlotId): SlotState {.contract, view.}
proc requestEnd*(marketplace: Marketplace, requestId: RequestId): SecondsSince1970 {.contract, view.}
proc requestExpiry*(marketplace: Marketplace, requestId: RequestId): SecondsSince1970 {.contract, view.}
proc requestEnd*(
marketplace: Marketplace, requestId: RequestId
): SecondsSince1970 {.contract, view.}
proc proofTimeout*(marketplace: Marketplace): UInt256 {.contract, view.}
proc requestExpiry*(
marketplace: Marketplace, requestId: RequestId
): SecondsSince1970 {.contract, view.}
proc proofEnd*(marketplace: Marketplace, id: SlotId): UInt256 {.contract, view.}
proc missingProofs*(marketplace: Marketplace, id: SlotId): UInt256 {.contract, view.}
proc isProofRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.}
proc willProofBeRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.}
proc getChallenge*(marketplace: Marketplace, id: SlotId): array[32, byte] {.contract, view.}
proc getChallenge*(
marketplace: Marketplace, id: SlotId
): array[32, byte] {.contract, view.}
proc getPointer*(marketplace: Marketplace, id: SlotId): uint8 {.contract, view.}
proc submitProof*(marketplace: Marketplace, id: SlotId, proof: Groth16Proof): ?TransactionResponse {.contract.}
proc markProofAsMissing*(marketplace: Marketplace, id: SlotId, period: UInt256): ?TransactionResponse {.contract.}
proc submitProof*(
marketplace: Marketplace, id: SlotId, proof: Groth16Proof
): Confirmable {.
contract,
errors:
[Proofs_ProofAlreadySubmitted, Proofs_InvalidProof, Marketplace_UnknownRequest]
.}
proc markProofAsMissing*(
marketplace: Marketplace, id: SlotId, period: uint64
): Confirmable {.
contract,
errors: [
Marketplace_SlotNotAcceptingProofs, Marketplace_StartNotBeforeExpiry,
Proofs_PeriodNotEnded, Proofs_ValidationTimedOut, Proofs_ProofNotMissing,
Proofs_ProofNotRequired, Proofs_ProofAlreadyMarkedMissing,
]
.}
proc canMarkProofAsMissing*(
marketplace: Marketplace, id: SlotId, period: uint64
): Confirmable {.
contract,
errors: [
Marketplace_SlotNotAcceptingProofs, Proofs_PeriodNotEnded,
Proofs_ValidationTimedOut, Proofs_ProofNotMissing, Proofs_ProofNotRequired,
Proofs_ProofAlreadyMarkedMissing,
]
.}
proc reserveSlot*(
marketplace: Marketplace, requestId: RequestId, slotIndex: uint64
): Confirmable {.contract.}
proc canReserveSlot*(
marketplace: Marketplace, requestId: RequestId, slotIndex: uint64
): bool {.contract, view.}

View File

@ -1,19 +1,22 @@
import pkg/stint
import pkg/contractabi
import pkg/ethers/fields
import pkg/ethers/contracts/fields
type
Groth16Proof* = object
a*: G1Point
b*: G2Point
c*: G1Point
G1Point* = object
x*: UInt256
y*: UInt256
# A field element F_{p^2} encoded as `real + i * imag`
Fp2Element* = object
real*: UInt256
imag*: UInt256
G2Point* = object
x*: Fp2Element
y*: Fp2Element

View File

@ -0,0 +1,123 @@
import pkg/ethers/provider
import pkg/chronos
import pkg/questionable
import ../logutils
from ../clock import SecondsSince1970
logScope:
topics = "marketplace onchain provider"
proc raiseProviderError(message: string) {.raises: [ProviderError].} =
raise newException(ProviderError, message)
proc blockNumberAndTimestamp*(
provider: Provider, blockTag: BlockTag
): Future[(UInt256, UInt256)] {.async: (raises: [ProviderError, CancelledError]).} =
without latestBlock =? await provider.getBlock(blockTag):
raiseProviderError("Could not get latest block")
without latestBlockNumber =? latestBlock.number:
raiseProviderError("Could not get latest block number")
return (latestBlockNumber, latestBlock.timestamp)
proc binarySearchFindClosestBlock(
provider: Provider, epochTime: int, low: UInt256, high: UInt256
): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} =
let (_, lowTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.init(low))
let (_, highTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.init(high))
if abs(lowTimestamp.truncate(int) - epochTime) <
abs(highTimestamp.truncate(int) - epochTime):
return low
else:
return high
proc binarySearchBlockNumberForEpoch(
provider: Provider,
epochTime: UInt256,
latestBlockNumber: UInt256,
earliestBlockNumber: UInt256,
): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} =
var low = earliestBlockNumber
var high = latestBlockNumber
while low <= high:
if low == 0 and high == 0:
return low
let mid = (low + high) div 2
let (midBlockNumber, midBlockTimestamp) =
await provider.blockNumberAndTimestamp(BlockTag.init(mid))
if midBlockTimestamp < epochTime:
low = mid + 1
elif midBlockTimestamp > epochTime:
high = mid - 1
else:
return midBlockNumber
# NOTE that, because of how the binary search is implemented, when it
# finishes low is always greater than high - this is why we use high,
# where intuitively we would use low:
await provider.binarySearchFindClosestBlock(
epochTime.truncate(int), low = high, high = low
)
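
A self-contained sketch of the same search over plain ints (hypothetical timestamps, with the epoch strictly inside the range, as the callers guarantee): when the loop exits without an exact hit, `low` has crossed above `high`, and the closer of the two neighbouring blocks wins.

```
proc closest(timestamps: seq[int], epoch: int): int =
  var low = 0
  var high = timestamps.high
  while low <= high:
    let mid = (low + high) div 2
    if timestamps[mid] < epoch: low = mid + 1
    elif timestamps[mid] > epoch: high = mid - 1
    else: return mid
  # low > high here, mirroring the low/high swap in the call above
  if abs(timestamps[high] - epoch) < abs(timestamps[low] - epoch): high else: low

echo closest(@[10, 20, 30, 40], 24) # 1 (timestamp 20 is closer than 30)
```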
proc blockNumberForEpoch*(
provider: Provider, epochTime: SecondsSince1970
): Future[UInt256] {.async: (raises: [ProviderError, CancelledError]).} =
let epochTimeUInt256 = epochTime.u256
let (latestBlockNumber, latestBlockTimestamp) =
await provider.blockNumberAndTimestamp(BlockTag.latest)
let (earliestBlockNumber, earliestBlockTimestamp) =
await provider.blockNumberAndTimestamp(BlockTag.earliest)
# Initially we used the average block time to predict
# the number of blocks we need to look back in order to find
# the block number corresponding to the given epoch time.
# This estimation can be highly inaccurate if block time
# was changing in the past or is fluctuating and therefore
# we used that information initially only to find out
# if the available history is long enough to perform effective search.
# It turns out we do not have to do that. There is an easier way.
#
# First we check if the given epoch time equals the timestamp of either
# the earliest or the latest block. If it does, we just return the
# block number of that block.
#
# Otherwise, if the earliest available block is not the genesis block,
# we should check the timestamp of that earliest block and if it is greater
# than the epoch time, we should issue a warning and return
# that earliest block number.
# In all other cases, thus when the earliest block is not the genesis
# block but its timestamp is not greater than the requested epoch time, or
# if the earliest available block is the genesis block,
# (which means we have the whole history available), we should proceed with
# the binary search.
#
# An additional benefit of this method is that we do not have to rely
# on the average block time, which not only makes the whole thing
# more reliable, but also easier to test.
# Are we lucky today?
if earliestBlockTimestamp == epochTimeUInt256:
return earliestBlockNumber
if latestBlockTimestamp == epochTimeUInt256:
return latestBlockNumber
if earliestBlockNumber > 0 and earliestBlockTimestamp > epochTimeUInt256:
let availableHistoryInDays =
(latestBlockTimestamp - earliestBlockTimestamp) div 1.days.secs.u256
warn "Short block history detected.",
earliestBlockTimestamp = earliestBlockTimestamp, days = availableHistoryInDays
return earliestBlockNumber
return await provider.binarySearchBlockNumberForEpoch(
epochTimeUInt256, latestBlockNumber, earliestBlockNumber
)
proc pastBlockTag*(
provider: Provider, blocksAgo: int
): Future[BlockTag] {.async: (raises: [ProviderError, CancelledError]).} =
let head = await provider.getBlockNumber()
return BlockTag.init(head - blocksAgo.abs.u256)

View File

@ -2,13 +2,14 @@ import std/hashes
import std/sequtils
import std/typetraits
import pkg/contractabi
import pkg/nimcrypto
import pkg/ethers/fields
import pkg/nimcrypto/keccak
import pkg/ethers/contracts/fields
import pkg/questionable/results
import pkg/stew/byteutils
import pkg/upraises
import pkg/libp2p/[cid, multicodec]
import ../logutils
import ../utils/json
from ../errors import mapFailure
export contractabi
@ -17,22 +18,26 @@ type
client* {.serialize.}: Address
ask* {.serialize.}: StorageAsk
content* {.serialize.}: StorageContent
expiry* {.serialize.}: UInt256
expiry* {.serialize.}: uint64
nonce*: Nonce
StorageAsk* = object
slots* {.serialize.}: uint64
slotSize* {.serialize.}: UInt256
duration* {.serialize.}: UInt256
proofProbability* {.serialize.}: UInt256
reward* {.serialize.}: UInt256
collateral* {.serialize.}: UInt256
pricePerBytePerSecond* {.serialize.}: UInt256
collateralPerByte* {.serialize.}: UInt256
slots* {.serialize.}: uint64
slotSize* {.serialize.}: uint64
duration* {.serialize.}: uint64
maxSlotLoss* {.serialize.}: uint64
StorageContent* = object
cid* {.serialize.}: string
cid* {.serialize.}: Cid
merkleRoot*: array[32, byte]
Slot* = object
request* {.serialize.}: StorageRequest
slotIndex* {.serialize.}: UInt256
slotIndex* {.serialize.}: uint64
SlotId* = distinct array[32, byte]
RequestId* = distinct array[32, byte]
Nonce* = distinct array[32, byte]
@ -42,6 +47,7 @@ type
Cancelled
Finished
Failed
SlotState* {.pure.} = enum
Free
Filled
@ -49,6 +55,7 @@ type
Failed
Paid
Cancelled
Repair
proc `==`*(x, y: Nonce): bool {.borrow.}
proc `==`*(x, y: RequestId): bool {.borrow.}
@ -80,44 +87,43 @@ proc toHex*[T: distinct](id: T): string =
type baseType = T.distinctBase
baseType(id).toHex
logutils.formatIt(LogFormat.textLines, Nonce): it.short0xHexLog
logutils.formatIt(LogFormat.textLines, RequestId): it.short0xHexLog
logutils.formatIt(LogFormat.textLines, SlotId): it.short0xHexLog
logutils.formatIt(LogFormat.json, Nonce): it.to0xHexLog
logutils.formatIt(LogFormat.json, RequestId): it.to0xHexLog
logutils.formatIt(LogFormat.json, SlotId): it.to0xHexLog
logutils.formatIt(LogFormat.textLines, Nonce):
it.short0xHexLog
logutils.formatIt(LogFormat.textLines, RequestId):
it.short0xHexLog
logutils.formatIt(LogFormat.textLines, SlotId):
it.short0xHexLog
logutils.formatIt(LogFormat.json, Nonce):
it.to0xHexLog
logutils.formatIt(LogFormat.json, RequestId):
it.to0xHexLog
logutils.formatIt(LogFormat.json, SlotId):
it.to0xHexLog
func fromTuple(_: type StorageRequest, tupl: tuple): StorageRequest =
StorageRequest(
client: tupl[0],
ask: tupl[1],
content: tupl[2],
expiry: tupl[3],
nonce: tupl[4]
client: tupl[0], ask: tupl[1], content: tupl[2], expiry: tupl[3], nonce: tupl[4]
)
func fromTuple(_: type Slot, tupl: tuple): Slot =
Slot(
request: tupl[0],
slotIndex: tupl[1]
)
Slot(request: tupl[0], slotIndex: tupl[1])
func fromTuple(_: type StorageAsk, tupl: tuple): StorageAsk =
StorageAsk(
slots: tupl[0],
slotSize: tupl[1],
duration: tupl[2],
proofProbability: tupl[3],
reward: tupl[4],
collateral: tupl[5],
maxSlotLoss: tupl[6]
proofProbability: tupl[0],
pricePerBytePerSecond: tupl[1],
collateralPerByte: tupl[2],
slots: tupl[3],
slotSize: tupl[4],
duration: tupl[5],
maxSlotLoss: tupl[6],
)
func fromTuple(_: type StorageContent, tupl: tuple): StorageContent =
StorageContent(
cid: tupl[0],
merkleRoot: tupl[1]
)
StorageContent(cid: tupl[0], merkleRoot: tupl[1])
func solidityType*(_: type Cid): string =
solidityType(seq[byte])
func solidityType*(_: type StorageContent): string =
solidityType(StorageContent.fieldTypes)
@ -128,6 +134,10 @@ func solidityType*(_: type StorageAsk): string =
func solidityType*(_: type StorageRequest): string =
solidityType(StorageRequest.fieldTypes)
# Note: it seems to be ok to ignore the vbuffer offset for now
func encode*(encoder: var AbiEncoder, cid: Cid) =
encoder.write(cid.data.buffer)
func encode*(encoder: var AbiEncoder, content: StorageContent) =
encoder.write(content.fieldValues)
@ -140,8 +150,12 @@ func encode*(encoder: var AbiEncoder, id: RequestId | SlotId | Nonce) =
func encode*(encoder: var AbiEncoder, request: StorageRequest) =
encoder.write(request.fieldValues)
func encode*(encoder: var AbiEncoder, request: Slot) =
encoder.write(request.fieldValues)
func encode*(encoder: var AbiEncoder, slot: Slot) =
encoder.write(slot.fieldValues)
func decode*(decoder: var AbiDecoder, T: type Cid): ?!T =
let data = ?decoder.read(seq[byte])
Cid.init(data).mapFailure
func decode*(decoder: var AbiDecoder, T: type StorageContent): ?!T =
let tupl = ?decoder.read(StorageContent.fieldTypes)
@ -163,24 +177,30 @@ func id*(request: StorageRequest): RequestId =
let encoding = AbiEncoder.encode((request,))
RequestId(keccak256.digest(encoding).data)
func slotId*(requestId: RequestId, slot: UInt256): SlotId =
let encoding = AbiEncoder.encode((requestId, slot))
func slotId*(requestId: RequestId, slotIndex: uint64): SlotId =
let encoding = AbiEncoder.encode((requestId, slotIndex))
SlotId(keccak256.digest(encoding).data)
func slotId*(request: StorageRequest, slot: UInt256): SlotId =
slotId(request.id, slot)
func slotId*(request: StorageRequest, slotIndex: uint64): SlotId =
slotId(request.id, slotIndex)
func id*(slot: Slot): SlotId =
slotId(slot.request, slot.slotIndex)
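
For reference, the id derivations above are plain keccak-256 over the ABI encoding of their inputs. A minimal sketch of the same computation with raw types (the zeroed requestId is illustrative only; pkg/contractabi and nimcrypto are the same imports this module relies on):

import pkg/contractabi
from pkg/nimcrypto import keccak256

let
  requestId = default(array[32, byte]) # stand-in for a real RequestId
  slotIndex = 3'u64
  encoding = AbiEncoder.encode((requestId, slotIndex))
  slotIdBytes = keccak256.digest(encoding).data # array[32, byte], wrapped as SlotId
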
func pricePerSlot*(ask: StorageAsk): UInt256 =
ask.duration * ask.reward
func pricePerSlotPerSecond*(ask: StorageAsk): UInt256 =
ask.pricePerBytePerSecond * ask.slotSize.u256
func price*(ask: StorageAsk): UInt256 =
func pricePerSlot*(ask: StorageAsk): UInt256 =
ask.duration.u256 * ask.pricePerSlotPerSecond
func totalPrice*(ask: StorageAsk): UInt256 =
ask.slots.u256 * ask.pricePerSlot
func price*(request: StorageRequest): UInt256 =
request.ask.price
func totalPrice*(request: StorageRequest): UInt256 =
request.ask.totalPrice
func size*(ask: StorageAsk): UInt256 =
ask.slots.u256 * ask.slotSize
func collateralPerSlot*(ask: StorageAsk): UInt256 =
ask.collateralPerByte * ask.slotSize.u256
func size*(ask: StorageAsk): uint64 =
ask.slots * ask.slotSize
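
The pricing helpers above are simple per-byte arithmetic. A worked example with illustrative numbers (a sketch assuming pkg/stint for UInt256):

import pkg/stint

let
  pricePerBytePerSecond = 2.u256 # wei per byte per second (illustrative)
  collateralPerByte = 1.u256
  slotSize = 1024'u64 # bytes per slot
  duration = 60'u64 # seconds
  slots = 3'u64

let
  pricePerSlotPerSecond = pricePerBytePerSecond * slotSize.u256 # 2048
  pricePerSlot = duration.u256 * pricePerSlotPerSecond # 122880
  totalPrice = slots.u256 * pricePerSlot # 368640
  collateralPerSlot = collateralPerByte * slotSize.u256 # 1024
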

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,16 +7,19 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}
import std/algorithm
import std/net
import std/sequtils
import pkg/chronos
import pkg/libp2p/[cid, multicodec, routing_record, signed_envelope]
import pkg/questionable
import pkg/questionable/results
import pkg/stew/shims/net
import pkg/contractabi/address as ca
import pkg/codexdht/discv5/[routing_table, protocol as discv5]
from pkg/nimcrypto import keccak256
import ./rng
import ./errors
@ -31,15 +34,16 @@ export discv5
logScope:
topics = "codex discovery"
type
Discovery* = ref object of RootObj
type Discovery* = ref object of RootObj
protocol*: discv5.Protocol # dht protocol
key: PrivateKey # private key
peerId: PeerId # the peer id of the local node
announceAddrs*: seq[MultiAddress] # addresses announced as part of the provider records
providerRecord*: ?SignedPeerRecord # record to advertice node connection information, this carry any
providerRecord*: ?SignedPeerRecord
# record to advertise node connection information; this carries any
# address that the node can be connected on
dhtRecord*: ?SignedPeerRecord # record to advertise DHT connection information
isStarted: bool
proc toNodeId*(cid: Cid): NodeId =
## Cid to discovery id
@ -54,51 +58,70 @@ proc toNodeId*(host: ca.Address): NodeId =
readUintBE[256](keccak256.digest(host.toArray).data)
proc findPeer*(
d: Discovery,
peerId: PeerId): Future[?PeerRecord] {.async.} =
d: Discovery, peerId: PeerId
): Future[?PeerRecord] {.async: (raises: [CancelledError]).} =
trace "protocol.resolve..."
## Find peer using the given Discovery object
##
let
node = await d.protocol.resolve(toNodeId(peerId))
try:
let node = await d.protocol.resolve(toNodeId(peerId))
return
if node.isSome():
node.get().record.data.some
else:
PeerRecord.none
except CancelledError as exc:
warn "Error finding peer", peerId = peerId, exc = exc.msg
raise exc
except CatchableError as exc:
warn "Error finding peer", peerId = peerId, exc = exc.msg
return PeerRecord.none
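
Every handler converted in this file follows the same shape: re-raise CancelledError so the new {.async: (raises: [CancelledError]).} contract holds, and downgrade any other error to a warning plus a neutral return value. A minimal sketch of the pattern (fetch/fetchSafe are hypothetical names, assuming pkg/chronos and pkg/chronicles):

import pkg/chronos
import pkg/chronicles

proc fetch(): Future[int] {.async.} =
  return 42

proc fetchSafe(): Future[int] {.async: (raises: [CancelledError]).} =
  try:
    return await fetch()
  except CancelledError as exc:
    raise exc # cancellation must reach the caller
  except CatchableError as exc:
    warn "fetch failed", exc = exc.msg
    return 0 # neutral fallback instead of leaking the error
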
method find*(
d: Discovery,
cid: Cid): Future[seq[SignedPeerRecord]] {.async, base.} =
d: Discovery, cid: Cid
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]), base.} =
## Find block providers
##
without providers =?
(await d.protocol.getProviders(cid.toNodeId())).mapFailure, error:
try:
without providers =? (await d.protocol.getProviders(cid.toNodeId())).mapFailure,
error:
warn "Error finding providers for block", cid, error = error.msg
return providers.filterIt(not (it.data.peerId == d.peerId))
except CancelledError as exc:
warn "Error finding providers for block", cid, exc = exc.msg
raise exc
except CatchableError as exc:
warn "Error finding providers for block", cid, exc = exc.msg
method provide*(d: Discovery, cid: Cid) {.async, base.} =
method provide*(d: Discovery, cid: Cid) {.async: (raises: [CancelledError]), base.} =
## Provide a block Cid
##
let
nodes = await d.protocol.addProvider(
cid.toNodeId(), d.providerRecord.get)
try:
let nodes = await d.protocol.addProvider(cid.toNodeId(), d.providerRecord.get)
if nodes.len <= 0:
warn "Couldn't provide to any nodes!"
except CancelledError as exc:
warn "Error providing block", cid, exc = exc.msg
raise exc
except CatchableError as exc:
warn "Error providing block", cid, exc = exc.msg
method find*(
d: Discovery,
host: ca.Address): Future[seq[SignedPeerRecord]] {.async, base.} =
d: Discovery, host: ca.Address
): Future[seq[SignedPeerRecord]] {.async: (raises: [CancelledError]), base.} =
## Find host providers
##
try:
trace "Finding providers for host", host = $host
without var providers =?
(await d.protocol.getProviders(host.toNodeId())).mapFailure, error:
without var providers =? (await d.protocol.getProviders(host.toNodeId())).mapFailure,
error:
trace "Error finding providers for host", host = $host, exc = error.msg
return
@ -110,26 +133,46 @@ method find*(
system.cmp[uint64](a.data.seqNo, b.data.seqNo)
return providers
except CancelledError as exc:
warn "Error finding providers for host", host = $host, exc = exc.msg
raise exc
except CatchableError as exc:
warn "Error finding providers for host", host = $host, exc = exc.msg
method provide*(d: Discovery, host: ca.Address) {.async, base.} =
method provide*(
d: Discovery, host: ca.Address
) {.async: (raises: [CancelledError]), base.} =
## Provide hosts
##
try:
trace "Providing host", host = $host
let
nodes = await d.protocol.addProvider(
host.toNodeId(), d.providerRecord.get)
let nodes = await d.protocol.addProvider(host.toNodeId(), d.providerRecord.get)
if nodes.len > 0:
trace "Provided to nodes", nodes = nodes.len
except CancelledError as exc:
warn "Error providing host", host = $host, exc = exc.msg
raise exc
except CatchableError as exc:
warn "Error providing host", host = $host, exc = exc.msg
method removeProvider*(
d: Discovery,
peerId: PeerId): Future[void] {.base.} =
d: Discovery, peerId: PeerId
): Future[void] {.base, async: (raises: [CancelledError]).} =
## Remove provider from providers table
##
trace "Removing provider", peerId
d.protocol.removeProvidersLocal(peerId)
try:
await d.protocol.removeProvidersLocal(peerId)
except CancelledError as exc:
warn "Error removing provider", peerId = peerId, exc = exc.msg
raise exc
except CatchableError as exc:
warn "Error removing provider", peerId = peerId, exc = exc.msg
except Exception as exc: # Something in discv5 is raising Exception
warn "Error removing provider", peerId = peerId, exc = exc.msg
raiseAssert("Unexpected Exception in removeProvider")
proc updateAnnounceRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
## Update providers record
@ -137,54 +180,58 @@ proc updateAnnounceRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
d.announceAddrs = @addrs
trace "Updating announce record", addrs = d.announceAddrs
d.providerRecord = SignedPeerRecord.init(
d.key, PeerRecord.init(d.peerId, d.announceAddrs))
info "Updating announce record", addrs = d.announceAddrs
d.providerRecord = SignedPeerRecord
.init(d.key, PeerRecord.init(d.peerId, d.announceAddrs))
.expect("Should construct signed record").some
if not d.protocol.isNil:
d.protocol.updateRecord(d.providerRecord)
.expect("Should update SPR")
d.protocol.updateRecord(d.providerRecord).expect("Should update SPR")
proc updateDhtRecord*(d: Discovery, ip: ValidIpAddress, port: Port) =
proc updateDhtRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
## Update providers record
##
trace "Updating Dht record", ip, port = $port
d.dhtRecord = SignedPeerRecord.init(
d.key, PeerRecord.init(d.peerId, @[
MultiAddress.init(
ip,
IpTransportProtocol.udpProtocol,
port)])).expect("Should construct signed record").some
info "Updating Dht record", addrs = addrs
d.dhtRecord = SignedPeerRecord
.init(d.key, PeerRecord.init(d.peerId, @addrs))
.expect("Should construct signed record").some
if not d.protocol.isNil:
d.protocol.updateRecord(d.dhtRecord)
.expect("Should update SPR")
d.protocol.updateRecord(d.dhtRecord).expect("Should update SPR")
proc start*(d: Discovery) {.async.} =
proc start*(d: Discovery) {.async: (raises: []).} =
try:
d.protocol.open()
await d.protocol.start()
d.isStarted = true
except CatchableError as exc:
error "Error starting discovery", exc = exc.msg
proc stop*(d: Discovery) {.async.} =
await d.protocol.closeWait()
proc stop*(d: Discovery) {.async: (raises: []).} =
if not d.isStarted:
warn "Discovery not started, skipping stop"
return
try:
await noCancel d.protocol.closeWait()
except CatchableError as exc:
error "Error stopping discovery", exc = exc.msg
proc new*(
T: type Discovery,
key: PrivateKey,
bindIp = ValidIpAddress.init(IPv4_any()),
bindIp = IPv4_any(),
bindPort = 0.Port,
announceAddrs: openArray[MultiAddress],
bootstrapNodes: openArray[SignedPeerRecord] = [],
store: Datastore = SQLiteDatastore.new(Memory).expect("Should not fail!")
store: Datastore = SQLiteDatastore.new(Memory).expect("Should not fail!"),
): Discovery =
## Create a new Discovery node instance for the given key and datastore
##
var
self = Discovery(
key: key,
peerId: PeerId.init(key).expect("Should construct PeerId"))
var self =
Discovery(key: key, peerId: PeerId.init(key).expect("Should construct PeerId"))
self.updateAnnounceRecord(announceAddrs)
@ -192,22 +239,20 @@ proc new*(
# FIXME disable IP limits temporarily so we can run our workshop. Re-enable
# and figure out proper solution.
let discoveryConfig = DiscoveryConfig(
tableIpLimits: TableIpLimits(
tableIpLimit: high(uint),
bucketIpLimit:high(uint)
),
bitsPerHop: DefaultBitsPerHop
tableIpLimits: TableIpLimits(tableIpLimit: high(uint), bucketIpLimit: high(uint)),
bitsPerHop: DefaultBitsPerHop,
)
# --------------------------------------------------------------------------
self.protocol = newProtocol(
key,
bindIp = bindIp.toNormalIp,
bindIp = bindIp,
bindPort = bindPort,
record = self.providerRecord.get,
bootstrapRecords = bootstrapNodes,
rng = Rng.instance(),
providers = ProvidersManager.new(store),
config = discoveryConfig)
config = discoveryConfig,
)
self

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

View File

@ -1,211 +0,0 @@
## Nim-Codex
## Copyright (c) 2024 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/sequtils
import pkg/taskpools
import pkg/taskpools/flowvars
import pkg/chronos
import pkg/chronos/threadsync
import pkg/questionable/results
import ./backend
import ../errors
import ../logutils
logScope:
topics = "codex asyncerasure"
const
CompletitionTimeout = 1.seconds # Maximum await time for completition after receiving a signal
CompletitionRetryDelay = 10.millis
type
EncoderBackendPtr = ptr EncoderBackend
DecoderBackendPtr = ptr DecoderBackend
# Args objects are missing seq[seq[byte]] field, to avoid unnecessary data copy
EncodeTaskArgs = object
signal: ThreadSignalPtr
backend: EncoderBackendPtr
blockSize: int
ecM: int
DecodeTaskArgs = object
signal: ThreadSignalPtr
backend: DecoderBackendPtr
blockSize: int
ecK: int
SharedArrayHolder*[T] = object
data: ptr UncheckedArray[T]
size: int
EncodeTaskResult = Result[SharedArrayHolder[byte], cstring]
DecodeTaskResult = Result[SharedArrayHolder[byte], cstring]
proc encodeTask(args: EncodeTaskArgs, data: seq[seq[byte]]): EncodeTaskResult =
var
data = data.unsafeAddr
parity = newSeqWith[seq[byte]](args.ecM, newSeq[byte](args.blockSize))
try:
let res = args.backend[].encode(data[], parity)
if res.isOk:
let
resDataSize = parity.len * args.blockSize
resData = cast[ptr UncheckedArray[byte]](allocShared0(resDataSize))
arrHolder = SharedArrayHolder[byte](
data: resData,
size: resDataSize
)
for i in 0..<parity.len:
copyMem(addr resData[i * args.blockSize], addr parity[i][0], args.blockSize)
return ok(arrHolder)
else:
return err(res.error)
except CatchableError as exception:
return err(exception.msg.cstring)
finally:
if err =? args.signal.fireSync().mapFailure.errorOption():
error "Error firing signal", msg = err.msg
proc decodeTask(args: DecodeTaskArgs, data: seq[seq[byte]], parity: seq[seq[byte]]): DecodeTaskResult =
var
data = data.unsafeAddr
parity = parity.unsafeAddr
recovered = newSeqWith[seq[byte]](args.ecK, newSeq[byte](args.blockSize))
try:
let res = args.backend[].decode(data[], parity[], recovered)
if res.isOk:
let
resDataSize = recovered.len * args.blockSize
resData = cast[ptr UncheckedArray[byte]](allocShared0(resDataSize))
arrHolder = SharedArrayHolder[byte](
data: resData,
size: resDataSize
)
for i in 0..<recovered.len:
copyMem(addr resData[i * args.blockSize], addr recovered[i][0], args.blockSize)
return ok(arrHolder)
else:
return err(res.error)
except CatchableError as exception:
return err(exception.msg.cstring)
finally:
if err =? args.signal.fireSync().mapFailure.errorOption():
error "Error firing signal", msg = err.msg
proc proxySpawnEncodeTask(
tp: Taskpool,
args: EncodeTaskArgs,
data: ref seq[seq[byte]]
): Flowvar[EncodeTaskResult] =
tp.spawn encodeTask(args, data[])
proc proxySpawnDecodeTask(
tp: Taskpool,
args: DecodeTaskArgs,
data: ref seq[seq[byte]],
parity: ref seq[seq[byte]]
): Flowvar[DecodeTaskResult] =
tp.spawn decodeTask(args, data[], parity[])
proc awaitResult[T](signal: ThreadSignalPtr, handle: Flowvar[T]): Future[?!T] {.async.} =
await wait(signal)
var
res: T
awaitTotal: Duration
while awaitTotal < CompletitionTimeout:
if handle.tryComplete(res):
return success(res)
else:
awaitTotal += CompletitionRetryDelay
await sleepAsync(CompletitionRetryDelay)
return failure("Task signaled finish but didn't return any result within " & $CompletitionRetryDelay)
proc asyncEncode*(
tp: Taskpool,
backend: EncoderBackend,
data: ref seq[seq[byte]],
blockSize: int,
ecM: int
): Future[?!ref seq[seq[byte]]] {.async.} =
without signal =? ThreadSignalPtr.new().mapFailure, err:
return failure(err)
try:
let
blockSize = data[0].len
args = EncodeTaskArgs(signal: signal, backend: unsafeAddr backend, blockSize: blockSize, ecM: ecM)
handle = proxySpawnEncodeTask(tp, args, data)
without res =? await awaitResult(signal, handle), err:
return failure(err)
if res.isOk:
var parity = seq[seq[byte]].new()
parity[].setLen(ecM)
for i in 0..<parity[].len:
parity[i] = newSeq[byte](blockSize)
copyMem(addr parity[i][0], addr res.value.data[i * blockSize], blockSize)
deallocShared(res.value.data)
return success(parity)
else:
return failure($res.error)
finally:
if err =? signal.close().mapFailure.errorOption():
error "Error closing signal", msg = $err.msg
proc asyncDecode*(
tp: Taskpool,
backend: DecoderBackend,
data, parity: ref seq[seq[byte]],
blockSize: int
): Future[?!ref seq[seq[byte]]] {.async.} =
without signal =? ThreadSignalPtr.new().mapFailure, err:
return failure(err)
try:
let
ecK = data[].len
args = DecodeTaskArgs(signal: signal, backend: unsafeAddr backend, blockSize: blockSize, ecK: ecK)
handle = proxySpawnDecodeTask(tp, args, data, parity)
without res =? await awaitResult(signal, handle), err:
return failure(err)
if res.isOk:
var recovered = seq[seq[byte]].new()
recovered[].setLen(ecK)
for i in 0..<recovered[].len:
recovered[i] = newSeq[byte](blockSize)
copyMem(addr recovered[i][0], addr res.value.data[i * blockSize], blockSize)
deallocShared(res.value.data)
return success(recovered)
else:
return failure($res.error)
finally:
if err =? signal.close().mapFailure.errorOption():
error "Error closing signal", msg = $err.msg

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,9 +7,7 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
push: {.upraises: [].}
{.push raises: [], gcsafe.}
import ../stores
@ -22,26 +20,25 @@ type
EncoderBackend* = ref object of ErasureBackend
DecoderBackend* = ref object of ErasureBackend
method release*(self: ErasureBackend) {.base.} =
method release*(self: ErasureBackend) {.base, gcsafe.} =
## release the backend
##
raiseAssert("not implemented!")
method encode*(
self: EncoderBackend,
buffers,
parity: var openArray[seq[byte]]
): Result[void, cstring] {.base.} =
buffers, parity: ptr UncheckedArray[ptr UncheckedArray[byte]],
dataLen, parityLen: int,
): Result[void, cstring] {.base, gcsafe.} =
## encode buffers using a backend
##
raiseAssert("not implemented!")
method decode*(
self: DecoderBackend,
buffers,
parity,
recovered: var openArray[seq[byte]]
): Result[void, cstring] {.base.} =
buffers, parity, recovered: ptr UncheckedArray[ptr UncheckedArray[byte]],
dataLen, parityLen, recoveredLen: int,
): Result[void, cstring] {.base, gcsafe.} =
## decode buffers using a backend
##
raiseAssert("not implemented!")
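
The new signatures trade GC-managed openArray[seq[byte]] buffers for raw double pointers so the data can be handed to a taskpool thread without involving the GC. The diff later uses the repo's arrayutils helpers (createDoubleArray, makeUncheckedArray) for this; the following hand-rolled conversion is an illustrative sketch only:

proc toDoubleArray(src: seq[seq[byte]]): ptr UncheckedArray[ptr UncheckedArray[byte]] =
  ## Copy GC-managed rows into shared memory so another thread may read them.
  ## The caller must deallocShared each row and then the outer table.
  result = cast[ptr UncheckedArray[ptr UncheckedArray[byte]]](
    allocShared0(src.len * sizeof(pointer))
  )
  for i, row in src:
    result[i] = cast[ptr UncheckedArray[byte]](allocShared0(row.len))
    if row.len > 0:
      copyMem(result[i], unsafeAddr row[0], row.len)
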

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -10,7 +10,7 @@
import std/options
import pkg/leopard
import pkg/stew/results
import pkg/results
import ../backend
@ -23,42 +23,38 @@ type
method encode*(
self: LeoEncoderBackend,
data,
parity: var openArray[seq[byte]]): Result[void, cstring] =
data, parity: ptr UncheckedArray[ptr UncheckedArray[byte]],
dataLen, parityLen: int,
): Result[void, cstring] =
## Encode data using Leopard backend
if parity.len == 0:
if parityLen == 0:
return ok()
var encoder = if self.encoder.isNone:
self.encoder = (? LeoEncoder.init(
self.blockSize,
self.buffers,
self.parity)).some
var encoder =
if self.encoder.isNone:
self.encoder = (?LeoEncoder.init(self.blockSize, self.buffers, self.parity)).some
self.encoder.get()
else:
self.encoder.get()
encoder.encode(data, parity)
encoder.encode(data, parity, dataLen, parityLen)
method decode*(
self: LeoDecoderBackend,
data,
parity,
recovered: var openArray[seq[byte]]): Result[void, cstring] =
data, parity, recovered: ptr UncheckedArray[ptr UncheckedArray[byte]],
dataLen, parityLen, recoveredLen: int,
): Result[void, cstring] =
## Decode data using given Leopard backend
var decoder =
if self.decoder.isNone:
self.decoder = (? LeoDecoder.init(
self.blockSize,
self.buffers,
self.parity)).some
self.decoder = (?LeoDecoder.init(self.blockSize, self.buffers, self.parity)).some
self.decoder.get()
else:
self.decoder.get()
decoder.decode(data, parity, recovered)
decoder.decode(data, parity, recovered, dataLen, parityLen, recoveredLen)
method release*(self: LeoEncoderBackend) =
if self.encoder.isSome:
@ -69,25 +65,15 @@ method release*(self: LeoDecoderBackend) =
self.decoder.get().free()
proc new*(
T: type LeoEncoderBackend,
blockSize,
buffers,
parity: int): LeoEncoderBackend =
T: type LeoEncoderBackend, blockSize, buffers, parity: int
): LeoEncoderBackend =
## Create an instance of an Leopard Encoder backend
##
LeoEncoderBackend(
blockSize: blockSize,
buffers: buffers,
parity: parity)
LeoEncoderBackend(blockSize: blockSize, buffers: buffers, parity: parity)
proc new*(
T: type LeoDecoderBackend,
blockSize,
buffers,
parity: int): LeoDecoderBackend =
T: type LeoDecoderBackend, blockSize, buffers, parity: int
): LeoDecoderBackend =
## Create an instance of an Leopard Decoder backend
##
LeoDecoderBackend(
blockSize: blockSize,
buffers: buffers,
parity: parity)
LeoDecoderBackend(blockSize: blockSize, buffers: buffers, parity: parity)

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,14 +7,13 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
{.push raises: [], gcsafe.}
push: {.upraises: [].}
import std/sequtils
import std/sugar
import std/[sugar, atomics, sequtils]
import pkg/chronos
import pkg/chronos/threadsync
import pkg/chronicles
import pkg/libp2p/[multicodec, cid, multihash]
import pkg/libp2p/protobuf/minprotobuf
import pkg/taskpools
@ -23,15 +22,17 @@ import ../logutils
import ../manifest
import ../merkletree
import ../stores
import ../clock
import ../blocktype as bt
import ../utils
import ../utils/asynciter
import ../indexingstrategy
import ../errors
import ../utils/arrayutils
import pkg/stew/byteutils
import ./backend
import ./asyncbackend
export backend
@ -61,18 +62,17 @@ type
## columns (with up to M blocks missing per column),
## or any combination there of.
##
EncoderProvider* =
proc(size, blocks, parity: int): EncoderBackend {.raises: [Defect], noSideEffect.}
EncoderProvider* = proc(size, blocks, parity: int): EncoderBackend
{.raises: [Defect], noSideEffect.}
DecoderProvider* = proc(size, blocks, parity: int): DecoderBackend
{.raises: [Defect], noSideEffect.}
DecoderProvider* =
proc(size, blocks, parity: int): DecoderBackend {.raises: [Defect], noSideEffect.}
Erasure* = ref object
taskPool: Taskpool
encoderProvider*: EncoderProvider
decoderProvider*: DecoderProvider
store*: BlockStore
taskpool: Taskpool
EncodingParams = object
ecK: Natural
@ -82,6 +82,31 @@ type
blocksCount: Natural
strategy: StrategyType
ErasureError* = object of CodexError
InsufficientBlocksError* = object of ErasureError
# Minimum size, in bytes, that the dataset must have had
# for the encoding request to have succeeded with the parameters
# provided.
minSize*: NBytes
EncodeTask = object
success: Atomic[bool]
erasure: ptr Erasure
blocks: ptr UncheckedArray[ptr UncheckedArray[byte]]
parity: ptr UncheckedArray[ptr UncheckedArray[byte]]
blockSize, blocksLen, parityLen: int
signal: ThreadSignalPtr
DecodeTask = object
success: Atomic[bool]
erasure: ptr Erasure
blocks: ptr UncheckedArray[ptr UncheckedArray[byte]]
parity: ptr UncheckedArray[ptr UncheckedArray[byte]]
recovered: ptr UncheckedArray[ptr UncheckedArray[byte]]
blockSize, blocksLen: int
parityLen, recoveredLen: int
signal: ThreadSignalPtr
func indexToPos(steps, idx, step: int): int {.inline.} =
## Convert an index to a position in the encoded
## dataset
@ -93,21 +118,25 @@ func indexToPos(steps, idx, step: int): int {.inline.} =
(idx - step) div steps
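
A worked value for the helper above: with steps = 3, step 1 owns the stepped indices 1, 4, 7, so index 7 maps to position (7 - 1) div 3 = 2, i.e. the third block of that step:

assert indexToPos(steps = 3, idx = 7, step = 1) == 2
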
proc getPendingBlocks(
self: Erasure,
manifest: Manifest,
indicies: seq[int]): AsyncIter[(?!bt.Block, int)] =
self: Erasure, manifest: Manifest, indices: seq[int]
): AsyncIter[(?!bt.Block, int)] =
## Get pending blocks iterator
##
var pendingBlocks: seq[Future[(?!bt.Block, int)]] = @[]
var
proc attachIndex(
fut: Future[?!bt.Block], i: int
): Future[(?!bt.Block, int)] {.async.} =
## avoids closure capture issues
return (await fut, i)
for blockIndex in indices:
# request blocks from the store
pendingBlocks = indicies.map( (i: int) =>
self.store.getBlock(
BlockAddress.init(manifest.treeCid, i)
).map((r: ?!bt.Block) => (r, i)) # Get the data blocks (first K)
)
let fut = self.store.getBlock(BlockAddress.init(manifest.treeCid, blockIndex))
pendingBlocks.add(attachIndex(fut, blockIndex))
proc isFinished(): bool = pendingBlocks.len == 0
proc isFinished(): bool =
pendingBlocks.len == 0
proc genNext(): Future[(?!bt.Block, int)] {.async.} =
let completedFut = await one(pendingBlocks)
@ -118,7 +147,9 @@ proc getPendingBlocks(
let (_, index) = await completedFut
raise newException(
CatchableError,
"Future for block id not found, tree cid: " & $manifest.treeCid & ", index: " & $index)
"Future for block id not found, tree cid: " & $manifest.treeCid & ", index: " &
$index,
)
AsyncIter[(?!bt.Block, int)].new(genNext, isFinished)
@ -129,25 +160,25 @@ proc prepareEncodingData(
step: Natural,
data: ref seq[seq[byte]],
cids: ref seq[Cid],
emptyBlock: seq[byte]): Future[?!Natural] {.async.} =
emptyBlock: seq[byte],
): Future[?!Natural] {.async.} =
## Prepare data for encoding
##
let
strategy = params.strategy.init(
firstIndex = 0,
lastIndex = params.rounded - 1,
iterations = params.steps
firstIndex = 0, lastIndex = params.rounded - 1, iterations = params.steps
)
indicies = toSeq(strategy.getIndicies(step))
pendingBlocksIter = self.getPendingBlocks(manifest, indicies.filterIt(it < manifest.blocksCount))
indices = toSeq(strategy.getIndices(step))
pendingBlocksIter =
self.getPendingBlocks(manifest, indices.filterIt(it < manifest.blocksCount))
var resolved = 0
for fut in pendingBlocksIter:
let (blkOrErr, idx) = await fut
without blk =? blkOrErr, err:
warn "Failed retreiving a block", treeCid = manifest.treeCid, idx, msg = err.msg
continue
warn "Failed retrieving a block", treeCid = manifest.treeCid, idx, msg = err.msg
return failure(err)
let pos = indexToPos(params.steps, idx, step)
shallowCopy(data[pos], if blk.isEmpty: emptyBlock else: blk.data)
@ -155,11 +186,12 @@ proc prepareEncodingData(
resolved.inc()
for idx in indicies.filterIt(it >= manifest.blocksCount):
for idx in indices.filterIt(it >= manifest.blocksCount):
let pos = indexToPos(params.steps, idx, step)
trace "Padding with empty block", idx
shallowCopy(data[pos], emptyBlock)
without emptyBlockCid =? emptyCid(manifest.version, manifest.hcodec, manifest.codec), err:
without emptyBlockCid =? emptyCid(manifest.version, manifest.hcodec, manifest.codec),
err:
return failure(err)
cids[idx] = emptyBlockCid
@ -172,7 +204,8 @@ proc prepareDecodingData(
data: ref seq[seq[byte]],
parityData: ref seq[seq[byte]],
cids: ref seq[Cid],
emptyBlock: seq[byte]): Future[?!(Natural, Natural)] {.async.} =
emptyBlock: seq[byte],
): Future[?!(Natural, Natural)] {.async.} =
## Prepare data for decoding
## `encoded` - the encoded manifest
## `step` - the current step
@ -184,12 +217,10 @@ proc prepareDecodingData(
let
strategy = encoded.protectedStrategy.init(
firstIndex = 0,
lastIndex = encoded.blocksCount - 1,
iterations = encoded.steps
firstIndex = 0, lastIndex = encoded.blocksCount - 1, iterations = encoded.steps
)
indicies = toSeq(strategy.getIndicies(step))
pendingBlocksIter = self.getPendingBlocks(encoded, indicies)
indices = toSeq(strategy.getIndices(step))
pendingBlocksIter = self.getPendingBlocks(encoded, indices)
var
dataPieces = 0
@ -203,11 +234,10 @@ proc prepareDecodingData(
let (blkOrErr, idx) = await fut
without blk =? blkOrErr, err:
trace "Failed retreiving a block", idx, treeCid = encoded.treeCid, msg = err.msg
trace "Failed retrieving a block", idx, treeCid = encoded.treeCid, msg = err.msg
continue
let
pos = indexToPos(encoded.steps, idx, step)
let pos = indexToPos(encoded.steps, idx, step)
logScope:
cid = blk.cid
@ -219,7 +249,9 @@ proc prepareDecodingData(
cids[idx] = blk.cid
if idx >= encoded.rounded:
trace "Retrieved parity block"
shallowCopy(parityData[pos - encoded.ecK], if blk.isEmpty: emptyBlock else: blk.data)
shallowCopy(
parityData[pos - encoded.ecK], if blk.isEmpty: emptyBlock else: blk.data
)
parityPieces.inc
else:
trace "Retrieved data block"
@ -233,14 +265,18 @@ proc prepareDecodingData(
proc init*(
_: type EncodingParams,
manifest: Manifest,
ecK: Natural, ecM: Natural,
strategy: StrategyType): ?!EncodingParams =
ecK: Natural,
ecM: Natural,
strategy: StrategyType,
): ?!EncodingParams =
if ecK > manifest.blocksCount:
return failure(
"Unable to encode manifest, not enough blocks, ecK = " &
$ecK &
", blocksCount = " &
$manifest.blocksCount)
let exc = (ref InsufficientBlocksError)(
msg:
"Unable to encode manifest, not enough blocks, ecK = " & $ecK &
", blocksCount = " & $manifest.blocksCount,
minSize: ecK.NBytes * manifest.blockSize,
)
return failure(exc)
let
rounded = roundUp(manifest.blocksCount, ecK)
@ -253,19 +289,81 @@ proc init*(
rounded: rounded,
steps: steps,
blocksCount: blocksCount,
strategy: strategy
strategy: strategy,
)
proc encodeData(
proc leopardEncodeTask(tp: Taskpool, task: ptr EncodeTask) {.gcsafe.} =
# Task suitable for running in taskpools - look, no GC!
let encoder =
task[].erasure.encoderProvider(task[].blockSize, task[].blocksLen, task[].parityLen)
defer:
encoder.release()
discard task[].signal.fireSync()
if (
let res =
encoder.encode(task[].blocks, task[].parity, task[].blocksLen, task[].parityLen)
res.isErr
):
warn "Error from leopard encoder backend!", error = $res.error
task[].success.store(false)
else:
task[].success.store(true)
proc asyncEncode*(
self: Erasure,
manifest: Manifest,
params: EncodingParams
blockSize, blocksLen, parityLen: int,
blocks: ref seq[seq[byte]],
parity: ptr UncheckedArray[ptr UncheckedArray[byte]],
): Future[?!void] {.async: (raises: [CancelledError]).} =
without threadPtr =? ThreadSignalPtr.new():
return failure("Unable to create thread signal")
defer:
threadPtr.close().expect("closing once works")
var data = makeUncheckedArray(blocks)
defer:
dealloc(data)
## Create an encode task with block data
var task = EncodeTask(
erasure: addr self,
blockSize: blockSize,
blocksLen: blocksLen,
parityLen: parityLen,
blocks: data,
parity: parity,
signal: threadPtr,
)
doAssert self.taskPool.numThreads > 1,
"Must have at least one separate thread or signal will never be fired"
self.taskPool.spawn leopardEncodeTask(self.taskPool, addr task)
let threadFut = threadPtr.wait()
if joinErr =? catch(await threadFut.join()).errorOption:
if err =? catch(await noCancel threadFut).errorOption:
return failure(err)
if joinErr of CancelledError:
raise (ref CancelledError) joinErr
else:
return failure(joinErr)
if not task.success.load():
return failure("Leopard encoding task failed")
success()
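
asyncEncode parks on a ThreadSignalPtr while the taskpool thread runs the blocking Leopard call, then reads an Atomic[bool] for the outcome. A stripped-down sketch of that handshake (heavyWork/offload are hypothetical names, assuming pkg/taskpools and chronos threadsync):

import pkg/chronos
import pkg/chronos/threadsync
import pkg/taskpools
import pkg/questionable/results

proc heavyWork(signal: ThreadSignalPtr) =
  # runs on a taskpool thread; must not touch GC-managed memory
  discard signal.fireSync() # wake the async waiter when done

proc offload(tp: Taskpool): Future[?!void] {.async.} =
  without signal =? ThreadSignalPtr.new():
    return failure("Unable to create thread signal")
  defer:
    signal.close().expect("closing once works")
  doAssert tp.numThreads > 1, "need a spare thread to fire the signal"
  tp.spawn heavyWork(signal)
  await signal.wait() # resumes once heavyWork fires
  success()
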
proc encodeData(
self: Erasure, manifest: Manifest, params: EncodingParams
): Future[?!Manifest] {.async.} =
## Encode blocks pointed to by the protected manifest
##
## `manifest` - the manifest to encode
##
logScope:
steps = params.steps
rounded_blocks = params.rounded
@ -275,7 +373,6 @@ proc encodeData(
var
cids = seq[Cid].new()
encoder = self.encoderProvider(manifest.blockSize.int, params.ecK, params.ecM)
emptyBlock = newSeq[byte](manifest.blockSize.int)
cids[].setLen(params.blocksCount)
@ -285,30 +382,46 @@ proc encodeData(
# TODO: Don't allocate a new seq every time, allocate once and zero out
var
data = seq[seq[byte]].new() # number of blocks to encode
parity = createDoubleArray(params.ecM, manifest.blockSize.int)
defer:
freeDoubleArray(parity, params.ecM)
data[].setLen(params.ecK)
# TODO: this is a tight blocking loop so we sleep here to allow
# other events to be processed, this should be addressed
# by threading
await sleepAsync(10.millis)
without resolved =?
(await self.prepareEncodingData(manifest, params, step, data, cids, emptyBlock)), err:
(await self.prepareEncodingData(manifest, params, step, data, cids, emptyBlock)),
err:
trace "Unable to prepare data", error = err.msg
return failure(err)
trace "Erasure coding data", data = data[].len, parity = params.ecM
trace "Erasure coding data", data = data[].len
without parity =? await asyncEncode(self.taskpool, encoder, data, manifest.blockSize.int, params.ecM), err:
trace "Error encoding data", err = err.msg
try:
if err =? (
await self.asyncEncode(
manifest.blockSize.int, params.ecK, params.ecM, data, parity
)
).errorOption:
return failure(err)
except CancelledError as exc:
raise exc
var idx = params.rounded + step
for j in 0 ..< params.ecM:
without blk =? bt.Block.new(parity[j]), error:
var innerPtr: ptr UncheckedArray[byte] = parity[][j]
without blk =? bt.Block.new(innerPtr.toOpenArray(0, manifest.blockSize.int - 1)),
error:
trace "Unable to create parity block", err = error.msg
return failure(error)
trace "Adding parity block", cid = blk.cid, idx
cids[idx] = blk.cid
if isErr (await self.store.putBlock(blk)):
trace "Unable to store block!", cid = blk.cid
if error =? (await self.store.putBlock(blk)).errorOption:
warn "Unable to store block!", cid = blk.cid, msg = error.msg
return failure("Unable to store block!")
idx.inc(params.steps)
@ -327,7 +440,7 @@ proc encodeData(
datasetSize = (manifest.blockSize.int * params.blocksCount).NBytes,
ecK = params.ecK,
ecM = params.ecM,
strategy = params.strategy
strategy = params.strategy,
)
trace "Encoded data successfully", treeCid, blocksCount = params.blocksCount
@ -338,15 +451,14 @@ proc encodeData(
except CatchableError as exc:
trace "Erasure coding encoding error", exc = exc.msg
return failure(exc)
finally:
encoder.release()
proc encode*(
self: Erasure,
manifest: Manifest,
blocks: Natural,
parity: Natural,
strategy = SteppedStrategy): Future[?!Manifest] {.async.} =
strategy = SteppedStrategy,
): Future[?!Manifest] {.async.} =
## Encode a manifest into one that is erasure protected.
##
## `manifest` - the original manifest to be encoded
@ -362,16 +474,84 @@ proc encode*(
return success encodedManifest
proc decode*(
self: Erasure,
encoded: Manifest): Future[?!Manifest] {.async.} =
## Decode a protected manifest into it's original
## manifest
##
## `encoded` - the encoded (protected) manifest to
## be recovered
##
proc leopardDecodeTask(tp: Taskpool, task: ptr DecodeTask) {.gcsafe.} =
# Task suitable for running in taskpools - look, no GC!
let decoder =
task[].erasure.decoderProvider(task[].blockSize, task[].blocksLen, task[].parityLen)
defer:
decoder.release()
discard task[].signal.fireSync()
if (
let res = decoder.decode(
task[].blocks,
task[].parity,
task[].recovered,
task[].blocksLen,
task[].parityLen,
task[].recoveredLen,
)
res.isErr
):
warn "Error from leopard decoder backend!", error = $res.error
task[].success.store(false)
else:
task[].success.store(true)
proc asyncDecode*(
self: Erasure,
blockSize, blocksLen, parityLen: int,
blocks, parity: ref seq[seq[byte]],
recovered: ptr UncheckedArray[ptr UncheckedArray[byte]],
): Future[?!void] {.async: (raises: [CancelledError]).} =
without threadPtr =? ThreadSignalPtr.new():
return failure("Unable to create thread signal")
defer:
threadPtr.close().expect("closing once works")
var
blockData = makeUncheckedArray(blocks)
parityData = makeUncheckedArray(parity)
defer:
dealloc(blockData)
dealloc(parityData)
## Create a decode task with block data
var task = DecodeTask(
erasure: addr self,
blockSize: blockSize,
blocksLen: blocksLen,
parityLen: parityLen,
recoveredLen: blocksLen,
blocks: blockData,
parity: parityData,
recovered: recovered,
signal: threadPtr,
)
doAssert self.taskPool.numThreads > 1,
"Must have at least one separate thread or signal will never be fired"
self.taskPool.spawn leopardDecodeTask(self.taskPool, addr task)
let threadFut = threadPtr.wait()
if joinErr =? catch(await threadFut.join()).errorOption:
if err =? catch(await noCancel threadFut).errorOption:
return failure(err)
if joinErr of CancelledError:
raise (ref CancelledError) joinErr
else:
return failure(joinErr)
if not task.success.load():
return failure("Leopard decoding task failed")
success()
proc decodeInternal(
self: Erasure, encoded: Manifest
): Future[?!(ref seq[Cid], seq[Natural])] {.async.} =
logScope:
steps = encoded.steps
rounded_blocks = encoded.rounded
@ -386,15 +566,26 @@ proc decode*(
cids[].setLen(encoded.blocksCount)
try:
for step in 0 ..< encoded.steps:
# TODO: this is a tight blocking loop so we sleep here to allow
# other events to be processed, this should be addressed
# by threading
await sleepAsync(10.millis)
var
data = seq[seq[byte]].new()
parity = seq[seq[byte]].new()
parityData = seq[seq[byte]].new()
recovered = createDoubleArray(encoded.ecK, encoded.blockSize.int)
defer:
freeDoubleArray(recovered, encoded.ecK)
data[].setLen(encoded.ecK) # set len to K
parity[].setLen(encoded.ecM) # set len to M
parityData[].setLen(encoded.ecM) # set len to M
without (dataPieces, _) =?
(await self.prepareDecodingData(encoded, step, data, parity, cids, emptyBlock)), err:
without (dataPieces, _) =? (
await self.prepareDecodingData(
encoded, step, data, parityData, cids, emptyBlock
)
), err:
trace "Unable to prepare data", error = err.msg
return failure(err)
@ -403,23 +594,34 @@ proc decode*(
continue
trace "Erasure decoding data"
without recovered =? await asyncDecode(self.taskpool, decoder, data, parity, encoded.blockSize.int), err:
trace "Error decoding data", err = err.msg
try:
if err =? (
await self.asyncDecode(
encoded.blockSize.int, encoded.ecK, encoded.ecM, data, parityData, recovered
)
).errorOption:
return failure(err)
except CancelledError as exc:
raise exc
for i in 0 ..< encoded.ecK:
let idx = i * encoded.steps + step
if data[i].len <= 0 and not cids[idx].isEmpty:
without blk =? bt.Block.new(recovered[i]), error:
var innerPtr: ptr UncheckedArray[byte] = recovered[][i]
without blk =? bt.Block.new(
innerPtr.toOpenArray(0, encoded.blockSize.int - 1)
), error:
trace "Unable to create block!", exc = error.msg
return failure(error)
trace "Recovered block", cid = blk.cid, index = i
if isErr (await self.store.putBlock(blk)):
trace "Unable to store block!", cid = blk.cid
if error =? (await self.store.putBlock(blk)).errorOption:
warn "Unable to store block!", cid = blk.cid, msg = error.msg
return failure("Unable to store block!")
self.store.completeBlock(BlockAddress.init(encoded.treeCid, idx), blk)
cids[idx] = blk.cid
recoveredIndices.add(idx)
except CancelledError as exc:
@ -431,6 +633,19 @@ proc decode*(
finally:
decoder.release()
return (cids, recoveredIndices).success
proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} =
## Decode a protected manifest into its original
## manifest
##
## `encoded` - the encoded (protected) manifest to
## be recovered
##
without (cids, recoveredIndices) =? (await self.decodeInternal(encoded)), err:
return failure(err)
without tree =? CodexTree.init(cids[0 ..< encoded.originalBlocksCount]), err:
return failure(err)
@ -438,10 +653,12 @@ proc decode*(
return failure(err)
if treeCid != encoded.originalTreeCid:
return failure("Original tree root differs from the tree root computed out of recovered data")
return failure(
"Original tree root differs from the tree root computed out of recovered data"
)
let idxIter = Iter[Natural].new(recoveredIndices)
.filter((i: Natural) => i < tree.leavesCount)
let idxIter =
Iter[Natural].new(recoveredIndices).filter((i: Natural) => i < tree.leavesCount)
if err =? (await self.store.putSomeProofs(tree, idxIter)).errorOption:
return failure(err)
@ -450,6 +667,44 @@ proc decode*(
return decoded.success
proc repair*(self: Erasure, encoded: Manifest): Future[?!void] {.async.} =
## Repair a protected manifest by reconstructing the full dataset
##
## `encoded` - the encoded (protected) manifest to
## be repaired
##
without (cids, _) =? (await self.decodeInternal(encoded)), err:
return failure(err)
without tree =? CodexTree.init(cids[0 ..< encoded.originalBlocksCount]), err:
return failure(err)
without treeCid =? tree.rootCid, err:
return failure(err)
if treeCid != encoded.originalTreeCid:
return failure(
"Original tree root differs from the tree root computed out of recovered data"
)
if err =? (await self.store.putAllProofs(tree)).errorOption:
return failure(err)
without repaired =? (
await self.encode(
Manifest.new(encoded), encoded.ecK, encoded.ecM, encoded.protectedStrategy
)
), err:
return failure(err)
if repaired.treeCid != encoded.treeCid:
return failure(
"Original tree root differs from the repaired tree root encoded out of recovered data"
)
return success()
proc start*(self: Erasure) {.async.} =
return
@ -461,12 +716,13 @@ proc new*(
store: BlockStore,
encoderProvider: EncoderProvider,
decoderProvider: DecoderProvider,
taskpool: Taskpool): Erasure =
taskPool: Taskpool,
): Erasure =
## Create a new Erasure instance for encoding and decoding manifests
##
Erasure(
store: store,
encoderProvider: encoderProvider,
decoderProvider: decoderProvider,
taskpool: taskpool)
taskPool: taskPool,
)

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,9 +7,13 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/options
{.push raises: [].}
import pkg/stew/results
import std/options
import std/sugar
import std/sequtils
import pkg/results
import pkg/chronos
import pkg/questionable/results
@ -19,14 +23,18 @@ type
CodexError* = object of CatchableError # base codex error
CodexResult*[T] = Result[T, ref CodexError]
FinishedFailed*[T] = tuple[success: seq[Future[T]], failure: seq[Future[T]]]
template mapFailure*[T, V, E](
exp: Result[T, V],
exc: typedesc[E],
exp: Result[T, V], exc: typedesc[E]
): Result[T, ref CatchableError] =
## Convert `Result[T, E]` to `Result[E, ref CatchableError]`
##
exp.mapErr(proc (e: V): ref CatchableError = (ref exc)(msg: $e))
exp.mapErr(
proc(e: V): ref CatchableError =
(ref exc)(msg: $e)
)
template mapFailure*[T, V](exp: Result[T, V]): Result[T, ref CatchableError] =
mapFailure(exp, CodexError)
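
mapFailure pulls third-party error types into the codebase-wide Result[T, ref CatchableError] convention. A small usage sketch (assuming pkg/results and the template above):

import pkg/results

let raw = Result[int, cstring].err("boom") # a foreign error type
let adapted = raw.mapFailure # now Result[int, ref CatchableError]
assert adapted.isErr and adapted.error.msg == "boom"
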
@ -38,12 +46,43 @@ func toFailure*[T](exp: Option[T]): Result[T, ref CatchableError] {.inline.} =
else:
T.failure("Option is None")
proc allFutureResult*[T](fut: seq[Future[T]]): Future[?!void] {.async.} =
try:
await allFuturesThrowing(fut)
except CancelledError as exc:
raise exc
except CatchableError as exc:
return failure(exc.msg)
proc allFinishedFailed*[T](
futs: auto
): Future[FinishedFailed[T]] {.async: (raises: [CancelledError]).} =
## Check if all futures have finished or failed
##
## TODO: wip, not sure if we want this - at the minimum,
## we should probably avoid the async transform
return success()
var res: FinishedFailed[T] = (@[], @[])
await allFutures(futs)
for f in futs:
if f.failed:
res.failure.add f
else:
res.success.add f
return res
proc allFinishedValues*[T](
futs: auto
): Future[?!seq[T]] {.async: (raises: [CancelledError]).} =
## If all futures have finished, return corresponding values,
## otherwise return failure
##
# wait for all futures to be either completed, failed or canceled
await allFutures(futs)
let numOfFailed = futs.countIt(it.failed)
if numOfFailed > 0:
return failure "Some futures failed (" & $numOfFailed & "))"
# here, we know there are no failed futures in "futs"
# and we are only interested in those that completed successfully
let values = collect:
for b in futs:
if b.finished:
b.value
return success values
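
A usage sketch for the new helper (the work proc is hypothetical, assuming pkg/chronos and pkg/questionable):

import pkg/chronos
import pkg/questionable/results

proc work(n: int): Future[int] {.async.} =
  if n == 0:
    raise newException(ValueError, "bad input")
  return n * 2

proc demo() {.async.} =
  let futs = @[work(1), work(2), work(3)]
  without values =? await allFinishedValues[int](futs), err:
    echo "some futures failed: ", err.msg
    return
  echo values # @[2, 4, 6]
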

View File

@ -10,7 +10,7 @@ type
# 0 => 0, 1, 2
# 1 => 3, 4, 5
# 2 => 6, 7, 8
LinearStrategy,
LinearStrategy
# Stepped indexing:
# 0 => 0, 3, 6
@ -21,77 +21,106 @@ type
# Representing a strategy for grouping indices (of blocks usually)
# Given an iteration-count as input, will produce a seq of
# selected indices.
IndexingError* = object of CodexError
IndexingWrongIndexError* = object of IndexingError
IndexingWrongIterationsError* = object of IndexingError
IndexingWrongGroupCountError* = object of IndexingError
IndexingWrongPadBlockCountError* = object of IndexingError
IndexingStrategy* = object
strategyType*: StrategyType
strategyType*: StrategyType # Indexing strategy algorithm
firstIndex*: int # Lowest index that can be returned
lastIndex*: int # Highest index that can be returned
iterations*: int # getIndices(iteration) will run from 0 ..< iterations
step*: int
iterations*: int # Number of iteration steps (0 ..< iterations)
step*: int # Step size between generated indices
groupCount*: int # Number of groups to partition indices into
padBlockCount*: int # Number of padding blocks to append per group
func checkIteration(self: IndexingStrategy, iteration: int): void {.raises: [IndexingError].} =
func checkIteration(
self: IndexingStrategy, iteration: int
): void {.raises: [IndexingError].} =
if iteration >= self.iterations:
raise newException(
IndexingError,
"Indexing iteration can't be greater than or equal to iterations.")
IndexingError, "Indexing iteration can't be greater than or equal to iterations."
)
func getIter(first, last, step: int): Iter[int] =
{.cast(noSideEffect).}:
Iter[int].new(first, last, step)
func getLinearIndicies(
self: IndexingStrategy,
iteration: int): Iter[int] {.raises: [IndexingError].} =
self.checkIteration(iteration)
func getLinearIndices(self: IndexingStrategy, iteration: int): Iter[int] =
let
first = self.firstIndex + iteration * (self.step + 1)
last = min(first + self.step, self.lastIndex)
first = self.firstIndex + iteration * self.step
last = min(first + self.step - 1, self.lastIndex)
getIter(first, last, 1)
func getSteppedIndicies(
self: IndexingStrategy,
iteration: int): Iter[int] {.raises: [IndexingError].} =
self.checkIteration(iteration)
func getSteppedIndices(self: IndexingStrategy, iteration: int): Iter[int] =
let
first = self.firstIndex + iteration
last = self.lastIndex
getIter(first, last, self.iterations)
func getIndicies*(
self: IndexingStrategy,
iteration: int): Iter[int] {.raises: [IndexingError].} =
func getStrategyIndices(self: IndexingStrategy, iteration: int): Iter[int] =
case self.strategyType
of StrategyType.LinearStrategy:
self.getLinearIndicies(iteration)
self.getLinearIndices(iteration)
of StrategyType.SteppedStrategy:
self.getSteppedIndicies(iteration)
self.getSteppedIndices(iteration)
func getIndices*(
self: IndexingStrategy, iteration: int
): Iter[int] {.raises: [IndexingError].} =
self.checkIteration(iteration)
{.cast(noSideEffect).}:
Iter[int].new(
iterator (): int {.gcsafe.} =
for value in self.getStrategyIndices(iteration):
yield value
for i in 0 ..< self.padBlockCount:
yield self.lastIndex + (iteration + 1) + i * self.groupCount
)
func init*(
strategy: StrategyType,
firstIndex, lastIndex, iterations: int): IndexingStrategy {.raises: [IndexingError].} =
firstIndex, lastIndex, iterations: int,
groupCount = 0,
padBlockCount = 0,
): IndexingStrategy {.raises: [IndexingError].} =
if firstIndex > lastIndex:
raise newException(
IndexingWrongIndexError,
"firstIndex (" & $firstIndex & ") can't be greater than lastIndex (" & $lastIndex & ")")
"firstIndex (" & $firstIndex & ") can't be greater than lastIndex (" & $lastIndex &
")",
)
if iterations <= 0:
raise newException(
IndexingWrongIterationsError,
"iterations (" & $iterations & ") must be greater than zero.")
"iterations (" & $iterations & ") must be greater than zero.",
)
if padBlockCount < 0:
raise newException(
IndexingWrongPadBlockCountError,
"padBlockCount (" & $padBlockCount & ") must be equal or greater than zero.",
)
if padBlockCount > 0 and groupCount <= 0:
raise newException(
IndexingWrongGroupCountError,
"groupCount (" & $groupCount & ") must be greater than zero.",
)
IndexingStrategy(
strategyType: strategy,
firstIndex: firstIndex,
lastIndex: lastIndex,
iterations: iterations,
step: divUp((lastIndex - firstIndex), iterations))
step: divUp((lastIndex - firstIndex + 1), iterations),
groupCount: groupCount,
padBlockCount: padBlockCount,
)
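
With the corrected step arithmetic, the two strategies partition 0 .. 8 over three iterations exactly as the comments at the top of this file describe. A quick check (a sketch assuming this module and std/sequtils):

import std/sequtils

let
  linear = LinearStrategy.init(firstIndex = 0, lastIndex = 8, iterations = 3)
  stepped = SteppedStrategy.init(firstIndex = 0, lastIndex = 8, iterations = 3)

assert toSeq(linear.getIndices(1)) == @[3, 4, 5] # linear: 1 => 3, 4, 5
assert toSeq(stepped.getIndices(1)) == @[1, 4, 7] # stepped: 1 => 1, 4, 7
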

View File

@ -11,7 +11,7 @@
## 4. Remove usages of `nim-json-serialization` from the codebase
## 5. Remove need to declare `writeValue` for new types
## 6. Remove need to [avoid importing or exporting `toJson`, `%`, `%*` to prevent
## conflicts](https://github.com/codex-storage/nim-codex/pull/645#issuecomment-1838834467)
## conflicts](https://github.com/logos-storage/logos-storage-nim/pull/645#issuecomment-1838834467)
##
## When declaring a new type, one should consider importing the `codex/logutils`
## module, and specifying `formatIt`. If textlines log output and json log output
@ -98,7 +98,6 @@ import pkg/questionable/results
import ./utils/json except formatIt # TODO: remove exception?
import pkg/stew/byteutils
import pkg/stint
import pkg/upraises
export byteutils
export chronicles except toJson, formatIt, `%`
@ -107,7 +106,6 @@ export sequtils
export json except formatIt
export strutils
export sugar
export upraises
export results
func shortLog*(long: string, ellipses = "*", start = 3, stop = 6): string =
@ -125,7 +123,8 @@ func shortLog*(long: string, ellipses = "*", start = 3, stop = 6): string =
short
func shortHexLog*(long: string): string =
if long[0..1] == "0x": result &= "0x"
if long[0 .. 1] == "0x":
result &= "0x"
result &= long[2 .. long.high].shortLog("..", 4, 4)
func short0xHexLog*[N: static[int], T: array[N, byte]](v: T): string =
@ -153,7 +152,7 @@ proc formatTextLineSeq*(val: seq[string]): string =
template formatIt*(format: LogFormat, T: typedesc, body: untyped) =
# Provides formatters for logging with Chronicles for the given type and
# `LogFormat`.
# NOTE: `seq[T]`, `Option[T]`, and `seq[Option[T]]` are overriddden
# NOTE: `seq[T]`, `Option[T]`, and `seq[Option[T]]` are overridden
# since the base `setProperty` is generic using `auto` and conflicts with
# providing a generic `seq` and `Option` override.
when format == LogFormat.json:
@ -184,12 +183,16 @@ template formatIt*(format: LogFormat, T: typedesc, body: untyped) =
let v = opts.map(opt => opt.formatJsonOption)
setProperty(r, key, json.`%`(v))
proc setProperty*(r: var JsonRecord, key: string, val: seq[T]) =
proc setProperty*(
r: var JsonRecord, key: string, val: seq[T]
) {.raises: [ValueError, IOError].} =
var it {.inject, used.}: T
let v = val.map(it => body)
setProperty(r, key, json.`%`(v))
proc setProperty*(r: var JsonRecord, key: string, val: T) {.upraises:[ValueError, IOError].} =
proc setProperty*(
r: var JsonRecord, key: string, val: T
) {.raises: [ValueError, IOError].} =
var it {.inject, used.}: T = val
let v = body
setProperty(r, key, json.`%`(v))
@ -220,23 +223,35 @@ template formatIt*(format: LogFormat, T: typedesc, body: untyped) =
let v = opts.map(opt => opt.formatTextLineOption)
setProperty(r, key, v.formatTextLineSeq)
proc setProperty*(r: var TextLineRecord, key: string, val: seq[T]) =
proc setProperty*(
r: var TextLineRecord, key: string, val: seq[T]
) {.raises: [ValueError, IOError].} =
var it {.inject, used.}: T
let v = val.map(it => body)
setProperty(r, key, v.formatTextLineSeq)
proc setProperty*(r: var TextLineRecord, key: string, val: T) {.upraises:[ValueError, IOError].} =
proc setProperty*(
r: var TextLineRecord, key: string, val: T
) {.raises: [ValueError, IOError].} =
var it {.inject, used.}: T = val
let v = body
setProperty(r, key, v)
template formatIt*(T: type, body: untyped) {.dirty.} =
formatIt(LogFormat.textLines, T): body
formatIt(LogFormat.json, T): body
formatIt(LogFormat.textLines, T):
body
formatIt(LogFormat.json, T):
body
formatIt(LogFormat.textLines, Cid): shortLog($it)
formatIt(LogFormat.json, Cid): $it
formatIt(UInt256): $it
formatIt(MultiAddress): $it
formatIt(LogFormat.textLines, array[32, byte]): it.short0xHexLog
formatIt(LogFormat.json, array[32, byte]): it.to0xHex
formatIt(LogFormat.textLines, Cid):
shortLog($it)
formatIt(LogFormat.json, Cid):
$it
formatIt(UInt256):
$it
formatIt(MultiAddress):
$it
formatIt(LogFormat.textLines, array[32, byte]):
it.short0xHexLog
formatIt(LogFormat.json, array[32, byte]):
it.to0xHex

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,9 +9,9 @@
# This module implements serialization and deserialization of Manifest
import pkg/upraises
import times
push: {.upraises: [].}
{.push raises: [].}
import std/tables
import std/sequtils
@ -59,6 +59,8 @@ proc encode*(manifest: Manifest): ?!seq[byte] =
# optional hcodec: MultiCodec = 5 # Multihash codec
# optional version: CidVersion = 6; # Cid version
# optional ErasureInfo erasure = 7; # erasure coding info
# optional filename: ?string = 8; # original filename
# optional mimetype: ?string = 9; # original mimetype
# }
# ```
#
@ -70,6 +72,7 @@ proc encode*(manifest: Manifest): ?!seq[byte] =
header.write(4, manifest.codec.uint32)
header.write(5, manifest.hcodec.uint32)
header.write(6, manifest.version.uint32)
if manifest.protected:
var erasureInfo = initProtoBuffer()
erasureInfo.write(1, manifest.ecK.uint32)
@ -90,6 +93,12 @@ proc encode*(manifest: Manifest): ?!seq[byte] =
erasureInfo.finish()
header.write(7, erasureInfo)
if manifest.filename.isSome:
header.write(8, manifest.filename.get())
if manifest.mimetype.isSome:
header.write(9, manifest.mimetype.get())
pbNode.write(1, header) # set the treeCid as the data field
pbNode.finish()
@ -118,6 +127,8 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
slotRoots: seq[seq[byte]]
cellSize: uint32
verifiableStrategy: uint32
filename: string
mimetype: string
# Decode `Header` message
if pbNode.getField(1, pbHeader).isErr:
@ -145,6 +156,12 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
if pbHeader.getField(7, pbErasureInfo).isErr:
return failure("Unable to decode `erasureInfo` from manifest!")
if pbHeader.getField(8, filename).isErr:
return failure("Unable to decode `filename` from manifest!")
if pbHeader.getField(9, mimetype).isErr:
return failure("Unable to decode `mimetype` from manifest!")
let protected = pbErasureInfo.buffer.len > 0
var verifiable = false
if protected:
@ -180,11 +197,13 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
if pbVerificationInfo.getField(4, verifiableStrategy).isErr:
return failure("Unable to decode `verifiableStrategy` from manifest!")
let
treeCid = ? Cid.init(treeCidBuf).mapFailure
let treeCid = ?Cid.init(treeCidBuf).mapFailure
let
self = if protected:
var filenameOption = if filename.len == 0: string.none else: filename.some
var mimetypeOption = if mimetype.len == 0: string.none else: mimetype.some
let self =
if protected:
Manifest.new(
treeCid = treeCid,
datasetSize = datasetSize.NBytes,
@ -196,7 +215,10 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
ecM = ecM.int,
originalTreeCid = ?Cid.init(originalTreeCid).mapFailure,
originalDatasetSize = originalDatasetSize.NBytes,
strategy = StrategyType(protectedStrategy))
strategy = StrategyType(protectedStrategy),
filename = filenameOption,
mimetype = mimetypeOption,
)
else:
Manifest.new(
treeCid = treeCid,
@ -204,7 +226,10 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
blockSize = blockSize.NBytes,
version = CidVersion(version),
hcodec = hcodec.MultiCodec,
codec = codec.MultiCodec)
codec = codec.MultiCodec,
filename = filenameOption,
mimetype = mimetypeOption,
)
?self.verify()
@ -218,7 +243,7 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest =
verifyRoot = verifyRootCid,
slotRoots = slotRootCids,
cellSize = cellSize.NBytes,
strategy = StrategyType(verifiableStrategy)
strategy = StrategyType(verifiableStrategy),
)
self.success

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -9,9 +9,7 @@
# This module defines all operations on Manifest
import pkg/upraises
push: {.upraises: [].}
{.push raises: [], gcsafe.}
import pkg/libp2p/protobuf/minprotobuf
import pkg/libp2p/[cid, multihash, multicodec]
@ -25,17 +23,18 @@ import ../blocktype
import ../indexingstrategy
import ../logutils
# TODO: Manifest should be reworked to more concrete types,
# perhaps using inheritance
type
Manifest* = object of RootObj
type Manifest* = ref object of RootObj
treeCid {.serialize.}: Cid # Root of the merkle tree
datasetSize {.serialize.}: NBytes # Total size of all blocks
blockSize {.serialize.}: NBytes # Size of each contained block (might not be needed if blocks are len-prefixed)
blockSize {.serialize.}: NBytes
# Size of each contained block (might not be needed if blocks are len-prefixed)
codec: MultiCodec # Dataset codec
hcodec: MultiCodec # Multihash codec
version: CidVersion # Cid version
filename {.serialize.}: ?string # The filename of the content uploaded (optional)
mimetype {.serialize.}: ?string # The mimetype of the content uploaded (optional)
case protected {.serialize.}: bool # Protected datasets have erasure coded info
of true:
ecK: int # Number of blocks to encode
@ -43,7 +42,8 @@ type
originalTreeCid: Cid # The original root of the dataset being erasure coded
originalDatasetSize: NBytes
protectedStrategy: StrategyType # Indexing strategy used to build the slot roots
case verifiable {.serialize.}: bool # Verifiable datasets can be used to generate storage proofs
case verifiable {.serialize.}: bool
# Verifiable datasets can be used to generate storage proofs
of true:
verifyRoot: Cid # Root of the top level merkle tree built from slot roots
slotRoots: seq[Cid] # Individual slot root built from the original dataset blocks
@ -121,6 +121,12 @@ func verifiableStrategy*(self: Manifest): StrategyType =
func numSlotBlocks*(self: Manifest): int =
divUp(self.blocksCount, self.numSlots)
func filename*(self: Manifest): ?string =
self.filename
func mimetype*(self: Manifest): ?string =
self.mimetype
############################################################
# Operations on block list
############################################################
@ -135,13 +141,6 @@ func isManifest*(mc: MultiCodec): ?!bool =
# Various sizes and verification
############################################################
func bytes*(self: Manifest, pad = true): NBytes =
## Compute how many bytes corresponding StoreStream(Manifest, pad) will return
if pad or self.protected:
self.blocksCount.NBytes * self.blockSize
else:
self.datasetSize
func rounded*(self: Manifest): int =
## Number of data blocks in *protected* manifest including padding at the end
roundUp(self.originalBlocksCount, self.ecK)
@ -155,59 +154,60 @@ func verify*(self: Manifest): ?!void =
##
if self.protected and (self.blocksCount != self.steps * (self.ecK + self.ecM)):
return failure newException(CodexError, "Broken manifest: wrong originalBlocksCount")
return
failure newException(CodexError, "Broken manifest: wrong originalBlocksCount")
return success()
func cid*(self: Manifest): ?!Cid {.deprecated: "use treeCid instead".} =
self.treeCid.success
func `==`*(a, b: Manifest): bool =
(a.treeCid == b.treeCid) and
(a.datasetSize == b.datasetSize) and
(a.blockSize == b.blockSize) and
(a.version == b.version) and
(a.hcodec == b.hcodec) and
(a.codec == b.codec) and
(a.protected == b.protected) and
(if a.protected:
(a.ecK == b.ecK) and
(a.ecM == b.ecM) and
(a.originalTreeCid == b.originalTreeCid) and
(a.treeCid == b.treeCid) and (a.datasetSize == b.datasetSize) and
(a.blockSize == b.blockSize) and (a.version == b.version) and (a.hcodec == b.hcodec) and
(a.codec == b.codec) and (a.protected == b.protected) and (a.filename == b.filename) and
(a.mimetype == b.mimetype) and (
if a.protected:
(a.ecK == b.ecK) and (a.ecM == b.ecM) and (a.originalTreeCid == b.originalTreeCid) and
(a.originalDatasetSize == b.originalDatasetSize) and
(a.protectedStrategy == b.protectedStrategy) and
(a.verifiable == b.verifiable) and
(if a.verifiable:
(a.verifyRoot == b.verifyRoot) and
(a.slotRoots == b.slotRoots) and
(a.cellSize == b.cellSize) and
(a.verifiableStrategy == b.verifiableStrategy)
(a.protectedStrategy == b.protectedStrategy) and (a.verifiable == b.verifiable) and
(
if a.verifiable:
(a.verifyRoot == b.verifyRoot) and (a.slotRoots == b.slotRoots) and
(a.cellSize == b.cellSize) and (
a.verifiableStrategy == b.verifiableStrategy
)
else:
true)
true
)
else:
true)
true
)
func `$`*(self: Manifest): string =
"treeCid: " & $self.treeCid &
", datasetSize: " & $self.datasetSize &
", blockSize: " & $self.blockSize &
", version: " & $self.version &
", hcodec: " & $self.hcodec &
", codec: " & $self.codec &
", protected: " & $self.protected &
(if self.protected:
", ecK: " & $self.ecK &
", ecM: " & $self.ecM &
", originalTreeCid: " & $self.originalTreeCid &
", originalDatasetSize: " & $self.originalDatasetSize &
", verifiable: " & $self.verifiable &
(if self.verifiable:
", verifyRoot: " & $self.verifyRoot &
", slotRoots: " & $self.slotRoots
result =
"treeCid: " & $self.treeCid & ", datasetSize: " & $self.datasetSize & ", blockSize: " &
$self.blockSize & ", version: " & $self.version & ", hcodec: " & $self.hcodec &
", codec: " & $self.codec & ", protected: " & $self.protected
if self.filename.isSome:
result &= ", filename: " & $self.filename
if self.mimetype.isSome:
result &= ", mimetype: " & $self.mimetype
result &= (
if self.protected:
", ecK: " & $self.ecK & ", ecM: " & $self.ecM & ", originalTreeCid: " &
$self.originalTreeCid & ", originalDatasetSize: " & $self.originalDatasetSize &
", verifiable: " & $self.verifiable & (
if self.verifiable:
", verifyRoot: " & $self.verifyRoot & ", slotRoots: " & $self.slotRoots
else:
"")
""
)
else:
"")
""
)
return result
############################################################
# Constructors
@ -221,8 +221,10 @@ func new*(
version: CidVersion = CIDv1,
hcodec = Sha256HashCodec,
codec = BlockCodec,
protected = false): Manifest =
protected = false,
filename: ?string = string.none,
mimetype: ?string = string.none,
): Manifest =
T(
treeCid: treeCid,
blockSize: blockSize,
@ -230,7 +232,10 @@ func new*(
version: version,
codec: codec,
hcodec: hcodec,
protected: protected)
protected: protected,
filename: filename,
mimetype: mimetype,
)
func new*(
T: type Manifest,
@ -238,7 +243,8 @@ func new*(
treeCid: Cid,
datasetSize: NBytes,
ecK, ecM: int,
strategy: StrategyType): Manifest =
strategy = SteppedStrategy,
): Manifest =
## Create an erasure protected dataset from an
## unprotected one
##
@ -251,14 +257,16 @@ func new*(
hcodec: manifest.hcodec,
blockSize: manifest.blockSize,
protected: true,
ecK: ecK, ecM: ecM,
ecK: ecK,
ecM: ecM,
originalTreeCid: manifest.treeCid,
originalDatasetSize: manifest.datasetSize,
protectedStrategy: strategy)
protectedStrategy: strategy,
filename: manifest.filename,
mimetype: manifest.mimetype,
)
func new*(
T: type Manifest,
manifest: Manifest): Manifest =
func new*(T: type Manifest, manifest: Manifest): Manifest =
## Create an unprotected dataset from an
## erasure protected one
##
@ -270,7 +278,10 @@ func new*(
codec: manifest.codec,
hcodec: manifest.hcodec,
blockSize: manifest.blockSize,
protected: false)
protected: false,
filename: manifest.filename,
mimetype: manifest.mimetype,
)
func new*(
T: type Manifest,
@ -284,8 +295,10 @@ func new*(
ecM: int,
originalTreeCid: Cid,
originalDatasetSize: NBytes,
strategy: StrategyType): Manifest =
strategy = SteppedStrategy,
filename: ?string = string.none,
mimetype: ?string = string.none,
): Manifest =
Manifest(
treeCid: treeCid,
datasetSize: datasetSize,
@ -298,7 +311,10 @@ func new*(
ecM: ecM,
originalTreeCid: originalTreeCid,
originalDatasetSize: originalDatasetSize,
protectedStrategy: strategy)
protectedStrategy: strategy,
filename: filename,
mimetype: mimetype,
)
func new*(
T: type Manifest,
@ -306,18 +322,19 @@ func new*(
verifyRoot: Cid,
slotRoots: openArray[Cid],
cellSize = DefaultCellSize,
strategy = SteppedStrategy): ?!Manifest =
strategy = LinearStrategy,
): ?!Manifest =
## Create a verifiable dataset from an
## protected one
##
if not manifest.protected:
return failure newException(
CodexError, "Can create verifiable manifest only from protected manifest.")
CodexError, "Can create verifiable manifest only from protected manifest."
)
if slotRoots.len != manifest.numSlots:
return failure newException(
CodexError, "Wrong number of slot roots.")
return failure newException(CodexError, "Wrong number of slot roots.")
success Manifest(
treeCid: manifest.treeCid,
@ -329,17 +346,19 @@ func new*(
protected: true,
ecK: manifest.ecK,
ecM: manifest.ecM,
originalTreeCid: manifest.treeCid,
originalTreeCid: manifest.originalTreeCid,
originalDatasetSize: manifest.originalDatasetSize,
protectedStrategy: manifest.protectedStrategy,
verifiable: true,
verifyRoot: verifyRoot,
slotRoots: @slotRoots,
cellSize: cellSize,
verifiableStrategy: strategy)
verifiableStrategy: strategy,
filename: manifest.filename,
mimetype: manifest.mimetype,
)
func new*(
T: type Manifest,
data: openArray[byte]): ?!Manifest =
func new*(T: type Manifest, data: openArray[byte]): ?!Manifest =
## Create a manifest instance from given data
##
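
The erasure-coding sizes in this module follow simple rounding arithmetic; a standalone sketch, with divUp and roundUp reimplemented for illustration (in the repo they live in ./utils):

func divUp(a, b: int): int =
  (a + b - 1) div b

func roundUp(a, b: int): int =
  divUp(a, b) * b

let
  originalBlocksCount = 10
  ecK = 4

# data blocks are padded up to a multiple of ecK (see rounded above) ...
assert roundUp(originalBlocksCount, ecK) == 12
# ... giving this many encoding steps of ecK blocks each
assert divUp(roundUp(originalBlocksCount, ecK), ecK) == 3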

View File

@ -1,5 +1,4 @@
import pkg/chronos
import pkg/upraises
import pkg/questionable
import pkg/ethers/erc20
import ./contracts/requests
@ -18,35 +17,89 @@ export periods
type
Market* = ref object of RootObj
MarketError* = object of CodexError
SlotStateMismatchError* = object of MarketError
SlotReservationNotAllowedError* = object of MarketError
ProofInvalidError* = object of MarketError
Subscription* = ref object of RootObj
OnRequest* = proc(id: RequestId,
ask: StorageAsk,
expiry: UInt256) {.gcsafe, upraises:[].}
OnFulfillment* = proc(requestId: RequestId) {.gcsafe, upraises: [].}
OnSlotFilled* = proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises:[].}
OnSlotFreed* = proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].}
OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, upraises:[].}
OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, upraises:[].}
OnProofSubmitted* = proc(id: SlotId) {.gcsafe, upraises:[].}
PastStorageRequest* = object
requestId*: RequestId
ask*: StorageAsk
expiry*: UInt256
OnRequest* =
proc(id: RequestId, ask: StorageAsk, expiry: uint64) {.gcsafe, raises: [].}
OnFulfillment* = proc(requestId: RequestId) {.gcsafe, raises: [].}
OnSlotFilled* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].}
OnSlotFreed* = proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].}
OnSlotReservationsFull* =
proc(requestId: RequestId, slotIndex: uint64) {.gcsafe, raises: [].}
OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, raises: [].}
OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, raises: [].}
OnProofSubmitted* = proc(id: SlotId) {.gcsafe, raises: [].}
ProofChallenge* = array[32, byte]
method getZkeyHash*(market: Market): Future[?string] {.base, async.} =
# Marketplace events -- located here due to the Market abstraction
MarketplaceEvent* = Event
StorageRequested* = object of MarketplaceEvent
requestId*: RequestId
ask*: StorageAsk
expiry*: uint64
SlotFilled* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
slotIndex*: uint64
SlotFreed* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
slotIndex*: uint64
SlotReservationsFull* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
slotIndex*: uint64
RequestFulfilled* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
RequestCancelled* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
RequestFailed* = object of MarketplaceEvent
requestId* {.indexed.}: RequestId
ProofSubmitted* = object of MarketplaceEvent
id*: SlotId
method loadConfig*(
market: Market
): Future[?!void] {.base, async: (raises: [CancelledError]).} =
raiseAssert("not implemented")
method getSigner*(market: Market): Future[Address] {.base, async.} =
method getZkeyHash*(
market: Market
): Future[?string] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method periodicity*(market: Market): Future[Periodicity] {.base, async.} =
method getSigner*(
market: Market
): Future[Address] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method proofTimeout*(market: Market): Future[UInt256] {.base, async.} =
method periodicity*(
market: Market
): Future[Periodicity] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method proofDowntime*(market: Market): Future[uint8] {.base, async.} =
method proofTimeout*(
market: Market
): Future[uint64] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method repairRewardPercentage*(
market: Market
): Future[uint8] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method requestDurationLimit*(market: Market): Future[uint64] {.base, async.} =
raiseAssert("not implemented")
method proofDowntime*(
market: Market
): Future[uint8] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method getPointer*(market: Market, slotId: SlotId): Future[uint8] {.base, async.} =
@ -57,8 +110,9 @@ proc inDowntime*(market: Market, slotId: SlotId): Future[bool] {.async.} =
let pntr = await market.getPointer(slotId)
return pntr < downtime
method requestStorage*(market: Market,
request: StorageRequest) {.base, async.} =
method requestStorage*(
market: Market, request: StorageRequest
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method myRequests*(market: Market): Future[seq[RequestId]] {.base, async.} =
@ -67,142 +121,193 @@ method myRequests*(market: Market): Future[seq[RequestId]] {.base, async.} =
method mySlots*(market: Market): Future[seq[SlotId]] {.base, async.} =
raiseAssert("not implemented")
method getRequest*(market: Market,
id: RequestId):
Future[?StorageRequest] {.base, async.} =
method getRequest*(
market: Market, id: RequestId
): Future[?StorageRequest] {.base, async: (raises: [CancelledError]).} =
raiseAssert("not implemented")
method requestState*(market: Market,
requestId: RequestId): Future[?RequestState] {.base, async.} =
method requestState*(
market: Market, requestId: RequestId
): Future[?RequestState] {.base, async.} =
raiseAssert("not implemented")
method slotState*(market: Market,
slotId: SlotId): Future[SlotState] {.base, async.} =
method slotState*(
market: Market, slotId: SlotId
): Future[SlotState] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method getRequestEnd*(market: Market,
id: RequestId): Future[SecondsSince1970] {.base, async.} =
method getRequestEnd*(
market: Market, id: RequestId
): Future[SecondsSince1970] {.base, async.} =
raiseAssert("not implemented")
method requestExpiresAt*(market: Market,
id: RequestId): Future[SecondsSince1970] {.base, async.} =
method requestExpiresAt*(
market: Market, id: RequestId
): Future[SecondsSince1970] {.base, async.} =
raiseAssert("not implemented")
method getHost*(market: Market,
requestId: RequestId,
slotIndex: UInt256): Future[?Address] {.base, async.} =
method getHost*(
market: Market, requestId: RequestId, slotIndex: uint64
): Future[?Address] {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method getActiveSlot*(
method currentCollateral*(
market: Market, slotId: SlotId
): Future[UInt256] {.base, async: (raises: [MarketError, CancelledError]).} =
raiseAssert("not implemented")
method getActiveSlot*(market: Market, slotId: SlotId): Future[?Slot] {.base, async.} =
raiseAssert("not implemented")
method fillSlot*(
market: Market,
slotId: SlotId): Future[?Slot] {.base, async.} =
raiseAssert("not implemented")
method fillSlot*(market: Market,
requestId: RequestId,
slotIndex: UInt256,
slotIndex: uint64,
proof: Groth16Proof,
collateral: UInt256) {.base, async.} =
collateral: UInt256,
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method freeSlot*(market: Market, slotId: SlotId) {.base, async.} =
method freeSlot*(
market: Market, slotId: SlotId
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method withdrawFunds*(market: Market,
requestId: RequestId) {.base, async.} =
method withdrawFunds*(
market: Market, requestId: RequestId
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method subscribeRequests*(market: Market,
callback: OnRequest):
Future[Subscription] {.base, async.} =
method subscribeRequests*(
market: Market, callback: OnRequest
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method isProofRequired*(market: Market,
id: SlotId): Future[bool] {.base, async.} =
method isProofRequired*(market: Market, id: SlotId): Future[bool] {.base, async.} =
raiseAssert("not implemented")
method willProofBeRequired*(market: Market,
id: SlotId): Future[bool] {.base, async.} =
method willProofBeRequired*(market: Market, id: SlotId): Future[bool] {.base, async.} =
raiseAssert("not implemented")
method getChallenge*(market: Market, id: SlotId): Future[ProofChallenge] {.base, async.} =
method getChallenge*(
market: Market, id: SlotId
): Future[ProofChallenge] {.base, async.} =
raiseAssert("not implemented")
method submitProof*(market: Market,
id: SlotId,
proof: Groth16Proof) {.base, async.} =
method submitProof*(
market: Market, id: SlotId, proof: Groth16Proof
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method markProofAsMissing*(market: Market,
id: SlotId,
period: Period) {.base, async.} =
method markProofAsMissing*(
market: Market, id: SlotId, period: Period
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method canProofBeMarkedAsMissing*(market: Market,
id: SlotId,
period: Period): Future[bool] {.base, async.} =
method canMarkProofAsMissing*(
market: Market, id: SlotId, period: Period
): Future[bool] {.base, async: (raises: [CancelledError]).} =
raiseAssert("not implemented")
method subscribeFulfillment*(market: Market,
callback: OnFulfillment):
Future[Subscription] {.base, async.} =
method reserveSlot*(
market: Market, requestId: RequestId, slotIndex: uint64
) {.base, async: (raises: [CancelledError, MarketError]).} =
raiseAssert("not implemented")
method subscribeFulfillment*(market: Market,
requestId: RequestId,
callback: OnFulfillment):
Future[Subscription] {.base, async.} =
method canReserveSlot*(
market: Market, requestId: RequestId, slotIndex: uint64
): Future[bool] {.base, async.} =
raiseAssert("not implemented")
method subscribeSlotFilled*(market: Market,
callback: OnSlotFilled):
Future[Subscription] {.base, async.} =
method subscribeFulfillment*(
market: Market, callback: OnFulfillment
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeSlotFilled*(market: Market,
requestId: RequestId,
slotIndex: UInt256,
callback: OnSlotFilled):
Future[Subscription] {.base, async.} =
method subscribeFulfillment*(
market: Market, requestId: RequestId, callback: OnFulfillment
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeSlotFreed*(market: Market,
callback: OnSlotFreed):
Future[Subscription] {.base, async.} =
method subscribeSlotFilled*(
market: Market, callback: OnSlotFilled
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeRequestCancelled*(market: Market,
callback: OnRequestCancelled):
Future[Subscription] {.base, async.} =
method subscribeSlotFilled*(
market: Market, requestId: RequestId, slotIndex: uint64, callback: OnSlotFilled
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeRequestCancelled*(market: Market,
requestId: RequestId,
callback: OnRequestCancelled):
Future[Subscription] {.base, async.} =
method subscribeSlotFreed*(
market: Market, callback: OnSlotFreed
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeRequestFailed*(market: Market,
callback: OnRequestFailed):
Future[Subscription] {.base, async.} =
method subscribeSlotReservationsFull*(
market: Market, callback: OnSlotReservationsFull
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeRequestFailed*(market: Market,
requestId: RequestId,
callback: OnRequestFailed):
Future[Subscription] {.base, async.} =
method subscribeRequestCancelled*(
market: Market, callback: OnRequestCancelled
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeProofSubmission*(market: Market,
callback: OnProofSubmitted):
Future[Subscription] {.base, async.} =
method subscribeRequestCancelled*(
market: Market, requestId: RequestId, callback: OnRequestCancelled
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method unsubscribe*(subscription: Subscription) {.base, async, upraises:[].} =
method subscribeRequestFailed*(
market: Market, callback: OnRequestFailed
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method queryPastStorageRequests*(market: Market,
blocksAgo: int):
Future[seq[PastStorageRequest]] {.base, async.} =
method subscribeRequestFailed*(
market: Market, requestId: RequestId, callback: OnRequestFailed
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method subscribeProofSubmission*(
market: Market, callback: OnProofSubmitted
): Future[Subscription] {.base, async.} =
raiseAssert("not implemented")
method unsubscribe*(subscription: Subscription) {.base, async.} =
raiseAssert("not implemented")
method queryPastSlotFilledEvents*(
market: Market, fromBlock: BlockTag
): Future[seq[SlotFilled]] {.base, async.} =
raiseAssert("not implemented")
method queryPastSlotFilledEvents*(
market: Market, blocksAgo: int
): Future[seq[SlotFilled]] {.base, async.} =
raiseAssert("not implemented")
method queryPastSlotFilledEvents*(
market: Market, fromTime: SecondsSince1970
): Future[seq[SlotFilled]] {.base, async.} =
raiseAssert("not implemented")
method queryPastStorageRequestedEvents*(
market: Market, fromBlock: BlockTag
): Future[seq[StorageRequested]] {.base, async.} =
raiseAssert("not implemented")
method queryPastStorageRequestedEvents*(
market: Market, blocksAgo: int
): Future[seq[StorageRequested]] {.base, async.} =
raiseAssert("not implemented")
method slotCollateral*(
market: Market, requestId: RequestId, slotIndex: uint64
): Future[?!UInt256] {.base, async: (raises: [CancelledError]).} =
raiseAssert("not implemented")
method slotCollateral*(
market: Market, collateralPerSlot: UInt256, slotState: SlotState
): ?!UInt256 {.base, gcsafe, raises: [].} =
raiseAssert("not implemented")
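
The base methods above only assert; concrete markets override them. A sketch of that pattern with the new chronos raises annotations, using a hypothetical MockMarket (the real mock lives in the test suite):

import pkg/chronos

type
  MarketError = object of CatchableError
  Market = ref object of RootObj
  MockMarket = ref object of Market

method proofTimeout(market: Market): Future[uint64] {.base, async: (raises: [CancelledError, MarketError]).} =
  raiseAssert "not implemented"

method proofTimeout(market: MockMarket): Future[uint64] {.async: (raises: [CancelledError, MarketError]).} =
  return 120

proc main() {.async.} =
  let market: Market = MockMarket()
  echo await market.proofTimeout() # dynamic dispatch picks the mock: 120

waitFor main()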

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -7,13 +7,13 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/upraises
push: {.upraises: [].}
{.push raises: [], gcsafe.}
import pkg/libp2p
import pkg/questionable
import pkg/questionable/results
import pkg/stew/byteutils
import pkg/serde/json
import ../../units
import ../../errors
@ -24,11 +24,11 @@ const MaxMerkleTreeSize = 100.MiBs.uint
const MaxMerkleProofSize = 1.MiBs.uint
proc encode*(self: CodexTree): seq[byte] =
var pb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
var pb = initProtoBuffer()
pb.write(1, self.mcodec.uint64)
pb.write(2, self.leavesCount.uint64)
for node in self.nodes:
var nodesPb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
var nodesPb = initProtoBuffer()
nodesPb.write(1, node)
nodesPb.finish()
pb.write(3, nodesPb)
@ -37,7 +37,7 @@ proc encode*(self: CodexTree): seq[byte] =
pb.buffer
proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree =
var pb = initProtoBuffer(data, maxSize = MaxMerkleTreeSize)
var pb = initProtoBuffer(data)
var mcodecCode: uint64
var leavesCount: uint64
discard ?pb.getField(1, mcodecCode).mapFailure
@ -60,13 +60,13 @@ proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree =
CodexTree.fromNodes(mcodec, nodes, leavesCount.int)
proc encode*(self: CodexProof): seq[byte] =
var pb = initProtoBuffer(maxSize = MaxMerkleProofSize)
var pb = initProtoBuffer()
pb.write(1, self.mcodec.uint64)
pb.write(2, self.index.uint64)
pb.write(3, self.nleaves.uint64)
for node in self.path:
var nodesPb = initProtoBuffer(maxSize = MaxMerkleTreeSize)
var nodesPb = initProtoBuffer()
nodesPb.write(1, node)
nodesPb.finish()
pb.write(4, nodesPb)
@ -75,7 +75,7 @@ proc encode*(self: CodexProof): seq[byte] =
pb.buffer
proc decode*(_: type CodexProof, data: seq[byte]): ?!CodexProof =
var pb = initProtoBuffer(data, maxSize = MaxMerkleProofSize)
var pb = initProtoBuffer(data)
var mcodecCode: uint64
var index: uint64
var nleaves: uint64
@ -100,3 +100,16 @@ proc decode*(_: type CodexProof, data: seq[byte]): ?!CodexProof =
nodes.add node
CodexProof.init(mcodec, index.int, nleaves.int, nodes)
proc fromJson*(_: type CodexProof, json: JsonNode): ?!CodexProof =
expectJsonKind(Cid, JString, json)
var bytes: seq[byte]
try:
bytes = hexToSeqByte(json.str)
except ValueError as err:
return failure(err)
CodexProof.decode(bytes)
func `%`*(proof: CodexProof): JsonNode =
%byteutils.toHex(proof.encode())
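
The JSON form of a proof is just the hex string of its protobuf encoding; the same round-trip convention in isolation, with raw bytes standing in for an encoded proof:

import pkg/stew/byteutils

let encoded = @[byte 0xCA, 0xFE, 0x01]
let hexForm = toHex(encoded) # what the % operator above produces
assert hexToSeqByte(hexForm) == encoded # what fromJson undoes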

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -15,7 +15,7 @@ import std/sequtils
import pkg/questionable
import pkg/questionable/results
import pkg/libp2p/[cid, multicodec, multihash]
import pkg/constantine/hashes
import ../../utils
import ../../rng
import ../../errors
@ -47,24 +47,8 @@ type
CodexProof* = ref object of ByteProof
mcodec*: MultiCodec
func mhash*(mcodec: MultiCodec): ?!MHash =
let
mhash = CodeHashes.getOrDefault(mcodec)
if isNil(mhash.coder):
return failure "Invalid multihash codec"
success mhash
func digestSize*(self: (CodexTree or CodexProof)): int =
## Number of leaves
##
self.mhash.size
func getProof*(self: CodexTree, index: int): ?!CodexProof =
var
proof = CodexProof(mcodec: self.mcodec)
var proof = CodexProof(mcodec: self.mcodec)
?self.getProof(index, proof)
@ -78,12 +62,10 @@ func verify*(self: CodexProof, leaf: MultiHash, root: MultiHash): ?!bool =
rootBytes = root.digestBytes
leafBytes = leaf.digestBytes
if self.mcodec != root.mcodec or
self.mcodec != leaf.mcodec:
if self.mcodec != root.mcodec or self.mcodec != leaf.mcodec:
return failure "Hash codec mismatch"
if rootBytes.len != root.size and
leafBytes.len != leaf.size:
if rootBytes.len != root.size and leafBytes.len != leaf.size:
return failure "Invalid hash length"
self.verify(leafBytes, rootBytes)
@ -91,25 +73,17 @@ func verify*(self: CodexProof, leaf: MultiHash, root: MultiHash): ?!bool =
func verify*(self: CodexProof, leaf: Cid, root: Cid): ?!bool =
self.verify(?leaf.mhash.mapFailure, ?root.mhash.mapFailure)
proc rootCid*(
self: CodexTree,
version = CIDv1,
dataCodec = DatasetRootCodec): ?!Cid =
proc rootCid*(self: CodexTree, version = CIDv1, dataCodec = DatasetRootCodec): ?!Cid =
if (?self.root).len == 0:
return failure "Empty root"
let
mhash = ? MultiHash.init(self.mcodec, ? self.root).mapFailure
let mhash = ?MultiHash.init(self.mcodec, ?self.root).mapFailure
Cid.init(version, DatasetRootCodec, mhash).mapFailure
func getLeafCid*(
self: CodexTree,
i: Natural,
version = CIDv1,
dataCodec = BlockCodec): ?!Cid =
self: CodexTree, i: Natural, version = CIDv1, dataCodec = BlockCodec
): ?!Cid =
if i >= self.leavesCount:
return failure "Invalid leaf index " & $i
@ -120,58 +94,46 @@ func getLeafCid*(
Cid.init(version, dataCodec, mhash).mapFailure
proc `$`*(self: CodexTree): string =
let root = if self.root.isOk: byteutils.toHex(self.root.get) else: "none"
"CodexTree(" &
" root: " & root &
", leavesCount: " & $self.leavesCount &
", levels: " & $self.levels &
", mcodec: " & $self.mcodec & " )"
let root =
if self.root.isOk:
byteutils.toHex(self.root.get)
else:
"none"
"CodexTree(" & " root: " & root & ", leavesCount: " & $self.leavesCount & ", levels: " &
$self.levels & ", mcodec: " & $self.mcodec & " )"
proc `$`*(self: CodexProof): string =
"CodexProof(" &
" nleaves: " & $self.nleaves &
", index: " & $self.index &
", path: " & $self.path.mapIt( byteutils.toHex(it) ) &
", mcodec: " & $self.mcodec & " )"
"CodexProof(" & " nleaves: " & $self.nleaves & ", index: " & $self.index & ", path: " &
$self.path.mapIt(byteutils.toHex(it)) & ", mcodec: " & $self.mcodec & " )"
func compress*(
x, y: openArray[byte],
key: ByteTreeKey,
mhash: MHash): ?!ByteHash =
func compress*(x, y: openArray[byte], key: ByteTreeKey, codec: MultiCodec): ?!ByteHash =
## Compress two hashes
##
var digest = newSeq[byte](mhash.size)
mhash.coder(@x & @y & @[ key.byte ], digest)
success digest
let input = @x & @y & @[key.byte]
let digest = ?MultiHash.digest(codec, input).mapFailure
success digest.digestBytes
func init*(
_: type CodexTree,
mcodec: MultiCodec = Sha256HashCodec,
leaves: openArray[ByteHash]): ?!CodexTree =
_: type CodexTree, mcodec: MultiCodec = Sha256HashCodec, leaves: openArray[ByteHash]
): ?!CodexTree =
if leaves.len == 0:
return failure "Empty leaves"
let
mhash = ? mcodec.mhash()
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} =
compress(x, y, key, mhash)
Zero: ByteHash = newSeq[byte](mhash.size)
compress(x, y, key, mcodec)
digestSize = ?mcodec.digestSize.mapFailure
Zero: ByteHash = newSeq[byte](digestSize)
if mhash.size != leaves[0].len:
if digestSize != leaves[0].len:
return failure "Invalid hash length"
var
self = CodexTree(mcodec: mcodec, compress: compressor, zero: Zero)
var self = CodexTree(mcodec: mcodec, compress: compressor, zero: Zero)
self.layers = ?merkleTreeWorker(self, leaves, isBottomLayer = true)
success self
func init*(
_: type CodexTree,
leaves: openArray[MultiHash]): ?!CodexTree =
func init*(_: type CodexTree, leaves: openArray[MultiHash]): ?!CodexTree =
if leaves.len == 0:
return failure "Empty leaves"
@ -181,9 +143,7 @@ func init*(
CodexTree.init(mcodec, leaves)
func init*(
_: type CodexTree,
leaves: openArray[Cid]): ?!CodexTree =
func init*(_: type CodexTree, leaves: openArray[Cid]): ?!CodexTree =
if leaves.len == 0:
return failure "Empty leaves"
@ -197,18 +157,18 @@ proc fromNodes*(
_: type CodexTree,
mcodec: MultiCodec = Sha256HashCodec,
nodes: openArray[ByteHash],
nleaves: int): ?!CodexTree =
nleaves: int,
): ?!CodexTree =
if nodes.len == 0:
return failure "Empty nodes"
let
mhash = ? mcodec.mhash()
Zero = newSeq[byte](mhash.size)
digestSize = ?mcodec.digestSize.mapFailure
Zero = newSeq[byte](digestSize)
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} =
compress(x, y, key, mhash)
compress(x, y, key, mcodec)
if mhash.size != nodes[0].len:
if digestSize != nodes[0].len:
return failure "Invalid hash length"
var
@ -235,16 +195,16 @@ func init*(
mcodec: MultiCodec = Sha256HashCodec,
index: int,
nleaves: int,
nodes: openArray[ByteHash]): ?!CodexProof =
nodes: openArray[ByteHash],
): ?!CodexProof =
if nodes.len == 0:
return failure "Empty nodes"
let
mhash = ? mcodec.mhash()
Zero = newSeq[byte](mhash.size)
digestSize = ?mcodec.digestSize.mapFailure
Zero = newSeq[byte](digestSize)
compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!seq[byte] {.noSideEffect.} =
compress(x, y, key, mhash)
compress(x, y, key, mcodec)
success CodexProof(
compress: compressor,
@ -252,4 +212,5 @@ func init*(
mcodec: mcodec,
index: index,
nleaves: nleaves,
path: @nodes)
path: @nodes,
)
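
The compress function above hashes x & y & key through the tree's multihash codec. A self-contained sketch of that keyed compression, with nimcrypto's sha2-256 standing in for the codec lookup:

import pkg/nimcrypto

type ByteTreeKey = enum
  KeyNone, KeyBottomLayer, KeyOdd, KeyOddAndBottomLayer

proc compress(x, y: openArray[byte], key: ByteTreeKey): seq[byte] =
  # hash the concatenation x & y & key byte, as in the tree code above
  var ctx: sha256
  ctx.init()
  ctx.update(x)
  ctx.update(y)
  ctx.update([ord(key).byte])
  @(ctx.finish().data)

let parent = compress(@[byte 1, 2], @[byte 3, 4], KeyBottomLayer)
assert parent.len == 32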

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -59,9 +59,8 @@ func root*[H, K](self: MerkleTree[H, K]): ?!H =
return success last[0]
func getProof*[H, K](
self: MerkleTree[H, K],
index: int,
proof: MerkleProof[H, K]): ?!void =
self: MerkleTree[H, K], index: int, proof: MerkleProof[H, K]
): ?!void =
let depth = self.depth
let nleaves = self.leavesCount
@ -73,7 +72,11 @@ func getProof*[H, K](
var m = nleaves
for i in 0 ..< depth:
let j = k xor 1
path[i] = if (j < m): self.layers[i][j] else: self.zero
path[i] =
if (j < m):
self.layers[i][j]
else:
self.zero
k = k shr 1
m = (m + 1) shr 1
@ -85,8 +88,7 @@ func getProof*[H, K](
success()
func getProof*[H, K](self: MerkleTree[H, K], index: int): ?!MerkleProof[H, K] =
var
proof = MerkleProof[H, K]()
var proof = MerkleProof[H, K]()
?self.getProof(index, proof)
@ -121,10 +123,8 @@ func verify*[H, K](proof: MerkleProof[H, K], leaf: H, root: H): ?!bool =
success bool(root == ?proof.reconstructRoot(leaf))
func merkleTreeWorker*[H, K](
self: MerkleTree[H, K],
xs: openArray[H],
isBottomLayer: static bool): ?!seq[seq[H]] =
self: MerkleTree[H, K], xs: openArray[H], isBottomLayer: static bool
): ?!seq[seq[H]] =
let a = low(xs)
let b = high(xs)
let m = b - a + 1

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -24,10 +24,10 @@ import ./merkletree
export merkletree, poseidon2
const
KeyNoneF = F.fromhex("0x0")
KeyBottomLayerF = F.fromhex("0x1")
KeyOddF = F.fromhex("0x2")
KeyOddAndBottomLayerF = F.fromhex("0x3")
KeyNoneF = F.fromHex("0x0")
KeyBottomLayerF = F.fromHex("0x1")
KeyOddF = F.fromHex("0x2")
KeyOddAndBottomLayerF = F.fromHex("0x3")
Poseidon2Zero* = zero
@ -46,64 +46,49 @@ type
proc `$`*(self: Poseidon2Tree): string =
let root = if self.root.isOk: self.root.get.toHex else: "none"
"Poseidon2Tree(" &
" root: " & root &
", leavesCount: " & $self.leavesCount &
"Poseidon2Tree(" & " root: " & root & ", leavesCount: " & $self.leavesCount &
", levels: " & $self.levels & " )"
proc `$`*(self: Poseidon2Proof): string =
"Poseidon2Proof(" &
" nleaves: " & $self.nleaves &
", index: " & $self.index &
"Poseidon2Proof(" & " nleaves: " & $self.nleaves & ", index: " & $self.index &
", path: " & $self.path.mapIt(it.toHex) & " )"
func toArray32*(bytes: openArray[byte]): array[32, byte] =
result[0 ..< bytes.len] = bytes[0 ..< bytes.len]
converter toKey*(key: PoseidonKeysEnum): Poseidon2Hash =
case key:
case key
of KeyNone: KeyNoneF
of KeyBottomLayer: KeyBottomLayerF
of KeyOdd: KeyOddF
of KeyOddAndBottomLayer: KeyOddAndBottomLayerF
func init*(
_: type Poseidon2Tree,
leaves: openArray[Poseidon2Hash]): ?!Poseidon2Tree =
func init*(_: type Poseidon2Tree, leaves: openArray[Poseidon2Hash]): ?!Poseidon2Tree =
if leaves.len == 0:
return failure "Empty leaves"
let
compressor = proc(
x, y: Poseidon2Hash,
key: PoseidonKeysEnum): ?!Poseidon2Hash {.noSideEffect.} =
let compressor = proc(
x, y: Poseidon2Hash, key: PoseidonKeysEnum
): ?!Poseidon2Hash {.noSideEffect.} =
success compress(x, y, key.toKey)
var
self = Poseidon2Tree(compress: compressor, zero: Poseidon2Zero)
var self = Poseidon2Tree(compress: compressor, zero: Poseidon2Zero)
self.layers = ?merkleTreeWorker(self, leaves, isBottomLayer = true)
success self
func init*(
_: type Poseidon2Tree,
leaves: openArray[array[31, byte]]): ?!Poseidon2Tree =
Poseidon2Tree.init(
leaves.mapIt( Poseidon2Hash.fromBytes(it) ))
func init*(_: type Poseidon2Tree, leaves: openArray[array[31, byte]]): ?!Poseidon2Tree =
Poseidon2Tree.init(leaves.mapIt(Poseidon2Hash.fromBytes(it)))
proc fromNodes*(
_: type Poseidon2Tree,
nodes: openArray[Poseidon2Hash],
nleaves: int): ?!Poseidon2Tree =
_: type Poseidon2Tree, nodes: openArray[Poseidon2Hash], nleaves: int
): ?!Poseidon2Tree =
if nodes.len == 0:
return failure "Empty nodes"
let
compressor = proc(
x, y: Poseidon2Hash,
key: PoseidonKeysEnum): ?!Poseidon2Hash {.noSideEffect.} =
let compressor = proc(
x, y: Poseidon2Hash, key: PoseidonKeysEnum
): ?!Poseidon2Hash {.noSideEffect.} =
success compress(x, y, key.toKey)
var
@ -126,18 +111,14 @@ proc fromNodes*(
success self
func init*(
_: type Poseidon2Proof,
index: int,
nleaves: int,
nodes: openArray[Poseidon2Hash]): ?!Poseidon2Proof =
_: type Poseidon2Proof, index: int, nleaves: int, nodes: openArray[Poseidon2Hash]
): ?!Poseidon2Proof =
if nodes.len == 0:
return failure "Empty nodes"
let
compressor = proc(
x, y: Poseidon2Hash,
key: PoseidonKeysEnum): ?!Poseidon2Hash {.noSideEffect.} =
let compressor = proc(
x, y: Poseidon2Hash, key: PoseidonKeysEnum
): ?!Poseidon2Hash {.noSideEffect.} =
success compress(x, y, key.toKey)
success Poseidon2Proof(
@ -145,4 +126,5 @@ func init*(
zero: Poseidon2Zero,
index: index,
nleaves: nleaves,
path: @nodes)
path: @nodes,
)
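
The Poseidon2 key constants above are selected through a Nim converter; the same pattern in miniature, with ints standing in for field elements (the real toKey returns Poseidon2Hash values and the compression is the Poseidon2 permutation):

type PoseidonKeysEnum = enum
  KeyNone, KeyBottomLayer, KeyOdd, KeyOddAndBottomLayer

converter toKey(key: PoseidonKeysEnum): int =
  case key
  of KeyNone: 0x0
  of KeyBottomLayer: 0x1
  of KeyOdd: 0x2
  of KeyOddAndBottomLayer: 0x3

proc keyedCompress(x, y, key: int): int =
  # placeholder mixing step, not the real permutation
  x * 31 + y * 17 + key

# the converter fires implicitly when an enum meets an int parameter
assert keyedCompress(1, 2, KeyOdd) == 1 * 31 + 2 * 17 + 2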

codex/multicodec_exts.nim Normal file
View File

@ -0,0 +1,11 @@
const CodecExts = [
("poseidon2-alt_bn_128-sponge-r2", 0xCD10), # bn128 rate 2 sponge
("poseidon2-alt_bn_128-merkle-2kb", 0xCD11), # bn128 2kb compress & merkleize
("poseidon2-alt_bn_128-keyed-compress", 0xCD12), # bn128 keyed compress]
("codex-manifest", 0xCD01),
("codex-block", 0xCD02),
("codex-root", 0xCD03),
("codex-slot-root", 0xCD04),
("codex-proving-root", 0xCD05),
("codex-slot-cell", 0xCD06),
]
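
A tiny lookup over such an extension table, showing how a codec name resolves to its code point (codeOf is a hypothetical helper; actual registration goes through libp2p's multicodec extension mechanism):

const Exts = [
  ("codex-manifest", 0xCD01),
  ("codex-block", 0xCD02),
]

proc codeOf(name: string): int =
  for (n, code) in Exts:
    if n == name:
      return code
  -1

assert codeOf("codex-block") == 0xCD02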

codex/multihash_exts.nim Normal file
View File

@ -0,0 +1,40 @@
import blscurve/bls_public_exports
import pkg/constantine/hashes
import poseidon2
proc sha2_256hash_constantine(data: openArray[byte], output: var openArray[byte]) =
# Using Constantine's SHA256 instead of mhash for optimal performance on 32-byte merkle node hashing
# See: https://github.com/logos-storage/logos-storage-nim/issues/1162
if len(output) > 0:
let digest = hashes.sha256.hash(data)
copyMem(addr output[0], addr digest[0], 32)
proc poseidon2_sponge_rate2(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = poseidon2.Sponge.digest(data).toBytes()
copyMem(addr output[0], addr digest[0], uint(len(output)))
proc poseidon2_merkle_2kb_sponge(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = poseidon2.SpongeMerkle.digest(data, 2048).toBytes()
copyMem(addr output[0], addr digest[0], uint(len(output)))
const Sha2256MultiHash* = MHash(
mcodec: multiCodec("sha2-256"),
size: sha256.sizeDigest,
coder: sha2_256hash_constantine,
)
const HashExts = [
# override sha2-256 hash function
Sha2256MultiHash,
MHash(
mcodec: multiCodec("poseidon2-alt_bn_128-sponge-r2"),
size: 32,
coder: poseidon2_sponge_rate2,
),
MHash(
mcodec: multiCodec("poseidon2-alt_bn_128-merkle-2kb"),
size: 32,
coder: poseidon2_merkle_2kb_sponge,
),
]
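
Each MHash coder fills a caller-provided buffer rather than returning a digest; a sketch of that convention, with nimcrypto's sha2-256 standing in for the Constantine implementation above:

import pkg/nimcrypto

proc sha2_256hash(data: openArray[byte], output: var openArray[byte]) =
  if output.len > 0:
    let digest = sha256.digest(data)
    copyMem(addr output[0], unsafeAddr digest.data[0], min(output.len, 32))

var buf = newSeq[byte](32)
sha2_256hash(@[byte 1, 2, 3], buf)
assert buf != newSeq[byte](32) # digest written in place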

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -11,7 +11,8 @@ const
# Namespaces
CodexMetaNamespace* = "meta" # meta info stored here
CodexRepoNamespace* = "repo" # repository namespace, blocks and manifests are subkeys
CodexBlockTotalNamespace* = CodexMetaNamespace & "/total" # number of blocks in the repo
CodexBlockTotalNamespace* = CodexMetaNamespace & "/total"
# number of blocks in the repo
CodexBlocksNamespace* = CodexRepoNamespace & "/blocks" # blocks namespace
CodexManifestNamespace* = CodexRepoNamespace & "/manifests" # manifest namespace
CodexBlocksTtlNamespace* = # Cid TTL

codex/nat.nim Normal file
View File

@ -0,0 +1,432 @@
# Copyright (c) 2019-2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import
std/[options, os, strutils, times, net, atomics],
stew/[objects],
nat_traversal/[miniupnpc, natpmp],
json_serialization/std/net,
results
import pkg/chronos
import pkg/chronicles
import pkg/libp2p
import ./utils
import ./utils/natutils
import ./utils/addrutils
const
UPNP_TIMEOUT = 200 # ms
PORT_MAPPING_INTERVAL = 20 * 60 # seconds
NATPMP_LIFETIME = 60 * 60 # in seconds, must be longer than PORT_MAPPING_INTERVAL
type PortMappings* = object
internalTcpPort: Port
externalTcpPort: Port
internalUdpPort: Port
externalUdpPort: Port
description: string
type PortMappingArgs =
tuple[strategy: NatStrategy, tcpPort, udpPort: Port, description: string]
type NatConfig* = object
case hasExtIp*: bool
of true: extIp*: IpAddress
of false: nat*: NatStrategy
var
upnp {.threadvar.}: Miniupnp
npmp {.threadvar.}: NatPmp
strategy = NatStrategy.NatNone
natClosed: Atomic[bool]
extIp: Option[IpAddress]
activeMappings: seq[PortMappings]
natThreads: seq[Thread[PortMappingArgs]] = @[]
logScope:
topics = "nat"
type PrefSrcStatus = enum
NoRoutingInfo
PrefSrcIsPublic
PrefSrcIsPrivate
BindAddressIsPublic
BindAddressIsPrivate
## Also does threadvar initialisation.
## Must be called before redirectPorts() in each thread.
proc getExternalIP*(natStrategy: NatStrategy, quiet = false): Option[IpAddress] =
var externalIP: IpAddress
if natStrategy == NatStrategy.NatAny or natStrategy == NatStrategy.NatUpnp:
if upnp == nil:
upnp = newMiniupnp()
upnp.discoverDelay = UPNP_TIMEOUT
let dres = upnp.discover()
if dres.isErr:
debug "UPnP", msg = dres.error
else:
var
msg: cstring
canContinue = true
case upnp.selectIGD()
of IGDNotFound:
msg = "Internet Gateway Device not found. Giving up."
canContinue = false
of IGDFound:
msg = "Internet Gateway Device found."
of IGDNotConnected:
msg = "Internet Gateway Device found but it's not connected. Trying anyway."
of NotAnIGD:
msg =
"Some device found, but it's not recognised as an Internet Gateway Device. Trying anyway."
of IGDIpNotRoutable:
msg =
"Internet Gateway Device found and is connected, but with a reserved or non-routable IP. Trying anyway."
if not quiet:
debug "UPnP", msg
if canContinue:
let ires = upnp.externalIPAddress()
if ires.isErr:
debug "UPnP", msg = ires.error
else:
# if we got this far, UPnP is working and we don't need to try NAT-PMP
try:
externalIP = parseIpAddress(ires.value)
strategy = NatStrategy.NatUpnp
return some(externalIP)
except ValueError as e:
error "parseIpAddress() exception", err = e.msg
return
if natStrategy == NatStrategy.NatAny or natStrategy == NatStrategy.NatPmp:
if npmp == nil:
npmp = newNatPmp()
let nres = npmp.init()
if nres.isErr:
debug "NAT-PMP", msg = nres.error
else:
let nires = npmp.externalIPAddress()
if nires.isErr:
debug "NAT-PMP", msg = nires.error
else:
try:
externalIP = parseIpAddress($(nires.value))
strategy = NatStrategy.NatPmp
return some(externalIP)
except ValueError as e:
error "parseIpAddress() exception", err = e.msg
return
# This queries the routing table to get the "preferred source" attribute and
# checks if it's a public IP. If so, then it's our public IP.
#
# Furthermore, we check if the bind address (user provided, or a "0.0.0.0"
# default) is a public IP. That's a long shot, because code paths involving a
# user-provided bind address are not supposed to get here.
proc getRoutePrefSrc(bindIp: IpAddress): (Option[IpAddress], PrefSrcStatus) =
let bindAddress = initTAddress(bindIp, Port(0))
if bindAddress.isAnyLocal():
let ip = getRouteIpv4()
if ip.isErr():
# No route was found, log error and continue without IP.
error "No routable IP address found, check your network connection",
error = ip.error
return (none(IpAddress), NoRoutingInfo)
elif ip.get().isGlobalUnicast():
return (some(ip.get()), PrefSrcIsPublic)
else:
return (none(IpAddress), PrefSrcIsPrivate)
elif bindAddress.isGlobalUnicast():
return (some(bindIp), BindAddressIsPublic)
else:
return (none(IpAddress), BindAddressIsPrivate)
# Try to detect a public IP assigned to this host, before trying NAT traversal.
proc getPublicRoutePrefSrcOrExternalIP*(
natStrategy: NatStrategy, bindIp: IpAddress, quiet = true
): Option[IpAddress] =
let (prefSrcIp, prefSrcStatus) = getRoutePrefSrc(bindIp)
case prefSrcStatus
of NoRoutingInfo, PrefSrcIsPublic, BindAddressIsPublic:
return prefSrcIp
of PrefSrcIsPrivate, BindAddressIsPrivate:
let extIp = getExternalIP(natStrategy, quiet)
if extIp.isSome:
return some(extIp.get)
proc doPortMapping(
strategy: NatStrategy, tcpPort, udpPort: Port, description: string
): Option[(Port, Port)] {.gcsafe.} =
var
extTcpPort: Port
extUdpPort: Port
if strategy == NatStrategy.NatUpnp:
for t in [(tcpPort, UPNPProtocol.TCP), (udpPort, UPNPProtocol.UDP)]:
let
(port, protocol) = t
pmres = upnp.addPortMapping(
externalPort = $port,
protocol = protocol,
internalHost = upnp.lanAddr,
internalPort = $port,
desc = description,
leaseDuration = 0,
)
if pmres.isErr:
error "UPnP port mapping", msg = pmres.error, port
return
else:
# let's check it
let cres =
upnp.getSpecificPortMapping(externalPort = $port, protocol = protocol)
if cres.isErr:
warn "UPnP port mapping check failed. Assuming the check itself is broken and the port mapping was done.",
msg = cres.error
info "UPnP: added port mapping",
externalPort = port, internalPort = port, protocol = protocol
case protocol
of UPNPProtocol.TCP:
extTcpPort = port
of UPNPProtocol.UDP:
extUdpPort = port
elif strategy == NatStrategy.NatPmp:
for t in [(tcpPort, NatPmpProtocol.TCP), (udpPort, NatPmpProtocol.UDP)]:
let
(port, protocol) = t
pmres = npmp.addPortMapping(
eport = port.cushort,
iport = port.cushort,
protocol = protocol,
lifetime = NATPMP_LIFETIME,
)
if pmres.isErr:
error "NAT-PMP port mapping", msg = pmres.error, port
return
else:
let extPort = Port(pmres.value)
info "NAT-PMP: added port mapping",
externalPort = extPort, internalPort = port, protocol = protocol
case protocol
of NatPmpProtocol.TCP:
extTcpPort = extPort
of NatPmpProtocol.UDP:
extUdpPort = extPort
return some((extTcpPort, extUdpPort))
proc repeatPortMapping(args: PortMappingArgs) {.thread, raises: [ValueError].} =
ignoreSignalsInThread()
let
(strategy, tcpPort, udpPort, description) = args
interval = initDuration(seconds = PORT_MAPPING_INTERVAL)
sleepDuration = 1_000 # in ms, also the maximum delay after pressing Ctrl-C
var lastUpdate = now()
# We can't use copies of Miniupnp and NatPmp objects in this thread, because they share
# C pointers with other instances that have already been garbage collected, so
# we use threadvars instead and initialise them again with getExternalIP(),
# even though we don't need the external IP's value.
let ipres = getExternalIP(strategy, quiet = true)
if ipres.isSome:
while natClosed.load() == false:
let
# we're being silly here with this channel polling because we can't
# select on Nim channels like on Go ones
currTime = now()
if currTime >= (lastUpdate + interval):
discard doPortMapping(strategy, tcpPort, udpPort, description)
lastUpdate = currTime
sleep(sleepDuration)
proc stopNatThreads() {.noconv.} =
# stop the thread
debug "Stopping NAT port mapping renewal threads"
try:
natClosed.store(true)
joinThreads(natThreads)
except Exception as exc:
warn "Failed to stop NAT port mapping renewal thread", exc = exc.msg
# delete our port mappings
# FIXME: if the initial port mapping failed because it already existed for the
# required external port, we should not delete it. It might have been set up
# by another program.
# In Windows, a new thread is created for the signal handler, so we need to
# initialise our threadvars again.
let ipres = getExternalIP(strategy, quiet = true)
if ipres.isSome:
if strategy == NatStrategy.NatUpnp:
for entry in activeMappings:
for t in [
(entry.externalTcpPort, entry.internalTcpPort, UPNPProtocol.TCP),
(entry.externalUdpPort, entry.internalUdpPort, UPNPProtocol.UDP),
]:
let
(eport, iport, protocol) = t
pmres = upnp.deletePortMapping(externalPort = $eport, protocol = protocol)
if pmres.isErr:
error "UPnP port mapping deletion", msg = pmres.error
else:
debug "UPnP: deleted port mapping",
externalPort = eport, internalPort = iport, protocol = protocol
elif strategy == NatStrategy.NatPmp:
for entry in activeMappings:
for t in [
(entry.externalTcpPort, entry.internalTcpPort, NatPmpProtocol.TCP),
(entry.externalUdpPort, entry.internalUdpPort, NatPmpProtocol.UDP),
]:
let
(eport, iport, protocol) = t
pmres = npmp.deletePortMapping(
eport = eport.cushort, iport = iport.cushort, protocol = protocol
)
if pmres.isErr:
error "NAT-PMP port mapping deletion", msg = pmres.error
else:
debug "NAT-PMP: deleted port mapping",
externalPort = eport, internalPort = iport, protocol = protocol
proc redirectPorts*(
strategy: NatStrategy, tcpPort, udpPort: Port, description: string
): Option[(Port, Port)] =
result = doPortMapping(strategy, tcpPort, udpPort, description)
if result.isSome:
let (externalTcpPort, externalUdpPort) = result.get()
# needed by NAT-PMP on port mapping deletion
# Port mapping works. Let's launch a thread that repeats it, in case the
# NAT-PMP lease expires or the router is rebooted and forgets all about
# these mappings.
activeMappings.add(
PortMappings(
internalTcpPort: tcpPort,
externalTcpPort: externalTcpPort,
internalUdpPort: udpPort,
externalUdpPort: externalUdpPort,
description: description,
)
)
try:
natThreads.add(Thread[PortMappingArgs]())
natThreads[^1].createThread(
repeatPortMapping, (strategy, externalTcpPort, externalUdpPort, description)
)
# atexit() in disguise
if natThreads.len == 1:
# we should register the thread termination function only once
addQuitProc(stopNatThreads)
except Exception as exc:
warn "Failed to create NAT port mapping renewal thread", exc = exc.msg
proc setupNat*(
natStrategy: NatStrategy, tcpPort, udpPort: Port, clientId: string
): tuple[ip: Option[IpAddress], tcpPort, udpPort: Option[Port]] =
## Setup NAT port mapping and get external IP address.
## If any of this fails, we don't return any IP address but do return the
## original ports as a best effort.
## TODO: Allow for tcp or udp port mapping to be optional.
if extIp.isNone:
extIp = getExternalIP(natStrategy)
if extIp.isSome:
let ip = extIp.get
let extPorts = (
{.gcsafe.}:
redirectPorts(
strategy, tcpPort = tcpPort, udpPort = udpPort, description = clientId
)
)
if extPorts.isSome:
let (extTcpPort, extUdpPort) = extPorts.get()
(ip: some(ip), tcpPort: some(extTcpPort), udpPort: some(extUdpPort))
else:
warn "UPnP/NAT-PMP available but port forwarding failed"
(ip: none(IpAddress), tcpPort: some(tcpPort), udpPort: some(udpPort))
else:
warn "UPnP/NAT-PMP not available"
(ip: none(IpAddress), tcpPort: some(tcpPort), udpPort: some(udpPort))
proc setupAddress*(
natConfig: NatConfig, bindIp: IpAddress, tcpPort, udpPort: Port, clientId: string
): tuple[ip: Option[IpAddress], tcpPort, udpPort: Option[Port]] {.gcsafe.} =
## Set-up of the external address via any of the ways as configured in
## `NatConfig`. In case all fails an error is logged and the bind ports are
## selected also as external ports, as best effort and in hope that the
## external IP can be figured out by other means at a later stage.
## TODO: Allow for tcp or udp bind ports to be optional.
if natConfig.hasExtIp:
# any required port redirection must be done by hand
return (some(natConfig.extIp), some(tcpPort), some(udpPort))
case natConfig.nat
of NatStrategy.NatAny:
let (prefSrcIp, prefSrcStatus) = getRoutePrefSrc(bindIp)
case prefSrcStatus
of NoRoutingInfo, PrefSrcIsPublic, BindAddressIsPublic:
return (prefSrcIp, some(tcpPort), some(udpPort))
of PrefSrcIsPrivate, BindAddressIsPrivate:
return setupNat(natConfig.nat, tcpPort, udpPort, clientId)
of NatStrategy.NatNone:
let (prefSrcIp, prefSrcStatus) = getRoutePrefSrc(bindIp)
case prefSrcStatus
of NoRoutingInfo, PrefSrcIsPublic, BindAddressIsPublic:
return (prefSrcIp, some(tcpPort), some(udpPort))
of PrefSrcIsPrivate:
error "No public IP address found. Should not use --nat:none option"
return (none(IpAddress), some(tcpPort), some(udpPort))
of BindAddressIsPrivate:
error "Bind IP is not a public IP address. Should not use --nat:none option"
return (none(IpAddress), some(tcpPort), some(udpPort))
of NatStrategy.NatUpnp, NatStrategy.NatPmp:
return setupNat(natConfig.nat, tcpPort, udpPort, clientId)
proc nattedAddress*(
natConfig: NatConfig, addrs: seq[MultiAddress], udpPort: Port
): tuple[libp2p, discovery: seq[MultiAddress]] =
## Takes a NAT configuration, sequence of multiaddresses and UDP port and returns:
## - Modified multiaddresses with NAT-mapped addresses for libp2p
## - Discovery addresses with NAT-mapped UDP ports
var discoveryAddrs = newSeq[MultiAddress](0)
let newAddrs = addrs.mapIt:
block:
# Extract IP address and port from the multiaddress
let (ipPart, port) = getAddressAndPort(it)
if ipPart.isSome and port.isSome:
# Try to setup NAT mapping for the address
let (newIP, tcp, udp) =
setupAddress(natConfig, ipPart.get, port.get, udpPort, "codex")
if newIP.isSome:
# NAT mapping successful - add discovery address with mapped UDP port
discoveryAddrs.add(getMultiAddrWithIPAndUDPPort(newIP.get, udp.get))
# Remap original address with NAT IP and TCP port
it.remapAddr(ip = newIP, port = tcp)
else:
# NAT mapping failed - use original address
echo "Failed to get external IP, using original address", it
discoveryAddrs.add(getMultiAddrWithIPAndUDPPort(ipPart.get, udpPort))
it
else:
# Invalid multiaddress format - return as is
it
(newAddrs, discoveryAddrs)
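
A standalone model of the renewal thread above (compile with --threads:on): poll a stop flag between short sleeps so shutdown is observed quickly, and redo the mapping once the interval elapses. Intervals are shortened so the sketch terminates:

import std/[atomics, os, times]

var closed: Atomic[bool]

proc renewLoop() {.thread.} =
  const renewEvery = 1.0 # seconds; PORT_MAPPING_INTERVAL in the real code
  var lastUpdate = epochTime()
  while not closed.load():
    if epochTime() - lastUpdate >= renewEvery:
      echo "re-adding port mapping"
      lastUpdate = epochTime()
    sleep(200) # poll the flag, like the sleepDuration above

var t: Thread[void]
createThread(t, renewLoop)
sleep(2500) # let a couple of renewals happen
closed.store(true) # cooperative shutdown, as in stopNatThreads
joinThread(t)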

View File

@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -13,8 +13,9 @@ import std/options
import std/sequtils
import std/strformat
import std/sugar
import std/cpuinfo
import times
import pkg/taskpools
import pkg/questionable
import pkg/questionable/results
import pkg/chronos
@ -26,7 +27,6 @@ import pkg/libp2p/stream/bufferstream
# TODO: remove once exported by libp2p
import pkg/libp2p/routing_record
import pkg/libp2p/signed_envelope
import pkg/taskpools
import ./chunker
import ./slots
@ -44,8 +44,8 @@ import ./indexingstrategy
import ./utils
import ./errors
import ./logutils
import ./utils/poseidon2digest
import ./utils/asynciter
import ./utils/safeasynciter
import ./utils/trackedfutures
export logutils
@ -53,13 +53,17 @@ logScope:
topics = "codex node"
const
FetchBatch = 200
DefaultFetchBatch = 1024
MaxOnBatchBlocks = 128
BatchRefillThreshold = 0.75 # Refill when 75% of window completes
type
Contracts* = tuple
client: ?ClientInteractions
host: ?HostInteractions
validator: ?ValidatorInteractions
Contracts* =
tuple[
client: ?ClientInteractions,
host: ?HostInteractions,
validator: ?ValidatorInteractions,
]
CodexNode* = object
switch: Switch
@ -71,12 +75,15 @@ type
contracts*: Contracts
clock*: Clock
storage*: Contracts
taskpool*: Taskpool
taskpool: Taskpool
trackedFutures: TrackedFutures
CodexNodeRef* = ref CodexNode
OnManifest* = proc(cid: Cid, manifest: Manifest): void {.gcsafe, raises: [].}
BatchProc* = proc(blocks: seq[bt.Block]): Future[?!void] {.gcsafe, raises: [].}
BatchProc* =
proc(blocks: seq[bt.Block]): Future[?!void] {.async: (raises: [CancelledError]).}
OnBlockStoredProc = proc(chunk: seq[byte]): void {.gcsafe, raises: [].}
func switch*(self: CodexNodeRef): Switch =
return self.switch
@ -91,8 +98,8 @@ func discovery*(self: CodexNodeRef): Discovery =
return self.discovery
proc storeManifest*(
self: CodexNodeRef,
manifest: Manifest): Future[?!bt.Block] {.async.} =
self: CodexNodeRef, manifest: Manifest
): Future[?!bt.Block] {.async.} =
without encodedVerifiable =? manifest.encode(), err:
trace "Unable to encode manifest"
return failure(err)
@ -108,8 +115,8 @@ proc storeManifest*(
success blk
proc fetchManifest*(
self: CodexNodeRef,
cid: Cid): Future[?!Manifest] {.async.} =
self: CodexNodeRef, cid: Cid
): Future[?!Manifest] {.async: (raises: [CancelledError]).} =
## Fetch and decode a manifest block
##
@ -132,34 +139,32 @@ proc fetchManifest*(
return manifest.success
proc findPeer*(
self: CodexNodeRef,
peerId: PeerId): Future[?PeerRecord] {.async.} =
proc findPeer*(self: CodexNodeRef, peerId: PeerId): Future[?PeerRecord] {.async.} =
## Find peer using the discovery service from the given CodexNode
##
return await self.discovery.findPeer(peerId)
proc connect*(
self: CodexNodeRef,
peerId: PeerId,
addrs: seq[MultiAddress]
self: CodexNodeRef, peerId: PeerId, addrs: seq[MultiAddress]
): Future[void] =
self.switch.connect(peerId, addrs)
proc updateExpiry*(
self: CodexNodeRef,
manifestCid: Cid,
expiry: SecondsSince1970): Future[?!void] {.async.} =
self: CodexNodeRef, manifestCid: Cid, expiry: SecondsSince1970
): Future[?!void] {.async: (raises: [CancelledError]).} =
without manifest =? await self.fetchManifest(manifestCid), error:
trace "Unable to fetch manifest for cid", manifestCid
return failure(error)
try:
let
ensuringFutures = Iter[int].new(0..<manifest.blocksCount)
.mapIt(self.networkStore.localStore.ensureExpiry( manifest.treeCid, it, expiry ))
await allFuturesThrowing(ensuringFutures)
let ensuringFutures = Iter[int].new(0 ..< manifest.blocksCount).mapIt(
self.networkStore.localStore.ensureExpiry(manifest.treeCid, it, expiry)
)
let res = await allFinishedFailed[?!void](ensuringFutures)
if res.failure.len > 0:
trace "Some blocks failed to update expiry", len = res.failure.len
return failure("Some blocks failed to update expiry (" & $res.failure.len & " )")
except CancelledError as exc:
raise exc
except CatchableError as exc:
@ -171,8 +176,10 @@ proc fetchBatched*(
self: CodexNodeRef,
cid: Cid,
iter: Iter[int],
batchSize = FetchBatch,
onBatch: BatchProc = nil): Future[?!void] {.async, gcsafe.} =
batchSize = DefaultFetchBatch,
onBatch: BatchProc = nil,
fetchLocal = true,
): Future[?!void] {.async: (raises: [CancelledError]), gcsafe.} =
## Fetch blocks in batches of `batchSize`
##
@ -182,17 +189,60 @@ proc fetchBatched*(
# (i: int) => self.networkStore.getBlock(BlockAddress.init(cid, i))
# )
while not iter.finished:
let blocks = collect:
# Sliding window: maintain batchSize blocks in-flight
let
refillThreshold = int(float(batchSize) * BatchRefillThreshold)
refillSize = max(refillThreshold, 1)
maxCallbackBlocks = min(batchSize, MaxOnBatchBlocks)
var
blockData: seq[bt.Block]
failedBlocks = 0
successfulBlocks = 0
completedInWindow = 0
var addresses = newSeqOfCap[BlockAddress](batchSize)
for i in 0 ..< batchSize:
if not iter.finished:
self.networkStore.getBlock(BlockAddress.init(cid, iter.next()))
let address = BlockAddress.init(cid, iter.next())
if fetchLocal or not (await address in self.networkStore):
addresses.add(address)
if blocksErr =? (await allFutureResult(blocks)).errorOption:
return failure(blocksErr)
var blockResults = await self.networkStore.getBlocks(addresses)
if not onBatch.isNil and
batchErr =? (await onBatch(blocks.mapIt( it.read.get ))).errorOption:
while not blockResults.finished:
without blk =? await blockResults.next(), err:
inc(failedBlocks)
continue
inc(successfulBlocks)
inc(completedInWindow)
if not onBatch.isNil:
blockData.add(blk)
if blockData.len >= maxCallbackBlocks:
if batchErr =? (await onBatch(blockData)).errorOption:
return failure(batchErr)
blockData = @[]
if completedInWindow >= refillThreshold and not iter.finished:
var refillAddresses = newSeqOfCap[BlockAddress](refillSize)
for i in 0 ..< refillSize:
if not iter.finished:
let address = BlockAddress.init(cid, iter.next())
if fetchLocal or not (await address in self.networkStore):
refillAddresses.add(address)
if refillAddresses.len > 0:
blockResults =
chain(blockResults, await self.networkStore.getBlocks(refillAddresses))
completedInWindow = 0
if failedBlocks > 0:
return failure("Some blocks failed (Result) to fetch (" & $failedBlocks & ")")
if not onBatch.isNil and blockData.len > 0:
if batchErr =? (await onBatch(blockData)).errorOption:
return failure(batchErr)
success()
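For reference, a minimal runnable sketch of the sliding-window batching above, assuming pkg/chronos; fetchOne, fetchWindowed and the 50% refill ratio are hypothetical stand-ins for networkStore.getBlocks and the BatchRefillThreshold-derived arithmetic in the diff.

import std/sequtils
import pkg/chronos

proc fetchOne(i: int): Future[int] {.async.} =
  # Simulate one block fetch with a short network round-trip
  await sleepAsync(10.milliseconds)
  return i

proc fetchWindowed(total, batchSize: int): Future[seq[int]] {.async.} =
  let refill = max(batchSize div 2, 1) # refill once half the window completes
  var
    next = 0
    inFlight: seq[Future[int]]
    results: seq[int]
    completed = 0
  # Prime the window with batchSize in-flight requests
  while next < total and inFlight.len < batchSize:
    inFlight.add fetchOne(next)
    inc next
  while inFlight.len > 0:
    let done = await one(inFlight) # first in-flight future to finish
    inFlight.keepItIf(it != done)
    results.add done.read()
    inc completed
    # Top the window back up once enough requests have completed
    if completed >= refill and next < total:
      for _ in 0 ..< refill:
        if next < total:
          inFlight.add fetchOne(next)
          inc next
      completed = 0
  return results

when isMainModule:
  echo waitFor fetchWindowed(total = 20, batchSize = 8)

The point of the window is steady throughput: new requests are issued as soon as a fraction of the previous ones complete, instead of draining a full batch before refilling.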
@ -200,77 +250,111 @@ proc fetchBatched*(
proc fetchBatched*(
self: CodexNodeRef,
manifest: Manifest,
batchSize = FetchBatch,
onBatch: BatchProc = nil): Future[?!void] =
batchSize = DefaultFetchBatch,
onBatch: BatchProc = nil,
fetchLocal = true,
): Future[?!void] {.async: (raw: true, raises: [CancelledError]).} =
## Fetch manifest in batches of `batchSize`
##
trace "Fetching blocks in batches of", size = batchSize
trace "Fetching blocks in batches of",
size = batchSize, blocksCount = manifest.blocksCount
let iter = Iter[int].new(0 ..< manifest.blocksCount)
self.fetchBatched(manifest.treeCid, iter, batchSize, onBatch)
self.fetchBatched(manifest.treeCid, iter, batchSize, onBatch, fetchLocal)
proc fetchDatasetAsync*(
self: CodexNodeRef, manifest: Manifest, fetchLocal = true
): Future[void] {.async: (raises: []).} =
## Asynchronously fetch a dataset in the background.
## This task will be tracked and cleaned up on node shutdown.
##
try:
if err =? (
await self.fetchBatched(
manifest = manifest, batchSize = DefaultFetchBatch, fetchLocal = fetchLocal
)
).errorOption:
error "Unable to fetch blocks", err = err.msg
except CancelledError as exc:
trace "Cancelled fetching blocks", exc = exc.msg
proc fetchDatasetAsyncTask*(self: CodexNodeRef, manifest: Manifest) =
## Start fetching a dataset in the background.
## The task will be tracked and cleaned up on node shutdown.
##
self.trackedFutures.track(self.fetchDatasetAsync(manifest, fetchLocal = false))
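fetchDatasetAsyncTask relies on the node's future tracker; a tiny stand-in for that pattern, assuming pkg/chronos (TrackedFutures here is a hypothetical sketch, not the codebase's type):

import pkg/chronos

type TrackedFutures = ref object
  futs: seq[FutureBase]

proc track(t: TrackedFutures, fut: FutureBase) =
  # Remember the handle so shutdown can cancel it
  t.futs.add fut

proc cancelTracked(t: TrackedFutures) {.async.} =
  # Cancel anything still running, then drop the handles
  for fut in t.futs:
    if not fut.finished():
      await fut.cancelAndWait()
  t.futs.setLen(0)

On shutdown the node awaits cancelTracked, which is why background fetches started here do not outlive the process.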
proc streamSingleBlock(
self: CodexNodeRef,
cid: Cid
): Future[?!LPstream] {.async.} =
self: CodexNodeRef, cid: Cid
): Future[?!LPStream] {.async: (raises: [CancelledError]).} =
## Streams the contents of a single block.
##
trace "Streaming single block", cid = cid
let
stream = BufferStream.new()
let stream = BufferStream.new()
without blk =? (await self.networkStore.getBlock(BlockAddress.init(cid))), err:
return failure(err)
proc streamOneBlock(): Future[void] {.async.} =
proc streamOneBlock(): Future[void] {.async: (raises: []).} =
try:
await stream.pushData(blk.data)
except CatchableError as exc:
trace "Unable to send block", cid, exc = exc.msg
discard
finally:
defer:
await stream.pushEof()
await stream.pushData(blk.data)
except CancelledError as exc:
trace "Streaming block cancelled", cid, exc = exc.msg
except LPStreamError as exc:
trace "Unable to send block", cid, exc = exc.msg
asyncSpawn streamOneBlock()
self.trackedFutures.track(streamOneBlock())
LPStream(stream).success
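A rough, runnable shape of the push-then-EOF handoff above, assuming nim-libp2p's BufferStream API (pushData, pushEof, readExactly) and stew/byteutils for the string conversions:

import pkg/chronos
import pkg/libp2p/stream/bufferstream
import pkg/stew/byteutils

proc demo() {.async.} =
  let stream = BufferStream.new()

  proc producer() {.async.} =
    try:
      await stream.pushData("hello".toBytes())
    finally:
      await stream.pushEof() # always signal EOF, as streamOneBlock does

  asyncSpawn producer()

  var buf = newSeq[byte](5)
  await stream.readExactly(addr buf[0], buf.len)
  echo string.fromBytes(buf)
  await stream.close()

when isMainModule:
  waitFor demo()

Pushing EOF on the cleanup path matters: a reader blocked on the stream would otherwise hang if pushData failed.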
proc streamEntireDataset(
self: CodexNodeRef,
manifest: Manifest,
manifestCid: Cid,
): ?!LPStream =
self: CodexNodeRef, manifest: Manifest, manifestCid: Cid
): Future[?!LPStream] {.async: (raises: [CancelledError]).} =
## Streams the contents of the entire dataset described by the manifest.
##
trace "Retrieving blocks from manifest", manifestCid
var jobs: seq[Future[void]]
let stream = LPStream(StoreStream.new(self.networkStore, manifest, pad = false))
if manifest.protected:
# Retrieve, decode and save to the local store all EC groups
proc erasureJob(): Future[void] {.async.} =
proc erasureJob(): Future[void] {.async: (raises: []).} =
try:
# Spawn an erasure decoding job
let
erasure = Erasure.new(
self.networkStore,
leoEncoderProvider,
leoDecoderProvider,
self.taskpool)
let erasure = Erasure.new(
self.networkStore, leoEncoderProvider, leoDecoderProvider, self.taskpool
)
without _ =? (await erasure.decode(manifest)), error:
trace "Unable to erasure decode manifest", manifestCid, exc = error.msg
error "Unable to erasure decode manifest", manifestCid, exc = error.msg
except CatchableError as exc:
trace "Exception decoding manifest", manifestCid, exc = exc.msg
trace "Error erasure decoding manifest", manifestCid, exc = exc.msg
asyncSpawn erasureJob()
jobs.add(erasureJob())
jobs.add(self.fetchDatasetAsync(manifest, fetchLocal = false))
# Monitor stream completion and cancel background jobs when done
proc monitorStream() {.async: (raises: []).} =
try:
await stream.join()
except CancelledError as exc:
warn "Stream cancelled", exc = exc.msg
finally:
await noCancel allFutures(jobs.mapIt(it.cancelAndWait))
self.trackedFutures.track(monitorStream())
# Retrieve all blocks of the dataset sequentially from the local store or network
trace "Creating store stream for manifest", manifestCid
LPStream(StoreStream.new(self.networkStore, manifest, pad = false)).success
stream.success
proc retrieve*(
self: CodexNodeRef,
cid: Cid,
local: bool = true): Future[?!LPStream] {.async.} =
self: CodexNodeRef, cid: Cid, local: bool = true
): Future[?!LPStream] {.async: (raises: [CancelledError]).} =
## Retrieve by Cid a single block or an entire dataset described by manifest
##
@ -283,12 +367,75 @@ proc retrieve*(
return await self.streamSingleBlock(cid)
self.streamEntireDataset(manifest, cid)
await self.streamEntireDataset(manifest, cid)
proc deleteSingleBlock(self: CodexNodeRef, cid: Cid): Future[?!void] {.async.} =
if err =? (await self.networkStore.delBlock(cid)).errorOption:
error "Error deleting block", cid, err = err.msg
return failure(err)
trace "Deleted block", cid
return success()
proc deleteEntireDataset(self: CodexNodeRef, cid: Cid): Future[?!void] {.async.} =
# Deletion is a strictly local operation
var store = self.networkStore.localStore
if not (await cid in store):
# As per the contract for delete*, an absent dataset is not an error.
return success()
without manifestBlock =? await store.getBlock(cid), err:
return failure(err)
without manifest =? Manifest.decode(manifestBlock), err:
return failure(err)
let runtimeQuota = initDuration(milliseconds = 100)
var lastIdle = getTime()
for i in 0 ..< manifest.blocksCount:
if (getTime() - lastIdle) >= runtimeQuota:
await idleAsync()
lastIdle = getTime()
if err =? (await store.delBlock(manifest.treeCid, i)).errorOption:
# The contract for delBlock is fuzzy, but we assume that if the block is
# simply missing we won't get an error. This is a best-effort operation and
# can simply be retried.
error "Failed to delete block within dataset", index = i, err = err.msg
return failure(err)
if err =? (await store.delBlock(cid)).errorOption:
error "Error deleting manifest block", err = err.msg
success()
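The runtimeQuota/idleAsync dance above keeps a long local deletion from starving the event loop; the same pattern in isolation, assuming pkg/chronos:

import std/times
import pkg/chronos

proc processMany(n: int) {.async.} =
  let runtimeQuota = initDuration(milliseconds = 100)
  var lastIdle = getTime()
  for i in 0 ..< n:
    if getTime() - lastIdle >= runtimeQuota:
      await idleAsync() # yield so other tasks can run
      lastIdle = getTime()
    discard i * i # stand-in for the per-block delete

when isMainModule:
  waitFor processMany(1_000_000)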
proc delete*(
self: CodexNodeRef, cid: Cid
): Future[?!void] {.async: (raises: [CatchableError]).} =
## Deletes a whole dataset, if Cid is a Manifest Cid, or a single block, if Cid is a block Cid,
## from the underlying block store. This is a strictly local operation.
##
## Missing blocks in dataset deletes are ignored.
##
without isManifest =? cid.isManifest, err:
trace "Bad content type for CID:", cid = cid, err = err.msg
return failure(err)
if not isManifest:
return await self.deleteSingleBlock(cid)
await self.deleteEntireDataset(cid)
proc store*(
self: CodexNodeRef,
stream: LPStream,
blockSize = DefaultBlockSize): Future[?!Cid] {.async.} =
filename: ?string = string.none,
mimetype: ?string = string.none,
blockSize = DefaultBlockSize,
onBlockStored: OnBlockStoredProc = nil,
): Future[?!Cid] {.async.} =
## Save stream contents as dataset with given blockSize
## to the node's BlockStore, and return the Cid of its manifest
##
@ -302,10 +449,7 @@ proc store*(
var cids: seq[Cid]
try:
while (
let chunk = await chunker.getBytes();
chunk.len > 0):
while (let chunk = await chunker.getBytes(); chunk.len > 0):
without mhash =? MultiHash.digest($hcodec, chunk).mapFailure, err:
return failure(err)
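The tightened loop condition uses Nim's statement-list expression: chunk is bound and tested once per iteration. A toy version, with Chunker as a hypothetical stand-in for the real chunker type:

import pkg/chronos

type Chunker = ref object
  data: string
  pos: int

proc getBytes(c: Chunker, size: int): Future[string] {.async.} =
  # Hand back up to `size` bytes, advancing the read position
  let take = min(size, c.data.len - c.pos)
  result = c.data[c.pos ..< c.pos + take]
  c.pos += take

proc chunkIt(c: Chunker) {.async.} =
  while (let chunk = await c.getBytes(4); chunk.len > 0):
    echo chunk

when isMainModule:
  waitFor chunkIt(Chunker(data: "hello world!"))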
@ -320,6 +464,9 @@ proc store*(
if err =? (await self.networkStore.putBlock(blk)).errorOption:
error "Unable to store block", cid = blk.cid, err = err.msg
return failure(&"Unable to store block {blk.cid}")
if not onBlockStored.isNil:
onBlockStored(chunk)
except CancelledError as exc:
raise exc
except CatchableError as exc:
@ -336,7 +483,8 @@ proc store*(
for index, cid in cids:
without proof =? tree.getProof(index), err:
return failure(err)
if err =? (await self.networkStore.putCidAndProof(treeCid, index, cid, proof)).errorOption:
if err =?
(await self.networkStore.putCidAndProof(treeCid, index, cid, proof)).errorOption:
# TODO add log here
return failure(err)
@ -346,28 +494,31 @@ proc store*(
datasetSize = NBytes(chunker.offset),
version = CIDv1,
hcodec = hcodec,
codec = dataCodec)
codec = dataCodec,
filename = filename,
mimetype = mimetype,
)
without manifestBlk =? await self.storeManifest(manifest), err:
error "Unable to store manifest"
return failure(err)
info "Stored data", manifestCid = manifestBlk.cid,
info "Stored data",
manifestCid = manifestBlk.cid,
treeCid = treeCid,
blocks = manifest.blocksCount,
datasetSize = manifest.datasetSize
await self.discovery.provide(manifestBlk.cid)
await self.discovery.provide(treeCid)
datasetSize = manifest.datasetSize,
filename = manifest.filename,
mimetype = manifest.mimetype
return manifestBlk.cid.success
proc iterateManifests*(self: CodexNodeRef, onManifest: OnManifest) {.async.} =
without cids =? await self.networkStore.listBlocks(BlockType.Manifest):
without cidsIter =? await self.networkStore.listBlocks(BlockType.Manifest):
warn "Failed to listBlocks"
return
for c in cids:
for c in cidsIter:
if cid =? await c:
without blk =? await self.networkStore.getBlock(cid):
warn "Failed to get manifest block by cid", cid
@ -382,13 +533,14 @@ proc iterateManifests*(self: CodexNodeRef, onManifest: OnManifest) {.async.} =
proc setupRequest(
self: CodexNodeRef,
cid: Cid,
duration: UInt256,
duration: uint64,
proofProbability: UInt256,
nodes: uint,
tolerance: uint,
reward: UInt256,
collateral: UInt256,
expiry: UInt256): Future[?!StorageRequest] {.async.} =
pricePerBytePerSecond: UInt256,
collateralPerByte: UInt256,
expiry: uint64,
): Future[?!StorageRequest] {.async.} =
## Set up slots for a given dataset
##
@ -401,9 +553,9 @@ proc setupRequest(
duration = duration
nodes = nodes
tolerance = tolerance
reward = reward
pricePerBytePerSecond = pricePerBytePerSecond
proofProbability = proofProbability
collateral = collateral
collateralPerByte = collateralPerByte
expiry = expiry
ecK = ecK
ecM = ecM
@ -415,12 +567,9 @@ proc setupRequest(
return failure error
# Erasure code the dataset according to provided parameters
let
erasure = Erasure.new(
self.networkStore.localStore,
leoEncoderProvider,
leoDecoderProvider,
self.taskpool)
let erasure = Erasure.new(
self.networkStore.localStore, leoEncoderProvider, leoDecoderProvider, self.taskpool
)
without encoded =? (await erasure.encode(manifest, ecK, ecM)), error:
trace "Unable to erasure code dataset"
@ -448,18 +597,15 @@ proc setupRequest(
request = StorageRequest(
ask: StorageAsk(
slots: verifiable.numSlots.uint64,
slotSize: builder.slotBytes.uint.u256,
slotSize: builder.slotBytes.uint64,
duration: duration,
proofProbability: proofProbability,
reward: reward,
collateral: collateral,
maxSlotLoss: tolerance
pricePerBytePerSecond: pricePerBytePerSecond,
collateralPerByte: collateralPerByte,
maxSlotLoss: tolerance,
),
content: StorageContent(
cid: $manifestBlk.cid, # TODO: why string?
merkleRoot: verifyRoot
),
expiry: expiry
content: StorageContent(cid: manifestBlk.cid, merkleRoot: verifyRoot),
expiry: expiry,
)
trace "Request created", request = $request
@ -468,13 +614,14 @@ proc setupRequest(
proc requestStorage*(
self: CodexNodeRef,
cid: Cid,
duration: UInt256,
duration: uint64,
proofProbability: UInt256,
nodes: uint,
tolerance: uint,
reward: UInt256,
collateral: UInt256,
expiry: UInt256): Future[?!PurchaseId] {.async.} =
pricePerBytePerSecond: UInt256,
collateralPerByte: UInt256,
expiry: uint64,
): Future[?!PurchaseId] {.async.} =
## Initiate a request-for-storage sequence; this might
## be a multistep procedure.
##
@ -484,10 +631,10 @@ proc requestStorage*(
duration = duration
nodes = nodes
tolerance = tolerance
reward = reward
pricePerBytePerSecond = pricePerBytePerSecond
proofProbability = proofProbability
collateral = collateral
expiry = expiry.truncate(int64)
collateralPerByte = collateralPerByte
expiry = expiry
now = self.clock.now
trace "Received a request for storage!"
@ -496,16 +643,12 @@ proc requestStorage*(
trace "Purchasing not available"
return failure "Purchasing not available"
without request =?
(await self.setupRequest(
cid,
duration,
proofProbability,
nodes,
tolerance,
reward,
collateral,
expiry)), err:
without request =? (
await self.setupRequest(
cid, duration, proofProbability, nodes, tolerance, pricePerBytePerSecond,
collateralPerByte, expiry,
)
), err:
trace "Unable to setup request"
return failure err
@ -515,43 +658,47 @@ proc requestStorage*(
proc onStore(
self: CodexNodeRef,
request: StorageRequest,
slotIdx: UInt256,
blocksCb: BlocksCb): Future[?!void] {.async.} =
expiry: SecondsSince1970,
slotIdx: uint64,
blocksCb: BlocksCb,
isRepairing: bool = false,
): Future[?!void] {.async: (raises: [CancelledError]).} =
## store data in local storage
##
let cid = request.content.cid
logScope:
cid = request.content.cid
cid = $cid
slotIdx = slotIdx
trace "Received a request to store a slot"
without cid =? Cid.init(request.content.cid).mapFailure, err:
trace "Unable to parse Cid", cid
return failure(err)
without manifest =? (await self.fetchManifest(cid)), err:
trace "Unable to fetch manifest for cid", cid, err = err.msg
return failure(err)
without builder =? Poseidon2Builder.new(self.networkStore, manifest), err:
without builder =?
Poseidon2Builder.new(self.networkStore, manifest, manifest.verifiableStrategy), err:
trace "Unable to create slots builder", err = err.msg
return failure(err)
let
slotIdx = slotIdx.truncate(int)
expiry = request.expiry.toSecondsSince1970
if slotIdx > manifest.slotRoots.high:
if slotIdx > manifest.slotRoots.high.uint64:
trace "Slot index not in manifest", slotIdx
return failure(newException(CodexError, "Slot index not in manifest"))
proc updateExpiry(blocks: seq[bt.Block]): Future[?!void] {.async.} =
proc updateExpiry(
blocks: seq[bt.Block]
): Future[?!void] {.async: (raises: [CancelledError]).} =
trace "Updating expiry for blocks", blocks = blocks.len
let ensureExpiryFutures = blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry))
if updateExpiryErr =? (await allFutureResult(ensureExpiryFutures)).errorOption:
return failure(updateExpiryErr)
let ensureExpiryFutures =
blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry))
let res = await allFinishedFailed[?!void](ensureExpiryFutures)
if res.failure.len > 0:
trace "Some blocks failed to update expiry", len = res.failure.len
return failure("Some blocks failed to update expiry (" & $res.failure.len & " )")
if not blocksCb.isNil and err =? (await blocksCb(blocks)).errorOption:
trace "Unable to process blocks", err = err.msg
@ -559,30 +706,48 @@ proc onStore(
return success()
without indexer =? manifest.protectedStrategy.init(
0, manifest.numSlotBlocks() - 1, manifest.numSlots).catch, err:
if slotIdx > int.high.uint64:
error "Cannot cast slot index to int", slotIndex = slotIdx
return
if isRepairing:
trace "start repairing slot", slotIdx
try:
let erasure = Erasure.new(
self.networkStore, leoEncoderProvider, leoDecoderProvider, self.taskpool
)
if err =? (await erasure.repair(manifest)).errorOption:
error "Unable to erasure decode repairing manifest",
cid = manifest.treeCid, exc = err.msg
return failure(err)
except CatchableError as exc:
error "Error erasure decoding repairing manifest",
cid = manifest.treeCid, exc = exc.msg
return failure(exc.msg)
else:
without indexer =?
manifest.verifiableStrategy.init(0, manifest.blocksCount - 1, manifest.numSlots).catch,
err:
trace "Unable to create indexing strategy from protected manifest", err = err.msg
return failure(err)
without blksIter =? indexer.getIndicies(slotIdx).catch, err:
trace "Unable to get indicies from strategy", err = err.msg
without blksIter =? indexer.getIndices(slotIdx.int).catch, err:
trace "Unable to get indices from strategy", err = err.msg
return failure(err)
if err =? (await self.fetchBatched(
manifest.treeCid,
blksIter,
onBatch = updateExpiry)).errorOption:
if err =? (
await self.fetchBatched(manifest.treeCid, blksIter, onBatch = updateExpiry)
).errorOption:
trace "Unable to fetch blocks", err = err.msg
return failure(err)
without slotRoot =? (await builder.buildSlot(slotIdx.Natural)), err:
without slotRoot =? (await builder.buildSlot(slotIdx.int)), err:
trace "Unable to build slot", err = err.msg
return failure(err)
trace "Slot successfully retrieved and reconstructed"
if cid =? slotRoot.toSlotCid() and cid != manifest.slotRoots[slotIdx.int]:
trace "Slot root mismatch", manifest = manifest.slotRoots[slotIdx.int], recovered = slotRoot.toSlotCid()
if cid =? slotRoot.toSlotCid() and cid != manifest.slotRoots[slotIdx]:
trace "Slot root mismatch",
manifest = manifest.slotRoots[slotIdx.int], recovered = slotRoot.toSlotCid()
return failure(newException(CodexError, "Slot root mismatch"))
trace "Slot successfully retrieved and reconstructed"
@ -590,15 +755,14 @@ proc onStore(
return success()
proc onProve(
self: CodexNodeRef,
slot: Slot,
challenge: ProofChallenge): Future[?!Groth16Proof] {.async.} =
self: CodexNodeRef, slot: Slot, challenge: ProofChallenge
): Future[?!Groth16Proof] {.async: (raises: [CancelledError]).} =
## Generates a proof for a given slot and challenge
##
let
cidStr = slot.request.content.cid
slotIdx = slot.slotIndex.truncate(Natural)
cidStr = $slot.request.content.cid
slotIdx = slot.slotIndex
logScope:
cid = cidStr
@ -619,7 +783,8 @@ proc onProve(
return failure(err)
when defined(verify_circuit):
without (inputs, proof) =? await prover.prove(slotIdx, manifest, challenge), err:
without (inputs, proof) =? await prover.prove(slotIdx.int, manifest, challenge),
err:
error "Unable to generate proof", err = err.msg
return failure(err)
@ -633,7 +798,7 @@ proc onProve(
trace "Proof verified successfully"
else:
without (_, proof) =? await prover.prove(slotIdx, manifest, challenge), err:
without (_, proof) =? await prover.prove(slotIdx.int, manifest, challenge), err:
error "Unable to generate proof", err = err.msg
return failure(err)
@ -646,20 +811,11 @@ proc onProve(
failure "Prover not enabled"
proc onExpiryUpdate(
self: CodexNodeRef,
rootCid: string,
expiry: SecondsSince1970): Future[?!void] {.async.} =
without cid =? Cid.init(rootCid):
trace "Unable to parse Cid", cid
let error = newException(CodexError, "Unable to parse Cid")
return failure(error)
self: CodexNodeRef, rootCid: Cid, expiry: SecondsSince1970
): Future[?!void] {.async: (raises: [CancelledError]).} =
return await self.updateExpiry(rootCid, expiry)
return await self.updateExpiry(cid, expiry)
proc onClear(
self: CodexNodeRef,
request: StorageRequest,
slotIndex: UInt256) =
proc onClear(self: CodexNodeRef, request: StorageRequest, slotIndex: uint64) =
# TODO: remove data from local storage
discard
@ -674,23 +830,27 @@ proc start*(self: CodexNodeRef) {.async.} =
await self.clock.start()
if hostContracts =? self.contracts.host:
hostContracts.sales.onStore =
proc(
hostContracts.sales.onStore = proc(
request: StorageRequest,
slot: UInt256,
onBatch: BatchProc): Future[?!void] = self.onStore(request, slot, onBatch)
expiry: SecondsSince1970,
slot: uint64,
onBatch: BatchProc,
isRepairing: bool = false,
): Future[?!void] {.async: (raw: true, raises: [CancelledError]).} =
self.onStore(request, expiry, slot, onBatch, isRepairing)
hostContracts.sales.onExpiryUpdate =
proc(rootCid: string, expiry: SecondsSince1970): Future[?!void] =
hostContracts.sales.onExpiryUpdate = proc(
rootCid: Cid, expiry: SecondsSince1970
): Future[?!void] {.async: (raw: true, raises: [CancelledError]).} =
self.onExpiryUpdate(rootCid, expiry)
hostContracts.sales.onClear =
proc(request: StorageRequest, slotIndex: UInt256) =
hostContracts.sales.onClear = proc(request: StorageRequest, slotIndex: uint64) =
# TODO: remove data from local storage
self.onClear(request, slotIndex)
hostContracts.sales.onProve =
proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] =
hostContracts.sales.onProve = proc(
slot: Slot, challenge: ProofChallenge
): Future[?!Groth16Proof] {.async: (raw: true, raises: [CancelledError]).} =
# TODO: generate proof
self.onProve(slot, challenge)
@ -721,20 +881,19 @@ proc start*(self: CodexNodeRef) {.async.} =
self.contracts.validator = ValidatorInteractions.none
self.networkId = self.switch.peerInfo.peerId
notice "Started codex node", id = self.networkId, addrs = self.switch.peerInfo.addrs
notice "Started Storage node", id = self.networkId, addrs = self.switch.peerInfo.addrs
proc stop*(self: CodexNodeRef) {.async.} =
trace "Stopping node"
await self.trackedFutures.cancelTracked()
if not self.engine.isNil:
await self.engine.stop()
if not self.discovery.isNil:
await self.discovery.stop()
if not self.clock.isNil:
await self.clock.stop()
if clientContracts =? self.contracts.client:
await clientContracts.stop()
@ -744,6 +903,10 @@ proc stop*(self: CodexNodeRef) {.async.} =
if validatorContracts =? self.contracts.validator:
await validatorContracts.stop()
if not self.clock.isNil:
await self.clock.stop()
proc close*(self: CodexNodeRef) {.async.} =
if not self.networkStore.isNil:
await self.networkStore.close
@ -753,9 +916,10 @@ proc new*(
networkStore: NetworkStore,
engine: BlockExcEngine,
discovery: Discovery,
taskpool: Taskpool,
prover = Prover.none,
contracts = Contracts.default,
taskpool = Taskpool.new(num_threads = countProcessors())): CodexNodeRef =
): CodexNodeRef =
## Create a new instance of a Codex node; call `start` to run it
##
@ -765,5 +929,14 @@ proc new*(
engine: engine,
prover: prover,
discovery: discovery,
taskPool: taskpool,
contracts: contracts,
taskpool: taskpool)
trackedFutures: TrackedFutures(),
)
proc hasLocalBlock*(
self: CodexNodeRef, cid: Cid
): Future[bool] {.async: (raises: [CancelledError]).} =
## Returns true if the given Cid is present in the local store
return await (cid in self.networkStore.localStore)


@ -2,9 +2,10 @@ import pkg/stint
type
Periodicity* = object
seconds*: UInt256
Period* = UInt256
Timestamp* = UInt256
seconds*: uint64
Period* = uint64
Timestamp* = uint64
func periodOf*(periodicity: Periodicity, timestamp: Timestamp): Period =
timestamp div periodicity.seconds
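A worked instance of periodOf with the new uint64 types (types inlined so the snippet stands alone):

type Periodicity = object
  seconds: uint64

func periodOf(p: Periodicity, timestamp: uint64): uint64 =
  timestamp div p.seconds

# timestamp 1_000 with 10-second periods falls in period 100
assert periodOf(Periodicity(seconds: 10), 1_000) == 100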


@ -14,20 +14,17 @@ export purchase
type
Purchasing* = ref object
market: Market
market*: Market
clock: Clock
purchases: Table[PurchaseId, Purchase]
proofProbability*: UInt256
PurchaseTimeout* = Timeout
const DefaultProofProbability = 100.u256
proc new*(_: type Purchasing, market: Market, clock: Clock): Purchasing =
Purchasing(
market: market,
clock: clock,
proofProbability: DefaultProofProbability,
)
Purchasing(market: market, clock: clock, proofProbability: DefaultProofProbability)
proc load*(purchasing: Purchasing) {.async.} =
let market = purchasing.market
@ -43,8 +40,8 @@ proc start*(purchasing: Purchasing) {.async.} =
proc stop*(purchasing: Purchasing) {.async.} =
discard
proc populate*(purchasing: Purchasing,
request: StorageRequest
proc populate*(
purchasing: Purchasing, request: StorageRequest
): Future[StorageRequest] {.async.} =
result = request
if result.ask.proofProbability == 0.u256:
@ -55,8 +52,8 @@ proc populate*(purchasing: Purchasing,
result.nonce = Nonce(id)
result.client = await purchasing.market.getSigner()
proc purchase*(purchasing: Purchasing,
request: StorageRequest
proc purchase*(
purchasing: Purchasing, request: StorageRequest
): Future[Purchase] {.async.} =
let request = await purchasing.populate(request)
let purchase = Purchase.new(request, purchasing.market, purchasing.clock)
@ -75,4 +72,3 @@ func getPurchaseIds*(purchasing: Purchasing): seq[PurchaseId] =
for key in purchasing.purchases.keys:
pIds.add(key)
return pIds


@ -25,10 +25,7 @@ export purchaseid
export statemachine
func new*(
_: type Purchase,
requestId: RequestId,
market: Market,
clock: Clock
_: type Purchase, requestId: RequestId, market: Market, clock: Clock
): Purchase =
## create a new instance of a Purchase
##
@ -42,10 +39,7 @@ func new*(
return purchase
func new*(
_: type Purchase,
request: StorageRequest,
market: Market,
clock: Clock
_: type Purchase, request: StorageRequest, market: Market, clock: Clock
): Purchase =
## Create a new purchase using the given market and clock
let purchase = Purchase.new(request.id, market, clock)
@ -76,4 +70,5 @@ func error*(purchase: Purchase): ?(ref CatchableError) =
func state*(purchase: Purchase): ?string =
proc description(state: State): string =
$state
purchase.query(description)


@ -1,12 +1,14 @@
import std/hashes
import pkg/nimcrypto
import ../logutils
type PurchaseId* = distinct array[32, byte]
logutils.formatIt(LogFormat.textLines, PurchaseId): it.short0xHexLog
logutils.formatIt(LogFormat.json, PurchaseId): it.to0xHexLog
logutils.formatIt(LogFormat.textLines, PurchaseId):
it.short0xHexLog
logutils.formatIt(LogFormat.json, PurchaseId):
it.to0xHexLog
proc hash*(x: PurchaseId): Hash {.borrow.}
proc `==`*(x, y: PurchaseId): bool {.borrow.}
proc toHex*(x: PurchaseId): string = array[32, byte](x).toHex
proc toHex*(x: PurchaseId): string =
array[32, byte](x).toHex
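The distinct-array id pattern above borrows hash and `==` from the underlying array; a minimal standalone version:

import std/hashes

type Id = distinct array[4, byte]

proc hash(x: Id): Hash {.borrow.}
proc `==`(x, y: Id): bool {.borrow.}

let
  a = Id([1'u8, 2, 3, 4])
  b = Id([1'u8, 2, 3, 4])
assert a == b and hash(a) == hash(b)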


@ -14,5 +14,6 @@ type
clock*: Clock
requestId*: RequestId
request*: ?StorageRequest
PurchaseState* = ref object of State
PurchaseError* = object of CodexError


@ -1,25 +1,35 @@
import pkg/metrics
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ./errorhandling
import ./error
declareCounter(codex_purchases_cancelled, "codex purchases cancelled")
logScope:
topics = "marketplace purchases cancelled"
type PurchaseCancelled* = ref object of ErrorHandlingState
type PurchaseCancelled* = ref object of PurchaseState
method `$`*(state: PurchaseCancelled): string =
"cancelled"
method run*(state: PurchaseCancelled, machine: Machine): Future[?State] {.async.} =
method run*(
state: PurchaseCancelled, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_cancelled.inc()
let purchase = Purchase(machine)
warn "Request cancelled, withdrawing remaining funds", requestId = purchase.requestId
try:
warn "Request cancelled, withdrawing remaining funds",
requestId = purchase.requestId
await purchase.market.withdrawFunds(purchase.requestId)
let error = newException(Timeout, "Purchase cancelled due to timeout")
purchase.future.fail(error)
except CancelledError as e:
trace "PurchaseCancelled.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchaseCancelled.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))


@ -14,10 +14,13 @@ type PurchaseErrored* = ref object of PurchaseState
method `$`*(state: PurchaseErrored): string =
"errored"
method run*(state: PurchaseErrored, machine: Machine): Future[?State] {.async.} =
method run*(
state: PurchaseErrored, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_error.inc()
let purchase = Purchase(machine)
error "Purchasing error", error=state.error.msgDetail, requestId = purchase.requestId
error "Purchasing error",
error = state.error.msgDetail, requestId = purchase.requestId
purchase.future.fail(state.error)


@ -1,9 +0,0 @@
import pkg/questionable
import ../statemachine
import ./error
type
ErrorHandlingState* = ref object of PurchaseState
method onError*(state: ErrorHandlingState, error: ref CatchableError): ?State =
some State(PurchaseErrored(error: error))


@ -1,16 +1,30 @@
import pkg/metrics
import ../statemachine
import ../../logutils
import ../../utils/exceptions
import ./error
declareCounter(codex_purchases_failed, "codex purchases failed")
type
PurchaseFailed* = ref object of PurchaseState
type PurchaseFailed* = ref object of PurchaseState
method `$`*(state: PurchaseFailed): string =
"failed"
method run*(state: PurchaseFailed, machine: Machine): Future[?State] {.async.} =
method run*(
state: PurchaseFailed, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_failed.inc()
let purchase = Purchase(machine)
try:
warn "Request failed, withdrawing remaining funds", requestId = purchase.requestId
await purchase.market.withdrawFunds(purchase.requestId)
except CancelledError as e:
trace "PurchaseFailed.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchaseFailed.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))
let error = newException(PurchaseError, "Purchase failed")
return some State(PurchaseErrored(error: error))


@ -1,7 +1,9 @@
import pkg/metrics
import ../statemachine
import ../../utils/exceptions
import ../../logutils
import ./error
declareCounter(codex_purchases_finished, "codex purchases finished")
@ -13,8 +15,19 @@ type PurchaseFinished* = ref object of PurchaseState
method `$`*(state: PurchaseFinished): string =
"finished"
method run*(state: PurchaseFinished, machine: Machine): Future[?State] {.async.} =
method run*(
state: PurchaseFinished, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_finished.inc()
let purchase = Purchase(machine)
info "Purchase finished", requestId = purchase.requestId
try:
info "Purchase finished, withdrawing remaining funds",
requestId = purchase.requestId
await purchase.market.withdrawFunds(purchase.requestId)
purchase.future.complete()
except CancelledError as e:
trace "PurchaseFinished.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchaseFinished.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))


@ -1,18 +1,28 @@
import pkg/metrics
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ./errorhandling
import ./submitted
import ./error
declareCounter(codex_purchases_pending, "codex purchases pending")
type PurchasePending* = ref object of ErrorHandlingState
type PurchasePending* = ref object of PurchaseState
method `$`*(state: PurchasePending): string =
"pending"
method run*(state: PurchasePending, machine: Machine): Future[?State] {.async.} =
method run*(
state: PurchasePending, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_pending.inc()
let purchase = Purchase(machine)
try:
let request = !purchase.request
await purchase.market.requestStorage(request)
return some State(PurchaseSubmitted())
except CancelledError as e:
trace "PurchasePending.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchasePending.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))


@ -1,22 +1,25 @@
import pkg/metrics
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ./errorhandling
import ./finished
import ./failed
import ./error
declareCounter(codex_purchases_started, "codex purchases started")
logScope:
topics = "marketplace purchases started"
type PurchaseStarted* = ref object of ErrorHandlingState
type PurchaseStarted* = ref object of PurchaseState
method `$`*(state: PurchaseStarted): string =
"started"
method run*(state: PurchaseStarted, machine: Machine): Future[?State] {.async.} =
method run*(
state: PurchaseStarted, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_started.inc()
let purchase = Purchase(machine)
@ -27,15 +30,25 @@ method run*(state: PurchaseStarted, machine: Machine): Future[?State] {.async.}
let failed = newFuture[void]()
proc callback(_: RequestId) =
failed.complete()
var ended: Future[void]
try:
let subscription = await market.subscribeRequestFailed(purchase.requestId, callback)
# Ensure that we're past the request end by waiting an additional second
let ended = clock.waitUntil((await market.getRequestEnd(purchase.requestId)) + 1)
ended = clock.waitUntil((await market.getRequestEnd(purchase.requestId)) + 1)
let fut = await one(ended, failed)
await subscription.unsubscribe()
if fut.id == failed.id:
ended.cancel()
ended.cancelSoon()
return some State(PurchaseFailed())
else:
failed.cancel()
failed.cancelSoon()
return some State(PurchaseFinished())
except CancelledError as e:
ended.cancelSoon()
failed.cancelSoon()
trace "PurchaseStarted.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchaseStarted.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))
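The body above races two futures and cancels the loser; the same shape in isolation, assuming pkg/chronos (one resolves to whichever future finishes first):

import pkg/chronos

proc race() {.async.} =
  let
    ended = sleepAsync(50.milliseconds)
    failed = sleepAsync(200.milliseconds)
  let winner = await one(ended, failed)
  if winner.id == failed.id:
    ended.cancelSoon() # fire-and-forget cancellation, as in the diff
    echo "failed first"
  else:
    failed.cancelSoon()
    echo "ended first"

when isMainModule:
  waitFor race()

cancelSoon schedules cancellation without awaiting it, which is why the CancelledError branch above can safely call it on both futures before returning.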


@ -1,36 +1,41 @@
import pkg/metrics
import ../../logutils
import ../../utils/exceptions
import ../statemachine
import ./errorhandling
import ./started
import ./cancelled
import ./error
logScope:
topics = "marketplace purchases submitted"
declareCounter(codex_purchases_submitted, "codex purchases submitted")
type PurchaseSubmitted* = ref object of ErrorHandlingState
type PurchaseSubmitted* = ref object of PurchaseState
method `$`*(state: PurchaseSubmitted): string =
"submitted"
method run*(state: PurchaseSubmitted, machine: Machine): Future[?State] {.async.} =
method run*(
state: PurchaseSubmitted, machine: Machine
): Future[?State] {.async: (raises: []).} =
codex_purchases_submitted.inc()
let purchase = Purchase(machine)
let request = !purchase.request
let market = purchase.market
let clock = purchase.clock
info "Request submitted, waiting for slots to be filled", requestId = purchase.requestId
info "Request submitted, waiting for slots to be filled",
requestId = purchase.requestId
proc wait {.async.} =
let done = newFuture[void]()
proc wait() {.async.} =
let done = newAsyncEvent()
proc callback(_: RequestId) =
done.complete()
done.fire()
let subscription = await market.subscribeFulfillment(request.id, callback)
await done
await done.wait()
await subscription.unsubscribe()
proc withTimeout(future: Future[void]) {.async.} =
@ -42,5 +47,10 @@ method run*(state: PurchaseSubmitted, machine: Machine): Future[?State] {.async.
await wait().withTimeout()
except Timeout:
return some State(PurchaseCancelled())
except CancelledError as e:
trace "PurchaseSubmitted.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchaseSubmitted.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))
return some State(PurchaseStarted())
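The rewritten wait uses chronos' AsyncEvent instead of a bare Future, which sidesteps double-complete hazards when a callback can fire more than once; a compact sketch:

import pkg/chronos

proc demo() {.async.} =
  let done = newAsyncEvent()

  proc callback() =
    done.fire() # safe to call repeatedly, unlike Future.complete

  proc subscription() {.async.} =
    # stand-in for the market subscription invoking the callback later
    await sleepAsync(10.milliseconds)
    callback()

  asyncSpawn subscription()
  await done.wait()
  echo "fulfilled"

when isMainModule:
  waitFor demo()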


@ -1,25 +1,29 @@
import pkg/metrics
import ../../utils/exceptions
import ../../logutils
import ../statemachine
import ./errorhandling
import ./submitted
import ./started
import ./cancelled
import ./finished
import ./failed
import ./error
declareCounter(codex_purchases_unknown, "codex purchases unknown")
type PurchaseUnknown* = ref object of ErrorHandlingState
type PurchaseUnknown* = ref object of PurchaseState
method `$`*(state: PurchaseUnknown): string =
"unknown"
method run*(state: PurchaseUnknown, machine: Machine): Future[?State] {.async.} =
method run*(
state: PurchaseUnknown, machine: Machine
): Future[?State] {.async: (raises: []).} =
try:
codex_purchases_unknown.inc()
let purchase = Purchase(machine)
if (request =? await purchase.market.getRequest(purchase.requestId)) and
(requestState =? await purchase.market.requestState(purchase.requestId)):
purchase.request = some request
case requestState
@ -33,3 +37,8 @@ method run*(state: PurchaseUnknown, machine: Machine): Future[?State] {.async.}
return some State(PurchaseFinished())
of RequestState.Failed:
return some State(PurchaseFailed())
except CancelledError as e:
trace "PurchaseUnknown.run was cancelled", error = e.msgDetail
except CatchableError as e:
error "Error during PurchaseUnknown.run", error = e.msgDetail
return some State(PurchaseErrored(error: e))

File diff suppressed because it is too large.


@ -1,4 +1,4 @@
## Nim-Codex
## Logos Storage
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -14,7 +14,7 @@ import pkg/chronos
import pkg/libp2p
import pkg/stew/base10
import pkg/stew/byteutils
import pkg/stew/results
import pkg/results
import pkg/stint
import ../sales
@ -25,9 +25,7 @@ proc encodeString*(cid: type Cid): Result[string, cstring] =
ok($cid)
proc decodeString*(T: type Cid, value: string): Result[Cid, cstring] =
Cid
.init(value)
.mapErr do(e: CidError) -> cstring:
Cid.init(value).mapErr do(e: CidError) -> cstring:
case e
of CidError.Incorrect: "Incorrect Cid".cstring
of CidError.Unsupported: "Unsupported Cid".cstring
@ -44,9 +42,8 @@ proc encodeString*(address: MultiAddress): Result[string, cstring] =
ok($address)
proc decodeString*(T: type MultiAddress, value: string): Result[MultiAddress, cstring] =
MultiAddress
.init(value)
.mapErr do(e: string) -> cstring: cstring(e)
MultiAddress.init(value).mapErr do(e: string) -> cstring:
cstring(e)
proc decodeString*(T: type SomeUnsignedInt, value: string): Result[T, cstring] =
Base10.decode(T, value)
@ -77,19 +74,20 @@ proc decodeString*(_: type UInt256, value: string): Result[UInt256, cstring] =
except ValueError as e:
err e.msg.cstring
proc decodeString*(_: type array[32, byte],
value: string): Result[array[32, byte], cstring] =
proc decodeString*(
_: type array[32, byte], value: string
): Result[array[32, byte], cstring] =
try:
ok array[32, byte].fromHex(value)
except ValueError as e:
err e.msg.cstring
proc decodeString*[T: PurchaseId | RequestId | Nonce | SlotId | AvailabilityId](_: type T,
value: string): Result[T, cstring] =
proc decodeString*[T: PurchaseId | RequestId | Nonce | SlotId | AvailabilityId](
_: type T, value: string
): Result[T, cstring] =
array[32, byte].decodeString(value).map(id => T(id))
proc decodeString*(t: typedesc[string],
value: string): Result[string, cstring] =
proc decodeString*(t: typedesc[string], value: string): Result[string, cstring] =
ok(value)
proc encodeString*(value: string): RestResult[string] =

Some files were not shown because too many files have changed in this diff.