commit 1c08b33a66

Merge remote-tracking branch 'origin/main' into por-backends

Fixing merge conflicts between por-serialize and por-backends.

# Conflicts:
#	codex/storageproofs/backends/backend_blst.nim
#	codex/storageproofs/backends/backend_constantine.nim
#	codex/storageproofs/por/por.nim
#	dagger/por/README.md

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
@@ -36,7 +36,7 @@ jobs:
         builder: windows-2019
     name: '${{ matrix.target.os }}-${{ matrix.target.cpu }}-${{ matrix.branch }}'
     runs-on: ${{ matrix.builder }}
-    timeout-minutes: 40
+    timeout-minutes: 60
     steps:
       - name: Checkout nim-codex
         uses: actions/checkout@v2

@@ -209,12 +209,12 @@ jobs:
          make NIM_COMMIT="${{ matrix.branch }}" testAll
          if [[ ${{ runner.os }} = macOS ]]; then
            echo
-           echo otool -L build/testDagger
-           otool -L build/testDagger
+           echo otool -L build/testCodex
+           otool -L build/testCodex
          else
            echo
-           echo ldd build/testDagger
-           ldd build/testDagger
+           echo ldd build/testCodex
+           ldd build/testCodex
          fi
          echo "Testing TRACE log level"
-         ./env.sh nim c -d:chronicles_log_level=TRACE dagger.nim
+         ./env.sh nim c -d:chronicles_log_level=TRACE codex.nim
@@ -27,4 +27,4 @@ nimble.paths
 .env

 .update.timestamp
-dagger.nims
+codex.nims
Makefile (26 lines changed)
@@ -56,15 +56,15 @@ else
 NIM_PARAMS := $(NIM_PARAMS) -d:release
 endif

-deps: | deps-common nat-libs dagger.nims
+deps: | deps-common nat-libs codex.nims
 ifneq ($(USE_LIBBACKTRACE), 0)
 deps: | libbacktrace
 endif

-#- deletes and recreates "dagger.nims" which on Windows is a copy instead of a proper symlink
+#- deletes and recreates "codex.nims" which on Windows is a copy instead of a proper symlink
 update: | update-common
-	rm -rf dagger.nims && \
-		$(MAKE) dagger.nims $(HANDLE_OUTPUT)
+	rm -rf codex.nims && \
+		$(MAKE) codex.nims $(HANDLE_OUTPUT)

 # detecting the os
 ifeq ($(OS),Windows_NT) # is Windows_NT on XP, 2000, 7, Vista, 10...

@@ -79,16 +79,16 @@ endif
 # Builds and run a part of the test suite
 test: | build deps
	echo -e $(BUILD_MSG) "build/$@" && \
-		$(ENV_SCRIPT) nim test $(NIM_PARAMS) dagger.nims
+		$(ENV_SCRIPT) nim test $(NIM_PARAMS) codex.nims

 # Builds and runs all tests
 testAll: | build deps
-	echo -e $(BUILD_MSG) "build/testDagger" "build/testContracts" && \
-		$(ENV_SCRIPT) nim testAll $(NIM_PARAMS) dagger.nims
+	echo -e $(BUILD_MSG) "build/testCodex" "build/testContracts" && \
+		$(ENV_SCRIPT) nim testAll $(NIM_PARAMS) codex.nims

 # symlink
-dagger.nims:
-	ln -s dagger.nimble $@
+codex.nims:
+	ln -s codex.nimble $@

 # nim-libbacktrace
 libbacktrace:

@@ -96,12 +96,12 @@ libbacktrace:

 coverage:
	$(MAKE) NIMFLAGS="--lineDir:on --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage" testAll
-	cd nimcache/release/testDagger && rm -f *.c
+	cd nimcache/release/testCodex && rm -f *.c
	cd nimcache/release/testContracts && rm -f *.c
	mkdir -p coverage
-	lcov --capture --directory nimcache/release/testDagger --directory nimcache/release/testContracts --output-file coverage/coverage.info
-	shopt -s globstar && ls $$(pwd)/dagger/{*,**/*}.nim
-	shopt -s globstar && lcov --extract coverage/coverage.info $$(pwd)/dagger/{*,**/*}.nim --output-file coverage/coverage.f.info
+	lcov --capture --directory nimcache/release/testCodex --directory nimcache/release/testContracts --output-file coverage/coverage.info
+	shopt -s globstar && ls $$(pwd)/codex/{*,**/*}.nim
+	shopt -s globstar && lcov --extract coverage/coverage.info $$(pwd)/codex/{*,**/*}.nim --output-file coverage/coverage.f.info
	echo -e $(BUILD_MSG) "coverage/report/index.html"
	genhtml coverage/coverage.f.info --output-directory coverage/report
	if which open >/dev/null; then (echo -e "\e[92mOpening\e[39m HTML coverage report in browser..." && open coverage/report/index.html) || true; fi
@@ -20,3 +20,4 @@ coverage:
 #   `informational`: https://docs.codecov.com/docs/commit-status#informational
 #   `threshold`: https://docs.codecov.com/docs/commit-status#threshold
 informational: true
+comment: false
@@ -1,4 +1,4 @@
-## Nim-Dagger
+## Nim-Codex
 ## Copyright (c) 2021 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -12,23 +12,23 @@ import pkg/chronos
 import pkg/confutils
 import pkg/libp2p

-import ./dagger/conf
-import ./dagger/dagger
+import ./codex/conf
+import ./codex/codex

-export dagger, conf, libp2p, chronos, chronicles
+export codex, conf, libp2p, chronos, chronicles

 when isMainModule:
   import std/os

   import pkg/confutils/defs

-  import ./dagger/utils/fileutils
+  import ./codex/utils/fileutils

   when defined(posix):
     import system/ansi_c

-  let config = DaggerConf.load(
-    version = daggerFullVersion
+  let config = CodexConf.load(
+    version = codexFullVersion
   )
   config.setupLogging()
   config.setupMetrics()

@@ -50,7 +50,7 @@ when isMainModule:

   trace "Repo dir initialized", dir = config.dataDir / "repo"

-  let server = DaggerServer.new(config)
+  let server = CodexServer.new(config)

   ## Ctrl+C handling
   proc controlCHandler() {.noconv.} =
@@ -1,7 +1,7 @@
 mode = ScriptMode.Verbose

 version = "0.1.0"
-author = "Dagger Team"
+author = "Codex Team"
 description = "p2p data durability engine"
 license = "MIT"
 binDir = "build"

@@ -28,7 +28,7 @@ requires "nim >= 1.2.0",

 when declared(namedBin):
   namedBin = {
-    "dagger/dagger": "dagger"
+    "codex/codex": "codex"
   }.toTable()

 ### Helper functions

@@ -45,23 +45,23 @@ proc test(name: string, srcDir = "tests/", lang = "c") =
   buildBinary name, srcDir
   exec "build/" & name

-task dagger, "build dagger binary":
-  buildBinary "dagger"
+task codex, "build codex binary":
+  buildBinary "codex"

-task testDagger, "Build & run Dagger tests":
-  test "testDagger"
+task testCodex, "Build & run Codex tests":
+  test "testCodex"

-task testContracts, "Build & run Dagger Contract tests":
+task testContracts, "Build & run Codex Contract tests":
   test "testContracts"

 task testIntegration, "Run integration tests":
-  daggerTask()
+  codexTask()
   test "testIntegration"

 task test, "Run tests":
-  testDaggerTask()
+  testCodexTask()

 task testAll, "Run all tests":
-  testDaggerTask()
+  testCodexTask()
   testContractsTask()
   testIntegrationTask()
@@ -0,0 +1,5 @@
+import ./engine/discovery
+import ./engine/engine
+import ./engine/payments
+
+export discovery, engine, payments
@@ -1,4 +1,4 @@
-## Nim-Dagger
+## Nim-Codex
 ## Copyright (c) 2022 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -22,10 +22,10 @@ import ../../utils
 import ../../discovery
 import ../../stores/blockstore

-import ../pendingblocks
+import ./pendingblocks

 logScope:
-  topics = "dagger discovery engine"
+  topics = "codex discovery engine"

 const
   DefaultConcurrentDiscRequests = 10

@@ -105,7 +105,7 @@ proc advertiseTaskLoop(b: DiscoveryEngine) {.async.} =

       try:
         trace "Advertising block", cid = $cid
-        let request = b.discovery.provideBlock(cid)
+        let request = b.discovery.provide(cid)
         b.inFlightAdvReqs[cid] = request
         await request
       finally:

@@ -137,7 +137,7 @@ proc discoveryTaskLoop(b: DiscoveryEngine) {.async.} =
       try:
         let
           request = b.discovery
-            .findBlockProviders(cid)
+            .find(cid)
            .wait(DefaultDiscoveryTimeout)

         b.inFlightDiscReqs[cid] = request
@@ -1,4 +1,4 @@
-## Nim-Dagger
+## Nim-Codex
 ## Copyright (c) 2021 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -14,23 +14,24 @@ import pkg/chronos
 import pkg/chronicles
 import pkg/libp2p

-import ../stores/blockstore
-import ../blocktype as bt
-import ../utils
+import ../../stores/blockstore
+import ../../blocktype as bt
+import ../../utils

-import ./protobuf/blockexc
-import ./protobuf/presence
+import ../protobuf/blockexc
+import ../protobuf/presence

-import ./network
-import ./peers
-import ./engine/payments
-import ./engine/discovery
+import ../network
+import ../peers
+
+import ./payments
+import ./discovery
 import ./pendingblocks

 export peers, pendingblocks, payments, discovery

 logScope:
-  topics = "dagger blockexc engine"
+  topics = "codex blockexc engine"

 const
   DefaultMaxPeersPerRequest* = 10
@@ -1,3 +1,12 @@
+## Nim-Codex
+## Copyright (c) 2021 Status Research & Development GmbH
+## Licensed under either of
+##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
+##  * MIT license ([LICENSE-MIT](LICENSE-MIT))
+## at your option.
+## This file may not be copied, modified, or distributed except according to
+## those terms.
+
 import std/math
 import pkg/nitro
 import pkg/questionable/results
@@ -1,4 +1,4 @@
-## Nim-Dagger
+## Nim-Codex
 ## Copyright (c) 2021 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -18,10 +18,10 @@ import pkg/chronicles
 import pkg/chronos
 import pkg/libp2p

-import ../blocktype
+import ../../blocktype

 logScope:
-  topics = "dagger blockexc pendingblocks"
+  topics = "codex blockexc pendingblocks"

 const
   DefaultBlockTimeout* = 10.minutes
@@ -0,0 +1,4 @@
+import ./network/network
+import ./network/networkpeer
+
+export network, networkpeer
@@ -1,4 +1,4 @@
-## Nim-Dagger
+## Nim-Codex
 ## Copyright (c) 2021 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -17,18 +17,18 @@ import pkg/libp2p
 import pkg/questionable
 import pkg/questionable/results

-import ../blocktype as bt
-import ./protobuf/blockexc as pb
-import ./protobuf/payments
+import ../../blocktype as bt
+import ../protobuf/blockexc as pb
+import ../protobuf/payments

 import ./networkpeer

-export networkpeer, payments
+export network, payments

 logScope:
-  topics = "dagger blockexc network"
+  topics = "codex blockexc network"

-const Codec* = "/dagger/blockexc/1.0.0"
+const Codec* = "/codex/blockexc/1.0.0"

 type
   WantListHandler* = proc(peer: PeerID, wantList: WantList): Future[void] {.gcsafe.}

@@ -82,7 +82,7 @@ proc handleWantList(
   if isNil(b.handlers.onWantList):
     return

-  trace "Handling want list for peer", peer = peer.id
+  trace "Handling want list for peer", peer = peer.id, items = list.entries.len
   b.handlers.onWantList(peer.id, list)

 # TODO: make into a template

@@ -119,7 +119,7 @@ proc broadcastWantList*(
   if id notin b.peers:
     return

-  trace "Sending want list to peer", peer = id, `type` = $wantType, len = cids.len
+  trace "Sending want list to peer", peer = id, `type` = $wantType, items = cids.len

   let
     wantList = makeWantList(

@@ -142,7 +142,7 @@ proc handleBlocks(
   if isNil(b.handlers.onBlocks):
     return

-  trace "Handling blocks for peer", peer = peer.id
+  trace "Handling blocks for peer", peer = peer.id, items = blocks.len

   var blks: seq[bt.Block]
   for blob in blocks:

@@ -178,7 +178,7 @@ proc broadcastBlocks*(
     return

   b.peers.withValue(id, peer):
-    trace "Sending blocks to peer", peer = id, len = blocks.len
+    trace "Sending blocks to peer", peer = id, items = blocks.len
     peer[].broadcast(pb.Message(payload: makeBlocks(blocks)))

 proc handleBlockPresence(

@@ -191,7 +191,7 @@ proc handleBlockPresence(
   if isNil(b.handlers.onPresence):
     return

-  trace "Handling block presence for peer", peer = peer.id
+  trace "Handling block presence for peer", peer = peer.id, items = presence.len
   b.handlers.onPresence(peer.id, presence)

 proc broadcastBlockPresence*(

@@ -204,7 +204,7 @@ proc broadcastBlockPresence*(
   if id notin b.peers:
     return

-  trace "Sending presence to peer", peer = id
+  trace "Sending presence to peer", peer = id, items = presence.len
   b.peers.withValue(id, peer):
     peer[].broadcast(Message(blockPresences: @presence))
@@ -1,4 +1,4 @@
-## Nim-Dagger
+## Nim-Codex
 ## Copyright (c) 2021 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -12,13 +12,13 @@ import pkg/chronicles
 import pkg/protobuf_serialization
 import pkg/libp2p

-import ./protobuf/blockexc
+import ../protobuf/blockexc

 logScope:
-  topics = "dagger blockexc networkpeer"
+  topics = "codex blockexc networkpeer"

 const
-  MaxMessageSize = 100 * 1024 * 1024 # manifest files can be big
+  MaxMessageSize = 100 * 1 shl 20 # manifest files can be big

 type
   RPCHandler* = proc(peer: NetworkPeer, msg: Message): Future[void] {.gcsafe.}

@@ -38,7 +38,7 @@ proc readLoop*(b: NetworkPeer, conn: Connection) {.async.} =
     return

   try:
-    while not conn.atEof:
+    while not conn.atEof or not conn.closed:
       let
         data = await conn.readLp(MaxMessageSize)
         msg: Message = Protobuf.decode(data, Message)
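A side note on the `MaxMessageSize` rewrite above: in Nim, `shl` shares precedence with `*` and both associate to the left, so `100 * 1 shl 20` parses as `(100 * 1) shl 20` and the constant keeps its old value of 100 MiB. A minimal standalone check (not part of the diff):

```nim
# Sanity check (standalone sketch): the rewritten constant is unchanged.
static:
  doAssert 100 * 1 shl 20 == 100 * 1024 * 1024 # both are 104_857_600 (100 MiB)
```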
@@ -0,0 +1,4 @@
+import ./peers/peerctxstore
+import ./peers/peercontext
+
+export peerctxstore, peercontext
@@ -1,3 +1,12 @@
+## Nim-Codex
+## Copyright (c) 2021 Status Research & Development GmbH
+## Licensed under either of
+##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
+##  * MIT license ([LICENSE-MIT](LICENSE-MIT))
+## at your option.
+## This file may not be copied, modified, or distributed except according to
+## those terms.
+
 import std/sequtils
 import std/tables
 import pkg/libp2p

@@ -5,9 +14,9 @@ import pkg/chronos
 import pkg/nitro
 import pkg/questionable

-import ./protobuf/blockexc
-import ./protobuf/payments
-import ./protobuf/presence
+import ../protobuf/blockexc
+import ../protobuf/payments
+import ../protobuf/presence

 export payments, nitro
@@ -1,4 +1,4 @@
-## Nim-Dagger
+## Nim-Codex
 ## Copyright (c) 2022 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -21,11 +21,11 @@ import pkg/libp2p

 import ../protobuf/blockexc

-import ../peercontext
+import ./peercontext
+export peercontext

 logScope:
-  topics = "dagger blockexc peerctxstore"
+  topics = "codex blockexc peerctxstore"

 type
   PeerCtxStore* = ref object of RootObj
@@ -1,4 +1,4 @@
-## Nim-Dagger
+## Nim-Codex
 ## Copyright (c) 2021 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -1,4 +1,4 @@
-## Nim-Dagger
+## Nim-Codex
 ## Copyright (c) 2021 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -59,7 +59,7 @@ template EmptyDigests*: untyped =
           .get()
       }.toTable,
       CIDv1: {
-        multiCodec("sha2-256"): EmptyCid[CIDv0]
+        multiCodec("sha2-256"): EmptyCid[CIDv1]
         .catch
         .get()[multiCodec("sha2-256")]
         .catch
@@ -1,4 +1,4 @@
-## Nim-Dagger
+## Nim-Codex
 ## Copyright (c) 2021 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -1,4 +1,4 @@
-## Nim-Dagger
+## Nim-Codex
 ## Copyright (c) 2021 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -33,26 +33,26 @@ import ./discovery
 import ./contracts

 type
-  DaggerServer* = ref object
+  CodexServer* = ref object
     runHandle: Future[void]
-    config: DaggerConf
+    config: CodexConf
     restServer: RestServerRef
-    daggerNode: DaggerNodeRef
+    codexNode: CodexNodeRef

-proc start*(s: DaggerServer) {.async.} =
+proc start*(s: CodexServer) {.async.} =
   s.restServer.start()
-  await s.daggerNode.start()
+  await s.codexNode.start()

   s.runHandle = newFuture[void]()
   await s.runHandle

-proc stop*(s: DaggerServer) {.async.} =
+proc stop*(s: CodexServer) {.async.} =
   await allFuturesThrowing(
-    s.restServer.stop(), s.daggerNode.stop())
+    s.restServer.stop(), s.codexNode.stop())

   s.runHandle.complete()

-proc new*(T: type DaggerServer, config: DaggerConf): T =
+proc new*(T: type CodexServer, config: CodexConf): T =

   const SafePermissions = {UserRead, UserWrite}
   let

@@ -130,9 +130,9 @@ proc new*(T: type DaggerServer, config: DaggerConf): T =
       config.ethDeployment,
       config.ethAccount
     )
-    daggerNode = DaggerNodeRef.new(switch, store, engine, erasure, blockDiscovery, contracts)
+    codexNode = CodexNodeRef.new(switch, store, engine, erasure, blockDiscovery, contracts)
     restServer = RestServerRef.new(
-      daggerNode.initRestApi(config),
+      codexNode.initRestApi(config),
      initTAddress("127.0.0.1" , config.apiPort),
      bufferSize = (1024 * 64),
      maxRequestBodySize = int.high)

@@ -141,6 +141,6 @@ proc new*(T: type DaggerServer, config: DaggerConf): T =
   switch.mount(network)
   T(
     config: config,
-    daggerNode: daggerNode,
+    codexNode: codexNode,
     restServer: restServer,
   )
@@ -1,4 +1,4 @@
-## Nim-Dagger
+## Nim-Codex
 ## Copyright (c) 2021 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -44,7 +44,7 @@ type
     Json = "json"
     None = "none"

-  DaggerConf* = object
+  CodexConf* = object
     logLevel* {.
       defaultValue: LogLevel.INFO
       desc: "Sets the log level",

@@ -74,7 +74,7 @@ type
       name: "metrics-port" }: Port

     dataDir* {.
-      desc: "The directory where dagger will store configuration and data."
+      desc: "The directory where codex will store configuration and data."
      defaultValue: defaultDataDir()
      defaultValueDesc: ""
      abbr: "d"

@@ -124,7 +124,7 @@ type
       name: "max-peers" }: int

     agentString* {.
-      defaultValue: "Dagger"
+      defaultValue: "Codex"
      desc: "Node agent string which is used as identifier in network"
      name: "agent-string" }: string

@@ -171,20 +171,20 @@ const
   nimBanner* = staticExec("nim --version | grep Version")

   #TODO add versionMajor, Minor & Fix when we switch to semver
-  daggerVersion* = gitRevision
+  codexVersion* = gitRevision

-  daggerFullVersion* =
-    "Dagger build " & daggerVersion & "\p" &
+  codexFullVersion* =
+    "Codex build " & codexVersion & "\p" &
    nimBanner


 proc defaultDataDir*(): string =
   let dataDir = when defined(windows):
-    "AppData" / "Roaming" / "Dagger"
+    "AppData" / "Roaming" / "Codex"
   elif defined(macosx):
-    "Library" / "Application Support" / "Dagger"
+    "Library" / "Application Support" / "Codex"
   else:
-    ".cache" / "dagger"
+    ".cache" / "codex"

   getHomeDir() / dataDir

@@ -246,7 +246,7 @@ proc stripAnsi(v: string): string =

   res

-proc setupLogging*(conf: DaggerConf) =
+proc setupLogging*(conf: CodexConf) =
   when defaultChroniclesStream.outputs.type.arity != 2:
     warn "Logging configuration options not enabled in the current build"
   else:

@@ -283,7 +283,7 @@ proc setupLogging*(conf: DaggerConf) =

   setLogLevel(conf.logLevel)

-proc setupMetrics*(config: DaggerConf) =
+proc setupMetrics*(config: CodexConf) =
   if config.metricsEnabled:
     let metricsAddress = config.metricsAddress
     notice "Starting metrics HTTP server",
@@ -1,13 +1,13 @@
-Dagger Contracts in Nim
+Codex Contracts in Nim
 =======================

-Nim API for the [Dagger smart contracts][1].
+Nim API for the [Codex smart contracts][1].

 Usage
 -----

 For a global overview of the steps involved in starting and fulfilling a
-storage contract, see [Dagger Contracts][1].
+storage contract, see [Codex Contracts][1].

 Smart contract
 --------------

@@ -15,7 +15,7 @@ Smart contract
 Connecting to the smart contract on an Ethereum node:

 ```nim
-import dagger/contracts
+import codex/contracts
 import ethers

 let address = # fill in address where the contract was deployed

@@ -178,4 +178,4 @@ await storage

 [1]: https://github.com/status-im/dagger-contracts/
 [2]: https://ethereum.org/en/developers/docs/standards/tokens/erc-20/
-[3]: https://github.com/status-im/dagger-research/blob/main/design/storage-proof-timing.md
+[3]: https://github.com/status-im/codex-research/blob/main/design/storage-proof-timing.md
@@ -1,5 +1,5 @@
 import pkg/ethers
-import ../por/timing/proofs
+import ../storageproofs/timing/proofs
 import ./storage

 export proofs
@@ -1,4 +1,4 @@
-## Nim-Dagger
+## Nim-Codex
 ## Copyright (c) 2022 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -7,12 +7,17 @@
 ## This file may not be copied, modified, or distributed except according to
 ## those terms.

+import std/algorithm
+
 import pkg/chronos
 import pkg/chronicles
 import pkg/libp2p
+import pkg/libp2p/routing_record
+import pkg/libp2p/signed_envelope
 import pkg/questionable
 import pkg/questionable/results
 import pkg/stew/shims/net
+import pkg/contractabi/address as ca
 import pkg/libp2pdht/discv5/protocol as discv5

 import ./rng

@@ -20,6 +25,10 @@ import ./errors

 export discv5

+# TODO: If generics in methods had not been
+# deprecated, this could have been implemented
+# much more elegantly.
+
 type
   Discovery* = ref object of RootObj
     protocol: discv5.Protocol

@@ -42,21 +51,31 @@ proc new*(
     ),
     localInfo: localInfo)

+proc toNodeId*(cid: Cid): NodeId =
+  ## Cid to discovery id
+  ##
+
+  readUintBE[256](keccak256.digest(cid.data.buffer).data)
+
+proc toNodeId*(host: ca.Address): NodeId =
+  ## Eth address to discovery id
+  ##
+
+  readUintBE[256](keccak256.digest(host.toArray).data)
+
 proc findPeer*(
   d: Discovery,
   peerId: PeerID): Future[?PeerRecord] {.async.} =
-  let node = await d.protocol.resolve(toNodeId(peerId))
+  let
+    node = await d.protocol.resolve(toNodeId(peerId))
+
   return
     if node.isSome():
       some(node.get().record.data)
     else:
       none(PeerRecord)

-proc toDiscoveryId*(cid: Cid): NodeId =
-  ## To discovery id
-  readUintBE[256](keccak256.digest(cid.data.buffer).data)
-
-method findBlockProviders*(
+method find*(
   d: Discovery,
   cid: Cid): Future[seq[SignedPeerRecord]] {.async, base.} =
   ## Find block providers

@@ -64,19 +83,19 @@ method findBlockProviders*(

   trace "Finding providers for block", cid = $cid
   without providers =?
-    (await d.protocol.getProviders(cid.toDiscoveryId())).mapFailure, error:
+    (await d.protocol.getProviders(cid.toNodeId())).mapFailure, error:
     trace "Error finding providers for block", cid = $cid, error = error.msg

   return providers

-method provideBlock*(d: Discovery, cid: Cid) {.async, base.} =
+method provide*(d: Discovery, cid: Cid) {.async, base.} =
   ## Provide a bock Cid
   ##

   trace "Providing block", cid = $cid
   let
     nodes = await d.protocol.addProvider(
-      cid.toDiscoveryId(),
+      cid.toNodeId(),
      d.localInfo.signedPeerRecord)

   if nodes.len <= 0:

@@ -84,6 +103,39 @@ method provideBlock*(d: Discovery, cid: Cid) {.async, base.} =

     trace "Provided to nodes", nodes = nodes.len

+method find*(
+  d: Discovery,
+  host: ca.Address): Future[seq[SignedPeerRecord]] {.async, base.} =
+  ## Find host providers
+  ##
+
+  trace "Finding providers for host", host = $host
+  without var providers =?
+    (await d.protocol.getProviders(host.toNodeId())).mapFailure, error:
+    trace "Error finding providers for host", host = $host, exc = error.msg
+    return
+
+  if providers.len <= 0:
+    trace "No providers found", host = $host
+    return
+
+  providers.sort do(a, b: SignedPeerRecord) -> int:
+    system.cmp[uint64](a.data.seqNo, b.data.seqNo)
+
+  return providers
+
+method provide*(d: Discovery, host: ca.Address) {.async, base.} =
+  ## Provide hosts
+  ##
+
+  trace "Providing host", host = $host
+  let
+    nodes = await d.protocol.addProvider(
+      host.toNodeId(),
+      d.localInfo.signedPeerRecord)
+  if nodes.len > 0:
+    trace "Provided to nodes", nodes = nodes.len
+
 proc start*(d: Discovery) {.async.} =
   d.protocol.updateRecord(d.localInfo.signedPeerRecord).expect("updating SPR")
   d.protocol.open()
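With the rename above, block and host lookups share one surface: `find` and `provide` are overloaded on the key type (`Cid` for blocks, `ca.Address` for hosts), both hashed into a `NodeId` via `toNodeId`. A hypothetical caller's view (a sketch; `disc`, `cid` and `hostAddr` are assumed to exist in scope):

```nim
# Sketch only: how the unified discovery API reads after the rename.
proc discoveryExample(
  disc: Discovery,
  cid: Cid,
  hostAddr: ca.Address) {.async.} =
  let blockProviders = await disc.find(cid)     # was findBlockProviders
  await disc.provide(cid)                       # was provideBlock
  let hostProviders = await disc.find(hostAddr) # new overload for hosts
  await disc.provide(hostAddr)                  # new overload for hosts
```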
@@ -1,4 +1,4 @@
-## Nim-Dagger
+## Nim-Codex
 ## Copyright (c) 2022 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -1,4 +1,4 @@
-## Nim-Dagger
+## Nim-Codex
 ## Copyright (c) 2022 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -1,4 +1,4 @@
-## Nim-Dagger
+## Nim-Codex
 ## Copyright (c) 2022 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -1,4 +1,4 @@
-## Nim-Dagger
+## Nim-Codex
 ## Copyright (c) 2022 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -26,7 +26,7 @@ import ./backend
 export backend

 logScope:
-  topics = "dagger erasure"
+  topics = "codex erasure"

 type
   ## Encode a manifest into one that is erasure protected.
@@ -1,4 +1,4 @@
-## Nim-Dagger
+## Nim-Codex
 ## Copyright (c) 2021 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -10,12 +10,12 @@
 import pkg/stew/results

 type
-  DaggerError* = object of CatchableError # base dagger error
-  DaggerResult*[T] = Result[T, ref DaggerError]
+  CodexError* = object of CatchableError # base codex error
+  CodexResult*[T] = Result[T, ref CodexError]

 template mapFailure*(
   exp: untyped,
-  exc: typed = type DaggerError): untyped =
+  exc: typed = type CodexError): untyped =
   ## Convert `Result[T, E]` to `Result[E, ref CatchableError]`
   ##
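The renamed `CodexError`/`mapFailure` pair is what call sites such as the discovery code above use to adapt foreign error types. A minimal sketch of the intended use, assuming `mapFailure`'s default of wrapping the error in a `ref CodexError` (the `mightFail` helper is hypothetical):

```nim
# Hypothetical illustration of mapFailure: adapt a Result carrying a
# non-Codex error type into a CodexResult.
import pkg/stew/results

proc mightFail(): Result[int, cstring] =
  err("boom".cstring)

let adapted: CodexResult[int] = mightFail().mapFailure
doAssert adapted.isErr
```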
@@ -1,4 +1,4 @@
-## Nim-Dagger
+## Nim-Codex
 ## Copyright (c) 2022 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -1,4 +1,4 @@
-## Nim-Dagger
+## Nim-Codex
 ## Copyright (c) 2022 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -1,4 +1,4 @@
-## Nim-Dagger
+## Nim-Codex
 ## Copyright (c) 2022 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -1,4 +1,4 @@
-## Nim-Dagger
+## Nim-Codex
 ## Copyright (c) 2021 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -32,12 +32,15 @@ import ./discovery
 import ./contracts

 logScope:
-  topics = "dagger node"
+  topics = "codex node"

+const
+  PrefetchBatch = 100
+
 type
-  DaggerError = object of CatchableError
+  CodexError = object of CatchableError

-  DaggerNodeRef* = ref object
+  CodexNodeRef* = ref object
     switch*: Switch
     networkId*: PeerID
     blockStore*: BlockStore

@@ -46,7 +49,7 @@ type
     discovery*: Discovery
     contracts*: ?ContractInteractions

-proc start*(node: DaggerNodeRef) {.async.} =
+proc start*(node: CodexNodeRef) {.async.} =
   if not node.switch.isNil:
     await node.switch.start()

@@ -63,9 +66,9 @@ proc start*(node: DaggerNodeRef) {.async.} =
     await contracts.start()

   node.networkId = node.switch.peerInfo.peerId
-  notice "Started dagger node", id = $node.networkId, addrs = node.switch.peerInfo.addrs
+  notice "Started codex node", id = $node.networkId, addrs = node.switch.peerInfo.addrs

-proc stop*(node: DaggerNodeRef) {.async.} =
+proc stop*(node: CodexNodeRef) {.async.} =
   trace "Stopping node"

   if not node.engine.isNil:

@@ -84,28 +87,28 @@ proc stop*(node: DaggerNodeRef) {.async.} =
     await contracts.stop()

 proc findPeer*(
-  node: DaggerNodeRef,
+  node: CodexNodeRef,
   peerId: PeerID): Future[?PeerRecord] {.async.} =
   return await node.discovery.findPeer(peerId)

 proc connect*(
-  node: DaggerNodeRef,
+  node: CodexNodeRef,
   peerId: PeerID,
   addrs: seq[MultiAddress]): Future[void] =
   node.switch.connect(peerId, addrs)

 proc retrieve*(
-  node: DaggerNodeRef,
+  node: CodexNodeRef,
   cid: Cid): Future[?!LPStream] {.async.} =

   trace "Received retrieval request", cid
   without blk =? await node.blockStore.getBlock(cid):
     return failure(
-      newException(DaggerError, "Couldn't retrieve block for Cid!"))
+      newException(CodexError, "Couldn't retrieve block for Cid!"))

   without mc =? blk.cid.contentType():
     return failure(
-      newException(DaggerError, "Couldn't identify Cid!"))
+      newException(CodexError, "Couldn't identify Cid!"))

   # if we got a manifest, stream the blocks
   if $mc in ManifestContainers:

@@ -128,8 +131,12 @@ proc retrieve*(
     ## Initiates requests to all blocks in the manifest
     ##
     try:
-      discard await allFinished(
-        manifest.mapIt( node.blockStore.getBlock( it ) ))
+      let
+        batch = manifest.blocks.len div PrefetchBatch
+      trace "Prefetching in batches of", batch
+      for blks in manifest.blocks.distribute(batch, true):
+        discard await allFinished(
+          blks.mapIt( node.blockStore.getBlock( it ) ))
     except CatchableError as exc:
       trace "Exception prefetching blocks", exc = exc.msg

@@ -152,7 +159,7 @@ proc retrieve*(
   return LPStream(stream).success()

 proc store*(
-  node: DaggerNodeRef,
+  node: CodexNodeRef,
   stream: LPStream): Future[?!Cid] {.async.} =
   trace "Storing data"

@@ -186,7 +193,7 @@ proc store*(
   # Generate manifest
   without data =? blockManifest.encode():
     return failure(
-      newException(DaggerError, "Could not generate dataset manifest!"))
+      newException(CodexError, "Could not generate dataset manifest!"))

   # Store as a dag-pb block
   without manifest =? bt.Block.new(data = data, codec = DagPBCodec):

@@ -207,7 +214,7 @@ proc store*(

   return manifest.cid.success

-proc requestStorage*(self: DaggerNodeRef,
+proc requestStorage*(self: CodexNodeRef,
   cid: Cid,
   duration: UInt256,
   nodes: uint,

@@ -289,7 +296,7 @@ proc requestStorage*(self: DaggerNodeRef,
   return success purchase.id

 proc new*(
-  T: type DaggerNodeRef,
+  T: type CodexNodeRef,
   switch: Switch,
   store: BlockStore,
   engine: BlockExcEngine,
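On the prefetch change above: `distribute` from `std/sequtils` splits a sequence into a given number of groups, so dividing the block count by `PrefetchBatch` yields groups of roughly `PrefetchBatch` blocks each (this assumes the manifest holds at least `PrefetchBatch` blocks, since `distribute` requires a positive group count). A standalone illustration with plain ints standing in for blocks:

```nim
import std/sequtils

let
  blocks = toSeq(1 .. 250)     # stand-in for manifest.blocks
  groups = blocks.len div 100  # with PrefetchBatch = 100 -> 2 groups

# distribute splits into `groups` parts, spreading the remainder,
# so each part here holds 125 elements.
for blks in blocks.distribute(groups, true):
  echo blks.len # prints 125 twice
```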
@@ -2,11 +2,11 @@ import std/sets
 import pkg/upraises
 import pkg/questionable
 import pkg/chronicles
-import ./por/timing/proofs
+import ./storageproofs
 import ./clock

 export sets
-export proofs
+export storageproofs

 type
   Proving* = ref object
@@ -1,4 +1,4 @@
-## Nim-Dagger
+## Nim-Codex
 ## Copyright (c) 2021 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -47,10 +47,10 @@ proc decodeString(T: type Cid, value: string): Result[Cid, cstring] =
     .init(value)
     .mapErr do(e: CidError) -> cstring:
       case e
-      of CidError.Incorrect: "Incorrect Cid"
-      of CidError.Unsupported: "Unsupported Cid"
-      of CidError.Overrun: "Overrun Cid"
-      else: "Error parsing Cid"
+      of CidError.Incorrect: "Incorrect Cid".cstring
+      of CidError.Unsupported: "Unsupported Cid".cstring
+      of CidError.Overrun: "Overrun Cid".cstring
+      else: "Error parsing Cid".cstring

 proc encodeString(peerId: PeerID): Result[string, cstring] =
   ok($peerId)

@@ -102,11 +102,11 @@ proc decodeString(_: type array[32, byte],
   except ValueError as e:
     err e.msg.cstring

-proc initRestApi*(node: DaggerNodeRef, conf: DaggerConf): RestRouter =
+proc initRestApi*(node: CodexNodeRef, conf: CodexConf): RestRouter =
   var router = RestRouter.init(validate)
   router.api(
     MethodGet,
-    "/api/dagger/v1/connect/{peerId}") do (
+    "/api/codex/v1/connect/{peerId}") do (
      peerId: PeerID,
      addrs: seq[MultiAddress]) -> RestApiResponse:
      ## Connect to a peer

@@ -140,7 +140,7 @@ proc initRestApi*(node: DaggerNodeRef, conf: DaggerConf): RestRouter =

   router.api(
     MethodGet,
-    "/api/dagger/v1/download/{id}") do (
+    "/api/codex/v1/download/{id}") do (
      id: Cid, resp: HttpResponseRef) -> RestApiResponse:
      ## Download a file from the node in a streaming
      ## manner

@@ -185,7 +185,7 @@ proc initRestApi*(node: DaggerNodeRef, conf: DaggerConf): RestRouter =

   router.rawApi(
     MethodPost,
-    "/api/dagger/v1/storage/request/{cid}") do (cid: Cid) -> RestApiResponse:
+    "/api/codex/v1/storage/request/{cid}") do (cid: Cid) -> RestApiResponse:
      ## Create a request for storage
      ##
      ## cid - the cid of a previously uploaded dataset

@@ -216,7 +216,7 @@ proc initRestApi*(node: DaggerNodeRef, conf: DaggerConf): RestRouter =

   router.rawApi(
     MethodPost,
-    "/api/dagger/v1/upload") do (
+    "/api/codex/v1/upload") do (
     ) -> RestApiResponse:
      ## Upload a file in a streamming manner
      ##

@@ -273,7 +273,7 @@ proc initRestApi*(node: DaggerNodeRef, conf: DaggerConf): RestRouter =

   router.api(
     MethodGet,
-    "/api/dagger/v1/info") do () -> RestApiResponse:
+    "/api/codex/v1/info") do () -> RestApiResponse:
      ## Print rudimentary node information
      ##

@@ -288,7 +288,7 @@ proc initRestApi*(node: DaggerNodeRef, conf: DaggerConf): RestRouter =

   router.api(
     MethodGet,
-    "/api/dagger/v1/sales/availability") do () -> RestApiResponse:
+    "/api/codex/v1/sales/availability") do () -> RestApiResponse:
      ## Returns storage that is for sale

      without contracts =? node.contracts:

@@ -299,7 +299,7 @@ proc initRestApi*(node: DaggerNodeRef, conf: DaggerConf): RestRouter =

   router.rawApi(
     MethodPost,
-    "/api/dagger/v1/sales/availability") do () -> RestApiResponse:
+    "/api/codex/v1/sales/availability") do () -> RestApiResponse:
      ## Add available storage to sell
      ##
      ## size - size of available storage in bytes

@@ -321,7 +321,7 @@ proc initRestApi*(node: DaggerNodeRef, conf: DaggerConf): RestRouter =

   router.api(
     MethodGet,
-    "/api/dagger/v1/storage/purchases/{id}") do (
+    "/api/codex/v1/storage/purchases/{id}") do (
      id: array[32, byte]) -> RestApiResponse:

      without contracts =? node.contracts:
@@ -1,4 +1,4 @@
-## Nim-Dagger
+## Nim-Codex
 ## Copyright (c) 2021 Status Research & Development GmbH
 ## Licensed under either of
 ##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@@ -0,0 +1,7 @@
+import ./storageproofs/por
+import ./storageproofs/timing
+import ./storageproofs/stpstore
+import ./storageproofs/stpnetwork
+import ./storageproofs/stpproto
+
+export por, timing, stpstore, stpnetwork, stpproto
@@ -0,0 +1,4 @@
+import ./por/serialization
+import ./por/por
+
+export por, serialization
@ -47,7 +47,7 @@
|
|||
# Our implementation uses additive cyclic groups instead of the multiplicative
|
||||
# cyclic group in the paper, thus changing the name of the group operation as in
|
||||
# blscurve and blst. Thus, point multiplication becomes point addition, and scalar
|
||||
# exponentiation becomes scalar multiplication.
|
||||
# exponentiation becomes scalar multiplicaiton.
|
||||
#
|
||||
# Number of operations:
|
||||
# The following table summarizes the number of operations in different phases
|
||||
|
@ -85,134 +85,184 @@
|
|||
# q * (8 + 48) bytes
|
||||
# The size of the proof is instead
|
||||
# s * 32 + 48 bytes
|
||||
import std/endians
|
||||
|
||||
# Select backend to use
|
||||
# - blst supports only the BLS12-381 curve
|
||||
# - constantine is more experimental, supports BLS and BN curves as well
|
||||
# As of now configuration of backends is in the backend_* file itself
|
||||
when defined(por_backend_constantine):
|
||||
import ./backends/backend_constantine
|
||||
import ../backends/backend_constantine
|
||||
else:
|
||||
import ./backends/backend_blst
|
||||
import ../backends/backend_blst
|
||||
|
||||
import ../rng
|
||||
import endians
|
||||
import pkg/chronos
|
||||
import pkg/blscurve
|
||||
import pkg/blscurve/blst/blst_abi
|
||||
|
||||
import ../../rng
|
||||
import ../../streams
|
||||
|
||||
# sector size in bytes. Must be smaller than the subgroup order r
|
||||
# which is 255 bits long for BLS12-381
|
||||
const bytespersector = 31
|
||||
const
|
||||
BytesPerSector* = 31
|
||||
|
||||
# length in bytes of the unique (random) name
|
||||
const namelen = 512
|
||||
# length in bytes of the unique (random) name
|
||||
Namelen = 512
|
||||
|
||||
|
||||
type
|
||||
# a single sector
|
||||
ZChar = array[bytespersector, byte]
|
||||
ZChar* = array[BytesPerSector, byte]
|
||||
|
||||
# secret key combining the metadata signing key and the POR generation key
|
||||
SecretKey = object
|
||||
signkey: ec_SecretKey
|
||||
key: ec_scalar
|
||||
SecretKey* = object
|
||||
signkey*: ec_SecretKey
|
||||
key*: ec_scalar
|
||||
|
||||
# public key combining the metadata signing key and the POR validation key
|
||||
PublicKey = object
|
||||
signkey: ec_PublicKey
|
||||
key: ec_p2
|
||||
PublicKey* = object
|
||||
signkey*: ec_PublicKey
|
||||
key*: ec_p2
|
||||
|
||||
# POR metadata (called "file tag t_0" in the original paper)
|
||||
TauZero = object
|
||||
name: array[namelen, byte]
|
||||
n: int64
|
||||
u: seq[ec_p1]
|
||||
TauZero* = object
|
||||
name*: array[Namelen, byte]
|
||||
n*: int64
|
||||
u*: seq[ec_p1]
|
||||
|
||||
# signed POR metadata (called "signed file tag t" in the original paper)
|
||||
Tau = object
|
||||
t: TauZero
|
||||
signature: array[96, byte]
|
||||
Tau* = object
|
||||
t*: TauZero
|
||||
signature*: array[96, byte]
|
||||
|
||||
Proof* = object
|
||||
mu*: seq[blst_scalar]
|
||||
sigma*: blst_p1
|
||||
|
||||
# PoR query element
|
||||
QElement = object
|
||||
I: int64
|
||||
V: ec_scalar
|
||||
QElement* = object
|
||||
I*: int64
|
||||
V*: ec_scalar
|
||||
|
||||
PoR* = object
|
||||
ssk*: SecretKey
|
||||
spk*: PublicKey
|
||||
tau*: Tau
|
||||
authenticators*: seq[ec_p1]
|
||||
|
||||
proc fromBytesBE(a: array[32, byte]): ec_scalar =
|
||||
## Convert data to blst native form
|
||||
## Convert data to native form
|
||||
##
|
||||
|
||||
ec_scalar_from_bendian(result, a)
|
||||
doAssert(ec_scalar_fr_check(result).bool)
|
||||
|
||||
proc fromBytesBE(a: openArray[byte]): ec_scalar =
|
||||
## Convert data to blst native form
|
||||
## Convert data to native form
|
||||
##
|
||||
|
||||
var b: array[32, byte]
|
||||
doAssert(a.len <= b.len)
|
||||
|
||||
let d = b.len - a.len
|
||||
for i in 0 ..< a.len:
|
||||
for i in 0..<a.len:
|
||||
b[i+d] = a[i]
|
||||
|
||||
ec_scalar_from_bendian(result, b)
|
||||
doAssert(ec_scalar_fr_check(result).bool)
|
||||
|
||||
proc getSector(f: File, blockid: int64, sectorid: int64, spb: int64): ZChar =
|
||||
proc getSector(
|
||||
stream: SeekableStream,
|
||||
blockId: int64,
|
||||
sectorId: int64,
|
||||
spb: int64): Future[ZChar] {.async.} =
|
||||
## Read file sector at given <blockid, sectorid> postion
|
||||
f.setFilePos((blockid * spb + sectorid) * sizeof(result))
|
||||
let r = f.readBytes(result, 0, sizeof(result))
|
||||
##
|
||||
|
||||
var res: ZChar
|
||||
stream.setPos(((blockid * spb + sectorid) * ZChar.len).int)
|
||||
discard await stream.readOnce(addr res[0], ZChar.len)
|
||||
return res
|
||||
|
||||
proc rndScalar(scalar: var ec_scalar): void =
|
||||
## Generate random scalar within the subroup order r
|
||||
var scal{.noInit.}: array[32, byte]
|
||||
##
|
||||
|
||||
var scal {.noInit.}: array[32, byte]
|
||||
|
||||
while true:
|
||||
for val in scal.mitems:
|
||||
val = byte Rng.instance.rand(0xFF)
|
||||
|
||||
scalar.ec_scalar_from_bendian(scal)
|
||||
if ec_scalar_fr_check(scalar).bool:
|
||||
break
|
||||
|
||||
proc rndP2(x: var ec_p2, scalar: var ec_scalar): void =
|
||||
## Generate random point on G2
|
||||
##
|
||||
|
||||
x.ec_p2_from_affine(EC_G2) # init from generator
|
||||
scalar.rndScalar()
|
||||
x.ec_p2_mult(x, scalar, 255)
|
||||
|
||||
proc rndP1(x: var ec_p1, scalar: var ec_scalar): void =
|
||||
## Generate random point on G1
|
||||
##
|
||||
|
||||
x.ec_p1_from_affine(EC_G1) # init from generator
|
||||
scalar.rndScalar()
|
||||
x.ec_p1_mult(x, scalar, 255)
|
||||
|
||||
let posKeygen = rndP2
|
||||
template posKeygen(x: var ec_p2, scalar: var ec_scalar): void =
|
||||
## Generate POS key pair
|
||||
##
|
||||
|
||||
proc keygen*(): (PublicKey, SecretKey) =
|
||||
rndP2(x, scalar)
|
||||
|
||||
proc keyGen*(): (PublicKey, SecretKey) =
|
||||
## Generate key pair for signing metadata and for POS tags
|
||||
var pk: PublicKey
|
||||
var sk: SecretKey
|
||||
var ikm: array[32, byte]
|
||||
##
|
||||
|
||||
var
|
||||
pk: PublicKey
|
||||
sk: SecretKey
|
||||
ikm: array[32, byte]
|
||||
|
||||
for b in ikm.mitems:
|
||||
b = byte Rng.instance.rand(0xFF)
|
||||
|
||||
doAssert ikm.ec_keygen(pk.signkey, sk.signkey)
|
||||
|
||||
posKeygen(pk.key, sk.key)
|
||||
return (pk, sk)
|
||||
|
||||
proc split(f: File, s: int64): int64 =
|
||||
proc sectorsCount(stream: SeekableStream, s: int64): int64 =
|
||||
## Calculate number of blocks for a file
|
||||
let size = f.getFileSize()
|
||||
let n = ((size - 1) div (s * sizeof(ZChar))) + 1
|
||||
echo "File size=", size, " bytes",
|
||||
", blocks=", n,
|
||||
", sectors/block=", $s,
|
||||
", sectorsize=", $sizeof(ZChar), " bytes"
|
||||
##
|
||||
|
||||
let
|
||||
size = stream.size()
|
||||
n = ((size - 1) div (s * sizeof(ZChar))) + 1
|
||||
# debugEcho "File size=", size, " bytes",
|
||||
# ", blocks=", n,
|
||||
# ", sectors/block=", $s,
|
||||
# ", sectorsize=", $sizeof(ZChar), " bytes"
|
||||
|
||||
return n
|
||||
|
||||
proc hashToG1(msg: openArray[byte]): ec_p1 =
|
||||
## Hash to curve with Dagger specific domain separation
|
||||
##
|
||||
|
||||
const dst = "DAGGER-PROOF-OF-CONCEPT"
|
||||
result.ec_hash_to_g1(msg, dst, aug = "")
|
||||
|
||||
proc hashNameI(name: array[namelen, byte], i: int64): ec_p1 =
|
||||
proc hashNameI(name: array[Namelen, byte], i: int64): ec_p1 =
|
||||
## Calculate unique filname and block index based hash
|
||||
##
|
||||
|
||||
# # naive implementation, hashing a long string representation
|
||||
# # such as "[255, 242, 23]1"
|
||||
|
@ -224,20 +274,33 @@ proc hashNameI(name: array[namelen, byte], i: int64): ec_p1 =
|
|||
bigEndian64(addr(namei[sizeof(name)]), unsafeAddr(i))
|
||||
return hashToG1(namei)
|
||||
|
||||
proc generateAuthenticatorNaive(i: int64, s: int64, t: TauZero, f: File, ssk: SecretKey): ec_p1 =
|
||||
proc generateAuthenticatorNaive(
|
||||
stream: SeekableStream,
|
||||
ssk: SecretKey,
|
||||
i: int64,
|
||||
s: int64,
|
||||
t: TauZero): Future[ec_p1] {.async.} =
|
||||
## Naive implementation of authenticator as in the S&W paper.
|
||||
## With the paper's multiplicative notation:
|
||||
## \sigmai=\(H(file||i)\cdot\prod{j=0}^{s-1}{uj^{m[i][j]}})^{\alpha}
|
||||
##
|
||||
|
||||
var sum: ec_p1
|
||||
for j in 0 ..< s:
|
||||
for j in 0..<s:
|
||||
var prod: ec_p1
|
||||
prod.ec_p1_mult(t.u[j], fromBytesBE(getSector(f, i, j, s)), 255)
|
||||
prod.ec_p1_mult(t.u[j], fromBytesBE((await stream.getSector(i, j, s))), 255)
|
||||
sum.ec_p1_add_or_double(sum, prod)
|
||||
|
||||
ec_p1_add_or_double(result, hashNameI(t.name, i), sum)
|
||||
result.ec_p1_mult(result, ssk.key, 255)
|
||||
|
||||
proc generateAuthenticatorOpt(i: int64, s: int64, t: TauZero, ubase: openArray[ec_scalar], f: File, ssk: SecretKey): ec_p1 =
|
||||
proc generateAuthenticatorOpt(
|
||||
stream: SeekableStream,
|
||||
ssk: SecretKey,
|
||||
i: int64,
|
||||
s: int64,
|
||||
t: TauZero,
|
||||
ubase: seq[blst_scalar]): Future[ec_p1] {.async.} =
|
||||
## Optimized implementation of authenticator generation
|
||||
## This implementation is reduces the number of scalar multiplications
|
||||
## from s+1 to 1+1 , using knowledge about the scalars (r_j)
|
||||
|
@ -245,12 +308,14 @@ proc generateAuthenticatorOpt(i: int64, s: int64, t: TauZero, ubase: openArray[e
|
|||
##
|
||||
## With the paper's multiplicative notation, we use:
|
||||
## (H(file||i)\cdot g^{\sum{j=0}^{s-1}{r_j \cdot m[i][j]}})^{\alpha}
|
||||
##
|
||||
|
||||
var sum: ec_fr
|
||||
var sums: ec_scalar
|
||||
for j in 0 ..< s:
|
||||
for j in 0..<s:
|
||||
var a, b, x: ec_fr
|
||||
a.ec_fr_from_scalar(ubase[j])
|
||||
b.ec_fr_from_scalar(fromBytesBE(getSector(f, i, j, s)))
|
||||
b.ec_fr_from_scalar(fromBytesBE((await stream.getSector(i, j, s))))
|
||||
x.ec_fr_mul(a, b)
|
||||
sum.ec_fr_add(sum, x)
|
||||
sums.ec_scalar_from_fr(sum)
|
||||
|
@ -261,104 +326,103 @@ proc generateAuthenticatorOpt(i: int64, s: int64, t: TauZero, ubase: openArray[e
|
|||
result.ec_p1_add_or_double(result, hashNameI(t.name, i))
|
||||
result.ec_p1_mult(result, ssk.key, 255)
|
||||
|
||||
proc generateAuthenticator(i: int64, s: int64, t: TauZero, ubase: openArray[ec_scalar], f: File, ssk: SecretKey): ec_p1 =
|
||||
proc generateAuthenticator(
|
||||
stream: SeekableStream,
|
||||
ssk: SecretKey,
|
||||
i: int64,
|
||||
s: int64,
|
||||
t: TauZero,
|
||||
ubase: seq[ec_scalar]): Future[ec_p1] =
|
||||
## Wrapper to select tag generator implementation
|
||||
##
|
||||
|
||||
# let a = generateAuthenticatorNaive(i, s, t, f, ssk)
|
||||
let b = generateAuthenticatorOpt(i, s, t, ubase, f, ssk)
|
||||
# let a = generateAuthenticatorNaive(stream, ssk, i, s, t, ubase)
|
||||
return generateAuthenticatorOpt(stream, ssk, i, s, t, ubase)
|
||||
# doAssert(a.ec_p1_is_equal(b).bool)
|
||||
return b
|
||||
|
||||
proc setup*(ssk: SecretKey, s:int64, filename: string): (Tau, seq[ec_p1]) =
|
||||
## Set up the POR scheme by generating tags and metadata
|
||||
let file = open(filename)
|
||||
let n = split(file, s)
|
||||
var t = TauZero(n: n)
|
||||
proc generateQuery*(tau: Tau, l: int): seq[QElement] =
|
||||
## Generata a random BLS query of given size
|
||||
##
|
||||
|
||||
# generate a random name
|
||||
for i in 0 ..< 512 :
|
||||
t.name[i] = byte Rng.instance.rand(0xFF)
|
||||
|
||||
# generate the coefficient vector for combining sectors of a block: U
|
||||
var ubase: seq[ec_scalar]
|
||||
for i in 0 ..< s :
|
||||
var
|
||||
u: ec_p1
|
||||
ub: ec_scalar
|
||||
rndP1(u, ub)
|
||||
t.u.add(u)
|
||||
ubase.add(ub)
|
||||
|
||||
#TODO: a better bytearray conversion of TauZero for the signature might be needed
|
||||
# the current conversion using $t might be architecture dependent and not unique
|
||||
let signature = ec_sign(ssk.signkey, $t)
|
||||
let tau = Tau(t: t, signature: signature.ec_export_raw())
|
||||
|
||||
#generate sigmas
|
||||
var sigmas: seq[ec_p1]
|
||||
for i in 0 ..< n :
|
||||
sigmas.add(generateAuthenticator(i, s, t, ubase, file, ssk))
|
||||
|
||||
file.close()
|
||||
result = (tau, sigmas)
|
||||
|
||||
proc generateQuery*(tau: Tau, spk: PublicKey, l: int): seq[QElement] =
|
||||
## Generata a random BLS query of given sizxe
|
||||
let n = tau.t.n # number of blocks
|
||||
|
||||
for i in 0 ..< l :
|
||||
for i in 0..<l:
|
||||
var q: QElement
|
||||
q.I = Rng.instance.rand(n-1) #TODO: dedup
|
||||
q.V.rndScalar() #TODO: fix range
|
||||
result.add(q)
|
||||
|
||||
proc generateProof*(q: openArray[QElement], authenticators: openArray[ec_p1], spk: PublicKey, s: int64, filename: string): (seq[ec_scalar], ec_p1) =
|
||||
proc generateProof*(
|
||||
stream: SeekableStream,
|
||||
q: seq[QElement],
|
||||
authenticators: seq[ec_p1],
|
||||
s: int64): Future[Proof] {.async.} =
|
||||
## Generata BLS proofs for a given query
|
||||
let file = open(filename)
|
||||
##
|
||||
|
||||
var
|
||||
mu: seq[ec_scalar]
|
||||
|
||||
for j in 0..<s:
|
||||
var
|
||||
muj: ec_fr
|
||||
|
||||
for qelem in q:
|
||||
let
|
||||
sect = fromBytesBE((await stream.getSector(qelem.I, j, s)))
|
||||
|
||||
var
|
||||
x, v, sector: ec_fr
|
||||
|
||||
var mu: seq[ec_scalar]
|
||||
for j in 0 ..< s :
|
||||
var muj: ec_fr
|
||||
for qelem in q :
|
||||
var x, v, sector: ec_fr
|
||||
let sect = fromBytesBE(getSector(file, qelem.I, j, s))
|
||||
sector.ec_fr_from_scalar(sect)
|
||||
v.ec_fr_from_scalar(qelem.V)
|
||||
x.ec_fr_mul(v, sector)
|
||||
muj.ec_fr_add(muj, x)
|
||||
var mujs: ec_scalar
|
||||
|
||||
var
|
||||
mujs: ec_scalar
|
||||
|
||||
mujs.ec_scalar_from_fr(muj)
|
||||
mu.add(mujs)
|
||||
|
||||
var sigma: ec_p1
|
||||
var
|
||||
sigma: ec_p1
|
||||
|
||||
for qelem in q:
|
||||
var prod: ec_p1
|
||||
var
|
||||
prod: ec_p1
|
||||
|
||||
prod.ec_p1_mult(authenticators[qelem.I], qelem.V, 255)
|
||||
sigma.ec_p1_add_or_double(sigma, prod)
|
||||
|
||||
file.close()
|
||||
return (mu, sigma)
|
||||
return Proof(mu: mu, sigma: sigma)
|
||||
|
||||
proc verifyProof*(tau: Tau, q: openArray[QElement], mus: openArray[ec_scalar], sigma: ec_p1, spk: PublicKey): bool =
|
||||
proc verifyProof*(
|
||||
self: PoR,
|
||||
q: seq[QElement],
|
||||
mus: seq[ec_scalar],
|
||||
sigma: ec_p1): bool =
|
||||
## Verify a BLS proof given a query
|
||||
##
|
||||
|
||||
# verify signature on Tau
|
||||
var signature: ec_signature
|
||||
if not signature.ec_from_bytes(tau.signature):
|
||||
if not signature.ec_from_bytes(self.tau.signature):
|
||||
return false
|
||||
if not ec_verify(spk.signkey, $tau.t, signature):
|
||||
|
||||
if not ec_verify(self.spk.signkey, $self.tau.t, signature):
|
||||
return false
|
||||
|
||||
var first: ec_p1
|
||||
for qelem in q :
|
||||
for qelem in q:
|
||||
var prod: ec_p1
|
||||
prod.ec_p1_mult(hashNameI(tau.t.name, qelem.I), qelem.V, 255)
|
||||
prod.ec_p1_mult(hashNameI(self.tau.t.name, qelem.I), qelem.V, 255)
|
||||
first.ec_p1_add_or_double(first, prod)
|
||||
doAssert(ec_p1_on_curve(first).bool)
|
||||
|
||||
let us = tau.t.u
|
||||
let us = self.tau.t.u
|
||||
var second: ec_p1
|
||||
for j in 0 ..< len(us) :
|
||||
for j in 0..<len(us):
|
||||
var prod: ec_p1
|
||||
prod.ec_p1_mult(us[j], mus[j], 255)
|
||||
second.ec_p1_add_or_double(second, prod)
|
||||
|
@ -367,7 +431,59 @@ proc verifyProof*(tau: Tau, q: openArray[QElement], mus: openArray[ec_scalar], s
|
|||
var sum: ec_p1
|
||||
sum.ec_p1_add_or_double(first, second)
|
||||
|
||||
var g{.noInit.}: ec_p2
|
||||
var g {.noInit.}: ec_p2
|
||||
g.ec_p2_from_affine(EC_G2)
|
||||
|
||||
return verifyPairings(sum, spk.key, sigma, g)
|
||||
return verifyPairings(sum, self.spk.key, sigma, g)

proc init*(
  T: type PoR,
  stream: SeekableStream,
  ssk: SecretKey,
  spk: PublicKey,
  blockSize: int64): Future[PoR] {.async.} =
  ## Set up the POR scheme by generating tags and metadata
  ##

  doAssert(
    (blockSize mod BytesPerSector) == 0,
    "Block size should be divisible by `BytesPerSector`")

  let
    s = blockSize div BytesPerSector
    n = stream.sectorsCount(s)

  # generate a random name
  var t = TauZero(n: n)
  for i in 0..<Namelen:
    t.name[i] = byte Rng.instance.rand(0xFF)

  # generate the coefficient vector for combining sectors of a block: U
  var ubase: seq[ec_scalar]
  for i in 0..<s:
    var
      u: ec_p1
      ub: ec_scalar

    rndP1(u, ub)
    t.u.add(u)
    ubase.add(ub)

  #TODO: a better bytearray conversion of TauZero for the signature might be needed;
  #      the current conversion using $t might be architecture dependent and not unique
  let
    signature = ec_sign(ssk.signkey, $t)
    tau = Tau(t: t, signature: signature.exportRaw())

  # generate sigmas
  var
    sigmas: seq[ec_p1]

  for i in 0..<n:
    sigmas.add((await stream.generateAuthenticator(ssk, i, s, t, ubase)))

  return PoR(
    ssk: ssk,
    spk: spk,
    tau: tau,
    authenticators: sigmas)
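Together with generateProof and verifyProof above, a full round trip would look roughly like the sketch below. This is illustrative only: generateQuery and the exact generateProof signature are assumed from the surrounding module, and store/manifest/queryLen/sectors are placeholders.

    # sketch; generateQuery/generateProof argument lists are assumed, not verbatim
    let
      (spk, ssk) = keyGen()
      stream = StoreStream.new(store, manifest)
      por = await PoR.init(stream, ssk, spk, manifest.blockSize)
      q = generateQuery(por.tau, queryLen)      # hypothetical helper
      proof = await stream.generateProof(q, por.authenticators, sectors)

    doAssert por.verifyProof(q, proof.mu, proof.sigma)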

@@ -0,0 +1,3 @@
import ./serialization/serialization

export serialization

@@ -0,0 +1,33 @@
syntax = "proto3";

message PoREnvelope {
  message TauZeroMessage {
    bytes name = 1;
    int64 n = 2;
    repeated bytes u = 3;
  }

  message TauMessage {
    TauZeroMessage t = 1;
    bytes signature = 2;
  }

  message PubKeyMessage {
    bytes signkey = 1;
    bytes key = 2;
  }

  message PorMessage {
    TauMessage tau = 1;
    PubKeyMessage spk = 2;
    repeated bytes authenticators = 3;
  }

  message ProofMessage {
    repeated bytes mu = 1;
    bytes sigma = 2;
  }

  PorMessage por = 1;
  ProofMessage proof = 2;
}

@@ -0,0 +1,170 @@
## Nim-POS
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

import std/sequtils

import pkg/protobuf_serialization
import pkg/stew/results
import pkg/stew/objects
import pkg/blscurve
import pkg/blscurve/blst/blst_abi

import_proto3 "por.proto"

export TauZeroMessage
export TauMessage
export ProofMessage
export PorMessage
export PoREnvelope

import ../por

func toMessage*(self: Proof): ProofMessage =
  var
    message = ProofMessage()
    sigma: array[96, byte]

  for mu in self.mu:
    var
      serialized: array[32, byte]
    blst_bendian_from_scalar(serialized, mu)
    message.mu.add(toSeq(serialized))

  blst_p1_serialize(sigma, self.sigma)
  message.sigma = toSeq(sigma)

  message

func fromMessage*(self: ProofMessage): Result[Proof, string] =
  var
    proof = Proof()
    sigmaAffine: blst_p1_affine

  if blst_p1_deserialize(sigmaAffine, toArray(96, self.sigma)) != BLST_SUCCESS:
    return err("Unable to decompress sigma")

  blst_p1_from_affine(proof.sigma, sigmaAffine)

  for mu in self.mu:
    var
      muScalar: blst_scalar
    blst_scalar_from_bendian(muScalar, toArray(32, mu))

    proof.mu.add(muScalar)

  ok(proof)

func toMessage*(self: TauZero): TauZeroMessage =
  var
    message = TauZeroMessage(
      name: toSeq(self.name),
      n: self.n)

  for u in self.u:
    var
      serialized: array[96, byte]

    # serializes and compresses the points
    blst_p1_serialize(serialized, u)
    message.u.add(toSeq(serialized))

  message

func fromMessage*(self: TauZeroMessage): Result[TauZero, string] =
  var
    tauZero: TauZero

  tauZero.name = toArray(512, self.name)
  tauZero.n = self.n

  for u in self.u:
    var
      uuAffine: blst_p1_affine
      uu: blst_p1

    if blst_p1_deserialize(uuAffine, toArray(96, u)) != BLST_SUCCESS:
      return err("Unable to decompress u")

    blst_p1_from_affine(uu, uuAffine)
    tauZero.u.add(uu)

  ok(tauZero)

func toMessage*(self: Tau): TauMessage =
  TauMessage(
    t: self.t.toMessage(),
    signature: toSeq(self.signature)) # signature is already in serialized form

func fromMessage*(self: TauMessage): Result[Tau, string] =
  var
    message = Tau(
      t: ? self.t.fromMessage(),
      signature: toArray(96, self.signature))

  ok(message)

func toMessage*(self: por.PublicKey): PubKeyMessage =
  var
    signkey = toSeq(self.signkey.exportUncompressed())
    message = PubKeyMessage(signkey: signkey)
    key: array[192, byte]

  blst_p2_serialize(key, self.key)
  message.key = toSeq(key)

  message

func fromMessage*(self: PubKeyMessage): Result[por.PublicKey, string] =
  var
    spk: por.PublicKey
    keyAffine: blst_p2_affine

  if not spk.signkey.fromBytes(self.signkey.toOpenArray(0, 95)):
    return err("Unable to deserialize public key!")

  if blst_p2_deserialize(keyAffine, toArray(192, self.key)) != BLST_SUCCESS:
    return err("Unable to decompress key!")

  blst_p2_from_affine(spk.key, keyAffine)

  ok(spk)

func toMessage*(self: PoR): PorMessage =
  var
    message = PorMessage(
      tau: self.tau.toMessage(),
      spk: self.spk.toMessage())

  for sigma in self.authenticators:
    var
      serialized: array[96, byte]

    blst_p1_serialize(serialized, sigma)
    message.authenticators.add(toSeq(serialized))

  message

func fromMessage*(self: PorMessage): Result[PoR, string] =
  var
    por = PoR(
      tau: ? self.tau.fromMessage(),
      spk: ? self.spk.fromMessage())

  for sigma in self.authenticators:
    var
      sigmaAffine: blst_p1_affine
      authenticator: blst_p1

    if blst_p1_deserialize(sigmaAffine, toArray(96, sigma)) != BLST_SUCCESS:
      return err("Unable to decompress sigma")

    blst_p1_from_affine(authenticator, sigmaAffine)
    por.authenticators.add(authenticator)

  return ok(por)
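A wire round trip through these converters then reduces to a pair of Protobuf calls (a sketch; Protobuf.encode/decode from protobuf_serialization are used the same way in stpnetwork.nim below):

    let
      bytes = Protobuf.encode(por.toMessage())       # PoR -> PorMessage -> bytes
      decoded = Protobuf.decode(bytes, PorMessage)   # bytes -> PorMessage
      restored = decoded.fromMessage().tryGet()      # PorMessage -> PoR, raises on bad input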

@@ -0,0 +1,98 @@
## Nim-Dagger
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

import pkg/chronos
import pkg/chronicles
import pkg/questionable
import pkg/questionable/results
import pkg/contractabi/address as ca

import ../stores
import ../manifest
import ../streams
import ../utils

import ./por
import ./stpnetwork
import ./stpproto
import ./stpstore
import ./timing

export stpnetwork, stpstore, por, timing, stpproto

type
  StorageProofs* = object
    store*: BlockStore
    network*: StpNetwork
    stpStore*: StpStore

proc upload*(
  self: StorageProofs,
  cid: Cid,
  indexes: seq[int],
  host: ca.Address): Future[?!void] {.async.} =
  ## Upload authenticators
  ##

  without por =? (await self.stpStore.retrieve(cid)):
    trace "Unable to retrieve por data from store", cid
    return failure("Unable to retrieve por data from store")

  return await self.network.uploadTags(
    cid,
    indexes,
    por.authenticators,
    host)

# proc proof*() =
#   discard

# proc verify*() =
#   discard

proc setupProofs*(
  self: StorageProofs,
  manifest: Manifest): Future[?!void] {.async.} =
  ## Setup storage authentication
  ##

  without cid =? manifest.cid:
    return failure("Unable to retrieve Cid from manifest!")

  let
    (spk, ssk) = keyGen()
    por = await PoR.init(
      StoreStream.new(self.store, manifest),
      ssk,
      spk,
      manifest.blockSize)

  return await self.stpStore.store(por.toMessage(), cid)

proc init*(
  T: type StorageProofs,
  network: StpNetwork,
  store: BlockStore,
  stpStore: StpStore): StorageProofs =

  var
    self = T(
      store: store,
      stpStore: stpStore,
      network: network)

  proc tagsHandler(msg: TagsMessage) {.async, gcsafe.} =
    try:
      await self.stpStore.store(msg.cid, msg.tags).tryGet()
      trace "Stored tags", cid = $msg.cid, tags = msg.tags.len
    except CatchableError as exc:
      trace "Exception attempting to store tags", exc = exc.msg

  self.network.tagsHandle = tagsHandler
  self
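Wiring the pieces together end to end would look roughly like the sketch below, assuming switch, discovery, store, manifest, cid, indexes, hostAddr and dataDir are all constructed elsewhere:

    let
      network = StpNetwork.new(switch, discovery)
      stpStore = StpStore.init(dataDir / "proofs")
      proofs = StorageProofs.init(network, store, stpStore)

    (await proofs.setupProofs(manifest)).tryGet()          # tag the dataset and persist the PoR
    (await proofs.upload(cid, indexes, hostAddr)).tryGet() # push the tags to the host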

@@ -0,0 +1,15 @@
syntax = "proto3";

message StorageProofsMessage {
  message Tag {
    int64 idx = 1;
    bytes tag = 2;
  }

  message TagsMessage {
    bytes cid = 1;
    repeated Tag tags = 2;
  }

  TagsMessage tagsMsg = 1;
}

@@ -0,0 +1,104 @@
## Nim-Dagger
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

import std/sequtils

import pkg/chronos
import pkg/libp2p
import pkg/chronicles
import pkg/questionable
import pkg/questionable/results
import pkg/contractabi/address as ca
import pkg/protobuf_serialization

import ./stpproto
import ../discovery

const
  Codec* = "/dagger/storageproofs/1.0.0"
  MaxMessageSize* = 1 shl 22 # 4MB

logScope:
  topics = "dagger storageproofs network"

type
  TagsHandler* = proc(msg: TagsMessage):
    Future[void] {.raises: [Defect], gcsafe.}

  StpNetwork* = ref object of LPProtocol
    switch*: Switch
    discovery*: Discovery
    tagsHandle*: TagsHandler

proc uploadTags*(
  self: StpNetwork,
  cid: Cid,
  indexes: seq[int],
  tags: seq[seq[byte]],
  host: ca.Address): Future[?!void] {.async.} =
  # Upload tags to `host`
  #

  var msg = TagsMessage(cid: cid.data.buffer)
  for i in indexes:
    msg.tags.add(Tag(idx: i, tag: tags[i]))

  let
    peers = await self.discovery.find(host)
    connFut = await one(peers.mapIt(
      self.switch.dial(
        it.data.peerId,
        it.data.addresses.mapIt( it.address ),
        @[Codec])))
    conn = await connFut

  try:
    await conn.writeLp(
      Protobuf.encode(StorageProofsMessage(tagsMsg: msg)))
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    trace "Exception submitting tags", cid, exc = exc.msg
    return failure(exc.msg)
  finally:
    await conn.close()

  return success()

method init*(self: StpNetwork) =
  ## Perform protocol initialization
  ##

  proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
    try:
      let
        msg = await conn.readLp(MaxMessageSize)
        message = Protobuf.decode(msg, StorageProofsMessage)

      if message.tagsMsg.tags.len > 0 and not self.tagsHandle.isNil:
        await self.tagsHandle(message.tagsMsg)
    except CatchableError as exc:
      trace "Exception handling Storage Proofs message", exc = exc.msg
    finally:
      await conn.close()

  self.handler = handle
  self.codec = Codec

proc new*(
  T: type StpNetwork,
  switch: Switch,
  discovery: Discovery): StpNetwork =
  let
    self = StpNetwork(
      switch: switch,
      discovery: discovery)

  self.init()
  self

@@ -0,0 +1,7 @@
import pkg/protobuf_serialization

import_proto3 "stp.proto"

export StorageProofsMessage
export TagsMessage
export Tag

@@ -0,0 +1,121 @@
## Nim-Dagger
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

import std/os
import std/strformat

import pkg/libp2p
import pkg/chronos
import pkg/chronicles
import pkg/stew/io2
import pkg/questionable
import pkg/questionable/results
import pkg/protobuf_serialization

import ./stpproto
import ./por

type
  StpStore* = object
    authDir*: string
    postfixLen*: int

template stpPath*(self: StpStore, cid: Cid): string =
  self.authDir / ($cid)[^self.postfixLen..^1] / $cid
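As a concrete illustration of the resulting layout (the CID is hypothetical): with the default postfixLen = 2, a CID whose string form ends in "Ab" resolves to

    <authDir>/Ab/<cid>/por     # the serialized PorMessage (see store/retrieve below)
    <authDir>/Ab/<cid>/<idx>   # one file per block tag

so the trailing characters of the CID shard the top-level directory.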

proc retrieve*(
  self: StpStore,
  cid: Cid): Future[?!PorMessage] {.async.} =
  ## Retrieve authenticators from data store
  ##

  let path = self.stpPath(cid) / "por"
  var data: seq[byte]
  if (
    let res = io2.readFile(path, data);
    res.isErr):
    let error = io2.ioErrorMsg(res.error)
    trace "Cannot retrieve storage proof data from fs", path, error
    return failure("Cannot retrieve storage proof data from fs")

  return Protobuf.decode(data, PorMessage).success

proc store*(
  self: StpStore,
  por: PorMessage,
  cid: Cid): Future[?!void] {.async.} =
  ## Persist storage proofs
  ##

  let
    dir = self.stpPath(cid)

  if io2.createPath(dir).isErr:
    trace "Unable to create storage proofs prefix dir", dir
    return failure(&"Unable to create storage proofs prefix dir ${dir}")

  let path = dir / "por"
  if (
    let res = io2.writeFile(path, Protobuf.encode(por));
    res.isErr):
    let error = io2.ioErrorMsg(res.error)
    trace "Unable to store storage proofs", path, cid = cid, error
    return failure(
      &"Unable to store storage proofs - path = ${path} cid = ${$cid} error = ${error}")

  return success()

proc retrieve*(
  self: StpStore,
  cid: Cid,
  blocks: seq[int]): Future[?!seq[Tag]] {.async.} =
  var tags: seq[Tag]
  for b in blocks:
    var tag = Tag(idx: b)
    let path = self.stpPath(cid) / $b
    if (
      let res = io2.readFile(path, tag.tag);
      res.isErr):
      let error = io2.ioErrorMsg(res.error)
      trace "Cannot retrieve tags from fs", path, error
      return failure("Cannot retrieve tags from fs")
    tags.add(tag)

  return tags.success

proc store*(
  self: StpStore,
  tags: seq[Tag],
  cid: Cid): Future[?!void] {.async.} =
  let
    dir = self.stpPath(cid)

  if io2.createPath(dir).isErr:
    trace "Unable to create storage proofs prefix dir", dir
    return failure(&"Unable to create storage proofs prefix dir ${dir}")

  for t in tags:
    let path = dir / $t.idx
    if (
      let res = io2.writeFile(path, t.tag);
      res.isErr):
      let error = io2.ioErrorMsg(res.error)
      trace "Unable to store tags", path, cid = cid, error
      return failure(
        &"Unable to store tags - path = ${path} cid = ${$cid} error = ${error}")

  return success()

proc init*(
  T: type StpStore,
  authDir: string,
  postfixLen: int = 2): StpStore =
  T(
    authDir: authDir,
    postfixLen: postfixLen)

@@ -0,0 +1,4 @@
import ./timing/periods
import ./timing/proofs

export periods, proofs

@@ -1,4 +1,4 @@
## Nim-Dagger
## Nim-Codex
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -1,4 +1,4 @@
## Nim-Dagger
## Nim-Codex
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -29,7 +29,7 @@ import ../errors
export blockstore

logScope:
  topics = "dagger cachestore"
  topics = "codex cachestore"

type
  CacheStore* = ref object of BlockStore

@@ -37,7 +37,7 @@ type
    size*: Positive # in bytes
    cache: LruCache[Cid, Block]

  InvalidBlockSize* = object of DaggerError
  InvalidBlockSize* = object of CodexError

const
  MiB* = 1024 * 1024 # bytes, 1 mebibyte = 1,048,576 bytes

@@ -1,4 +1,4 @@
## Nim-Dagger
## Nim-Codex
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -26,7 +26,7 @@ import ./blockstore
export blockstore

logScope:
  topics = "dagger fsstore"
  topics = "codex fsstore"

type
  FSStore* = ref object of BlockStore

@@ -150,10 +150,6 @@ method listBlocks*(self: FSStore, onBlock: OnBlock) {.async.} =
    except CatchableError as exc:
      trace "Couldn't get block", cid = $(cid.get())

    # TODO: this should run on a thread which
    #       wouldn't need the sleep
    await sleepAsync(100.millis) # avoid blocking

proc new*(
  T: type FSStore,
  repoDir: string,

@@ -1,4 +1,4 @@
## Nim-Dagger
## Nim-Codex
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -24,7 +24,7 @@ import ../blockexchange
export blockstore, blockexchange, asyncheapqueue

logScope:
  topics = "dagger networkstore"
  topics = "codex networkstore"

type
  NetworkStore* = ref object of BlockStore

@@ -1,4 +1,4 @@
## Nim-Dagger
## Nim-Codex
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -14,7 +14,7 @@ import pkg/chronicles
export libp2p, chronos, chronicles

logScope:
  topics = "dagger seekablestream"
  topics = "codex seekablestream"

type
  SeekableStream* = ref object of LPStream

@@ -31,6 +31,7 @@ type
  StoreStream* = ref object of SeekableStream
    store*: BlockStore
    manifest*: Manifest
    emptyBlock*: seq[byte]

proc new*(
  T: type StoreStream,

@@ -39,7 +40,8 @@ proc new*(
  result = T(
    store: store,
    manifest: manifest,
    offset: 0)
    offset: 0,
    emptyBlock: newSeq[byte](manifest.blockSize))

  result.initStream()

@@ -57,12 +59,12 @@ method readOnce*(
  var
    read = 0

  trace "Reading from manifest", cid = self.manifest.cid.get(), blocks = self.manifest.len
  while read < nbytes and not self.atEof:
    let
      pos = self.offset div self.manifest.blockSize
      blk = (await self.store.getBlock(self.manifest[pos])).tryGet()

    let
      blockOffset =
        if self.offset >= self.manifest.blockSize:
          self.offset mod self.manifest.blockSize

@@ -75,7 +77,15 @@ method readOnce*(
        else:
          min(nbytes - read, self.manifest.blockSize)

    copyMem(pbytes.offset(read), unsafeAddr blk.data[blockOffset], readBytes)
    trace "Reading bytes from store stream", pos, cid = blk.cid, bytes = readBytes, blockOffset = blockOffset
    copyMem(
      pbytes.offset(read),
      if blk.isEmpty:
        self.emptyBlock[blockOffset].addr
      else:
        blk.data[blockOffset].addr,
      readBytes)

    self.offset += readBytes
    read += readBytes

@@ -1,4 +1,4 @@
import ./utils/asyncheapqueue
import ./utils/fileutils

export asyncheapqueue, fileutils
export asyncheapqueue, fileutils

@@ -1,4 +1,4 @@
## Nim-Dagger
## Nim-Codex
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -1,4 +1,4 @@
## Nim-Dagger
## Nim-Codex
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))

@@ -75,8 +75,8 @@ switch("warning", "LockLevel:off")

switch("define", "libp2p_pki_schemes=secp256k1")
#TODO this infects everything in this folder, ideally it would only
# apply to dagger.nim, but since dagger.nims is used for other purposes
# we can't use it. And dagger.cfg doesn't work
# apply to codex.nim, but since codex.nims is used for other purposes
# we can't use it. And codex.cfg doesn't work
switch("define", "chronicles_sinks=textlines[dynamic],json[dynamic]")

# begin Nimble config (version 1)

@@ -1,3 +0,0 @@
import ./peers/peerctxstore

export peerctxstore

@@ -1,9 +0,0 @@
Nim implementation of Proof of Storage related schemes

Implementation of the BLS-based public PoS scheme from
Shacham H., Waters B., "Compact Proofs of Retrievability"
using pairing over BLS12-381 ECC.

For a detailed description of the implementation, see `bls.nim`

For a usage example, see `testbls.nim`

@@ -1,9 +0,0 @@
import times, strutils
export strutils.formatFloat

template benchmark*(benchmarkName: string, code: untyped) =
  let t0 = epochTime()
  code
  let elapsed = epochTime() - t0
  let elapsedStr = elapsed.formatFloat(format = ffDecimal, precision = 3)
  echo "CPU Time [", benchmarkName, "] ", elapsedStr, "s"

@@ -1 +0,0 @@
The quick brown fox jumps over the lazy dog!

@@ -1,37 +0,0 @@
## Nim-POS
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

import por
import benchmark
import strutils

const sectorsperblock = 1024.int64
const querylen = 22

proc testbls() : bool =
  benchmark "Key generation":
    let (spk, ssk) = por.keygen()

  benchmark "Auth generation (s=" & $sectorsperblock & ")":
    let (tau, authenticators) = por.setup(ssk, sectorsperblock, "example.txt")
    #echo "Auth: ", authenticators

  benchmark "Generating challenge (q=" & $querylen & ")":
    let q = por.generateQuery(tau, spk, querylen)
    #echo "Generated!" #, " q:", q

  benchmark "Issuing proof":
    let (mu, sigma) = por.generateProof(q, authenticators, spk, sectorsperblock, "example.txt")
    #echo "Issued!" #, " mu:", mu, " sigma:", sigma

  benchmark "Verifying proof":
    result = por.verifyProof(tau, q, mu, sigma, spk)
    echo "Result: ", result

let r = testbls()

@@ -8,11 +8,11 @@ import pkg/chronos
import pkg/libp2p
import pkg/libp2p/errors

import pkg/dagger/rng
import pkg/dagger/stores
import pkg/dagger/blockexchange
import pkg/dagger/chunker
import pkg/dagger/blocktype as bt
import pkg/codex/rng
import pkg/codex/stores
import pkg/codex/blockexchange
import pkg/codex/chunker
import pkg/codex/blocktype as bt

import ../../helpers/mockdiscovery

@@ -43,7 +43,7 @@ suite "Block Advertising and Discovery":
      blocks.add(bt.Block.new(chunk).tryGet())

    switch = newStandardSwitch(transportFlags = {ServerFlags.ReuseAddr})
    blockDiscovery = MockDiscovery.new(switch.peerInfo, 0.Port)
    blockDiscovery = MockDiscovery.new()
    wallet = WalletRef.example
    network = BlockExcNetwork.new(switch)
    localStore = CacheStore.new(blocks.mapIt( it ))

@@ -76,7 +76,7 @@ suite "Block Advertising and Discovery":

    await engine.start()

    blockDiscovery.publishProvideHandler =
    blockDiscovery.publishBlockProvideHandler =
      proc(d: MockDiscovery, cid: Cid): Future[void] {.async, gcsafe.} =
        return

@@ -94,7 +94,7 @@ suite "Block Advertising and Discovery":
      advertised = initTable.collect:
        for b in blocks: {b.cid: newFuture[void]()}

    blockDiscovery.publishProvideHandler = proc(d: MockDiscovery, cid: Cid) {.async.} =
    blockDiscovery.publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid) {.async.} =
      if cid in advertised and not advertised[cid].finished():
        advertised[cid].complete()

@@ -150,7 +150,7 @@ suite "E2E - Multiple Nodes Discovery":
    for _ in 0..<4:
      let
        s = newStandardSwitch(transportFlags = {ServerFlags.ReuseAddr})
        blockDiscovery = MockDiscovery.new(s.peerInfo, 0.Port)
        blockDiscovery = MockDiscovery.new()
        wallet = WalletRef.example
        network = BlockExcNetwork.new(s)
        localStore = CacheStore.new()

@@ -189,15 +189,15 @@ suite "E2E - Multiple Nodes Discovery":
    var advertised: Table[Cid, SignedPeerRecord]

    MockDiscovery(blockexc[1].engine.discovery.discovery)
      .publishProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} =
      .publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} =
        advertised.add(cid, switch[1].peerInfo.signedPeerRecord)

    MockDiscovery(blockexc[2].engine.discovery.discovery)
      .publishProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} =
      .publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} =
        advertised.add(cid, switch[2].peerInfo.signedPeerRecord)

    MockDiscovery(blockexc[3].engine.discovery.discovery)
      .publishProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} =
      .publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} =
        advertised.add(cid, switch[3].peerInfo.signedPeerRecord)

    await blockexc[1].engine.blocksHandler(switch[0].peerInfo.peerId, blocks[0..5])

@@ -231,15 +231,15 @@ suite "E2E - Multiple Nodes Discovery":
    var advertised: Table[Cid, SignedPeerRecord]

    MockDiscovery(blockexc[1].engine.discovery.discovery)
      .publishProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} =
      .publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} =
        advertised[cid] = switch[1].peerInfo.signedPeerRecord

    MockDiscovery(blockexc[2].engine.discovery.discovery)
      .publishProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} =
      .publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} =
        advertised[cid] = switch[2].peerInfo.signedPeerRecord

    MockDiscovery(blockexc[3].engine.discovery.discovery)
      .publishProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} =
      .publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} =
        advertised[cid] = switch[3].peerInfo.signedPeerRecord

    await blockexc[1].engine.blocksHandler(switch[0].peerInfo.peerId, blocks[0..5])

@@ -5,15 +5,14 @@ import std/tables
import pkg/asynctest

import pkg/chronos
import pkg/chronicles
import pkg/libp2p

import pkg/dagger/rng
import pkg/dagger/stores
import pkg/dagger/blockexchange
import pkg/dagger/chunker
import pkg/dagger/blocktype as bt
import pkg/dagger/blockexchange/engine
import pkg/codex/rng
import pkg/codex/stores
import pkg/codex/blockexchange
import pkg/codex/chunker
import pkg/codex/blocktype as bt
import pkg/codex/blockexchange/engine

import ../../helpers/mockdiscovery

@@ -80,7 +79,7 @@ suite "Test Discovery Engine":
      for b in blocks:
        { b.cid: newFuture[void]() }

    blockDiscovery.publishProvideHandler =
    blockDiscovery.publishBlockProvideHandler =
      proc(d: MockDiscovery, cid: Cid) {.async, gcsafe.} =
        if not haves[cid].finished:
          haves[cid].complete

@@ -125,7 +124,7 @@ suite "Test Discovery Engine":
        discoveryLoopSleep = 100.millis)
      have = newFuture[void]()

    blockDiscovery.publishProvideHandler =
    blockDiscovery.publishBlockProvideHandler =
      proc(d: MockDiscovery, cid: Cid) {.async, gcsafe.} =
        check cid == blocks[0].cid
        if not have.finished:

@@ -217,7 +216,7 @@ suite "Test Discovery Engine":
      reqs = newFuture[void]()
      count = 0

    blockDiscovery.publishProvideHandler =
    blockDiscovery.publishBlockProvideHandler =
      proc(d: MockDiscovery, cid: Cid) {.async, gcsafe.} =
        check cid == blocks[0].cid
        if count > 0:

@@ -8,15 +8,14 @@ import pkg/stew/byteutils
import pkg/libp2p
import pkg/libp2p/errors

import pkg/dagger/rng
import pkg/dagger/stores
import pkg/dagger/blockexchange
import pkg/dagger/chunker
import pkg/dagger/discovery
import pkg/dagger/blocktype as bt
import pkg/codex/rng
import pkg/codex/stores
import pkg/codex/blockexchange
import pkg/codex/chunker
import pkg/codex/discovery
import pkg/codex/blocktype as bt

import ../helpers
import ../examples
import ../../helpers

suite "NetworkStore engine - 2 nodes":
  let

@@ -9,16 +9,16 @@ import pkg/libp2p
import pkg/libp2p/routing_record
import pkg/libp2pdht/discv5/protocol as discv5

import pkg/dagger/rng
import pkg/dagger/blockexchange
import pkg/dagger/stores
import pkg/dagger/chunker
import pkg/dagger/discovery
import pkg/dagger/blocktype as bt
import pkg/dagger/utils/asyncheapqueue
import pkg/codex/rng
import pkg/codex/blockexchange
import pkg/codex/stores
import pkg/codex/chunker
import pkg/codex/discovery
import pkg/codex/blocktype as bt
import pkg/codex/utils/asyncheapqueue

import ../helpers
import ../examples
import ../../helpers
import ../../examples

suite "NetworkStore engine basic":
  var

@@ -1,6 +1,6 @@
import std/unittest

import pkg/dagger/stores
import pkg/codex/stores
import ../../examples

suite "engine payments":

@@ -2,7 +2,7 @@ import pkg/asynctest
import pkg/chronos
import pkg/stew/byteutils
import ../../examples
import pkg/dagger/stores
import pkg/codex/stores

suite "account protobuf messages":

@@ -2,7 +2,7 @@ import pkg/asynctest
import pkg/chronos
import pkg/libp2p

import pkg/dagger/blockexchange/protobuf/presence
import pkg/codex/blockexchange/protobuf/presence
import ../../examples

suite "block presence protobuf messages":

@@ -0,0 +1,4 @@
import ./discovery/testdiscovery
import ./discovery/testdiscoveryengine

{.warning[UnusedImport]: off.}

@@ -0,0 +1,5 @@
import ./engine/testengine
import ./engine/testblockexc
import ./engine/testpayments

{.warning[UnusedImport]: off.}

@@ -7,10 +7,10 @@ import pkg/libp2p
import pkg/libp2p/errors
import pkg/protobuf_serialization

import pkg/dagger/rng
import pkg/dagger/chunker
import pkg/dagger/blocktype as bt
import pkg/dagger/blockexchange
import pkg/codex/rng
import pkg/codex/chunker
import pkg/codex/blocktype as bt
import pkg/codex/blockexchange

import ../helpers
import ../examples