removing old por proofs implementation (#593)

This commit is contained in:
Dmitriy Ryajov 2023-10-23 08:58:07 -06:00 committed by GitHub
parent a77d0cdcec
commit 8a7d74e6b2
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
15 changed files with 0 additions and 1627 deletions

View File

@ -1,4 +0,0 @@
import ./por/serialization
import ./por/por
export por, serialization

View File

@ -1,482 +0,0 @@
## Nim-Codex
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
# Implementation of the BLS-based public PoS scheme from
# Shacham H., Waters B., "Compact Proofs of Retrievability"
# using pairing over BLS12-381 ECC
#
# Notation from the paper
# In Z:
# - n: number of blocks
# - s: number of sectors per block
#
# In Z_p: modulo curve order
# - m_{ij}: sectors of the file i:0..n-1 j:0..s-1
# - α: PoS secret key
# - name: random string
# - μ_j: part of proof, j:0..s-1
#
# In G_1: multiplicative cyclic group
# - H: {0,1} →G_1 : hash function
# - u_1,…,u_s ←R G_1 : random coefficients
# - σ_i: authenticators
# - σ: part of proof
#
# In G_2: multiplicative cyclic group
# - g: generator of G_2
# - v ← g^α: PoS public key
#
# In G_T:
# - used only to calculate the two pairings during validation
#
# Implementation:
# Our implementation uses additive cyclic groups instead of the multiplicative
# cyclic group in the paper, thus changing the name of the group operation as in
# blscurve and blst. Thus, point multiplication becomes point addition, and scalar
# exponentiation becomes scalar multiplication.
#
# Number of operations:
# The following table summarizes the number of operations in different phases
# using the following notation:
# - f: file size expressed in units of 31 bytes
# - n: number of blocks
# - s: number of sectors per block
# - q: number of query items
#
# Since f = n * s and s is a parameter of the scheme, it is better to express
# the cost as a function of f and s. This only matters for Setup, all other
# phases are independent of the file size assuming a given q.
#
# |                | Setup                     | Challenge | Proof     | Verify    |
# |----------------|---------------------------|-----------|-----------|-----------|
# | G1 random | s = s | q | | |
# | G1 scalar mult | n * (s+1) = f * (1 + 1/s) | | q | q + s |
# | G1 add | n * s = f | | q-1 | q-1 + s-1 |
# | Hash to G1 | n = f / s | | | q |
# | Z_p mult | = | | s * q | |
# | Z_p add | = | | s * (q-1) | |
# | pairing | = | | | 2 |
#
#
# Storage and communication cost:
# The storage overhead for a file of f_b bytes is given by the n authenticators
# calculated in the setup phase.
# f_b = f * 31 = n * s * 31
# Each authenticator is a point on G_1, which occupies 48 bytes in compressed form.
# Thus, the overall storage size in bytes is:
#   f_pos = f_b + n * 48 = f_b * (1 + (48/31) * (1/s))
#
# Communication cost in the Setup phase is simply related to the storage cost.
# The size of the challenge is
# q * (8 + 48) bytes
# The size of the proof is instead
# s * 32 + 48 bytes
import std/endians
import pkg/chronos
import pkg/blscurve
import pkg/blscurve/blst/blst_abi
import ../../rng
import ../../streams
# sector size in bytes. Must be smaller than the subgroup order r
# which is 255 bits long for BLS12-381
const
  # sector size in bytes. Must be smaller than the subgroup order r,
  # which is 255 bits long for BLS12-381; 31 bytes always fit.
  BytesPerSector* = 31

  # length in bytes of the unique (random) name
  Namelen = 512

type
  # a single sector
  ZChar* = array[BytesPerSector, byte]

  # secret key combining the metadata signing key and the POR generation key
  SecretKey* = object
    signkey*: blscurve.SecretKey
    key*: blst_scalar        # alpha in the paper

  # public key combining the metadata signing key and the POR validation key
  PublicKey* = object
    signkey*: blscurve.PublicKey
    key*: blst_p2            # v = g^alpha in the paper

  # POR metadata (called "file tag t_0" in the original paper)
  TauZero* = object
    name*: array[Namelen, byte]  # unique random name for the file
    n*: int64                    # number of blocks
    u*: seq[blst_p1]             # random coefficients u_1..u_s

  # signed POR metadata (called "signed file tag t" in the original paper)
  Tau* = object
    t*: TauZero
    signature*: array[96, byte]

  Proof* = object
    mu*: seq[blst_scalar]  # mu_j, one per sector
    sigma*: blst_p1        # aggregated authenticator

  # PoR query element
  QElement* = object
    i*: int64        # queried block index
    v*: blst_scalar  # random challenge coefficient

  PoR* = object
    ssk*: SecretKey
    spk*: PublicKey
    tau*: Tau
    authenticators*: seq[blst_p1]  # sigma_i, one per block
proc fromBytesBE(a: openArray[byte]): blst_scalar =
  ## Interpret up to 32 big-endian bytes as a blst scalar, right-aligning
  ## shorter inputs into a zero-padded 32-byte buffer. Asserts that the
  ## value is a valid field element (below the subgroup order).
  var padded: array[32, byte]
  doAssert(a.len <= padded.len)
  let offset = padded.len - a.len
  for idx, value in a:
    padded[offset + idx] = value
  blst_scalar_from_bendian(result, padded)
  doAssert(blst_scalar_fr_check(result).bool)
proc getSector(
    stream: SeekableStream,
    blockId: int64,
    sectorId: int64,
    spb: int64): Future[ZChar] {.async.} =
  ## Read the file sector at the given <blockId, sectorId> position.
  ## `spb` is the number of sectors per block.
  var sector: ZChar
  let offset = (blockId * spb + sectorId) * ZChar.len
  stream.setPos(offset.int)
  discard await stream.readOnce(addr sector[0], ZChar.len)
  return sector
proc rndScalar(): blst_scalar =
  ## Generate a uniformly random scalar within the subgroup order r
  ## by rejection sampling on 32 random bytes.
  var bytes: array[32, byte]
  while true:
    for b in bytes.mitems:
      b = byte Rng.instance.rand(0xFF)
    var candidate: blst_scalar
    candidate.blst_scalar_from_bendian(bytes)
    # keep only values strictly below the subgroup order
    if blst_scalar_fr_check(candidate).bool:
      return candidate
proc rndP2(): (blst_p2, blst_scalar) =
  ## Generate a random point on G2 together with the scalar used to derive it.
  let scalar = rndScalar()
  var point: blst_p2
  point.blst_p2_from_affine(BLS12_381_G2) # start from the group generator
  point.blst_p2_mult(point, scalar, 255)
  (point, scalar)
proc rndP1(): (blst_p1, blst_scalar) =
  ## Generate a random point on G1 together with the scalar used to derive it.
  let scalar = rndScalar()
  var point: blst_p1
  point.blst_p1_from_affine(BLS12_381_G1) # start from the group generator
  point.blst_p1_mult(point, scalar, 255)
  (point, scalar)
template posKeygen(): (blst_p2, blst_scalar) =
  ## Generate POS key pair
  ##
  # The PoS key pair is a random G2 point (public key, v = g^alpha in the
  # paper) and the scalar used to derive it (secret key, alpha).
  rndP2()
proc keyGen*(): (PublicKey, SecretKey) =
  ## Generate a combined key pair: a BLS key pair for signing metadata
  ## plus a PoS tag key pair.
  var seed: array[32, byte]
  for b in seed.mitems:
    b = byte Rng.instance.rand(0xFF)

  var
    pk: PublicKey
    sk: SecretKey
  doAssert seed.keyGen(pk.signkey, sk.signkey)
  (pk.key, sk.key) = posKeygen()
  (pk, sk)
proc sectorsCount(stream: SeekableStream, s: int64): int64 =
  ## Calculate number of blocks for a file
  ##
  # NOTE(review): despite the name, this returns the number of *blocks*
  # (matching the doc comment above); renaming would break callers.
  let
    size = stream.size()
    # ceiling division: ceil(size / (s * sizeof(ZChar)))
    n = ((size - 1) div (s * sizeof(ZChar))) + 1
  # debugEcho "File size=", size, " bytes",
  #   ", blocks=", n,
  #   ", sectors/block=", $s,
  #   ", sectorsize=", $sizeof(ZChar), " bytes"
  return n
proc hashToG1[T: byte|char](msg: openArray[T]): blst_p1 =
  ## Hash to curve with Dagger specific domain separation
  ##
  # The fixed domain-separation tag keeps these hashes distinct from any
  # other application of hash-to-G1 over the same message bytes.
  const dst = "DAGGER-PROOF-OF-CONCEPT"
  result.blst_hash_to_g1(msg, dst, aug = "")
proc hashNameI(name: array[Namelen, byte], i: int64): blst_p1 =
  ## Calculate unique filename and block index based hash, H(name || i).
  ##
  # # naive implementation, hashing a long string representation
  # # such as "[255, 242, 23]1"
  # return hashToG1($name & $i)

  # more compact and faster implementation: hash the raw name bytes
  # followed by the big-endian encoding of the block index
  var namei: array[sizeof(name) + sizeof(int64), byte]
  namei[0..sizeof(name)-1] = name
  bigEndian64(addr(namei[sizeof(name)]), unsafeAddr(i))
  return hashToG1(namei)
proc generateAuthenticatorOpt(
    stream: SeekableStream,
    ssk: SecretKey,
    i: int64,
    s: int64,
    t: TauZero,
    ubase: seq[blst_scalar]): Future[blst_p1] {.async.} =
  ## Optimized implementation of authenticator generation.
  ## This implementation reduces the number of scalar multiplications
  ## from s+1 to 1+1, using knowledge about the scalars (r_j)
  ## used to generate u_j as u_j = g^{r_j}
  ##
  ## With the paper's multiplicative notation, we use:
  ## (H(file||i)\cdot g^{\sum{j=0}^{s-1}{r_j \cdot m[i][j]}})^{\alpha}
  ##
  # accumulate sum = SUM_j r_j * m[i][j] in the scalar field
  var sum: blst_fr
  var sums: blst_scalar
  for j in 0..<s:
    var a, b, x: blst_fr
    a.blst_fr_from_scalar(ubase[j])
    b.blst_fr_from_scalar(fromBytesBE((await stream.getSector(i, j, s))))
    x.blst_fr_mul(a, b)
    sum.blst_fr_add(sum, x)
  sums.blst_scalar_from_fr(sum)

  # g^{sum}, combined with H(name||i), finally raised to the secret alpha
  result.blst_p1_from_affine(BLS12_381_G1)
  result.blst_p1_mult(result, sums, 255)
  result.blst_p1_add_or_double(result, hashNameI(t.name, i))
  result.blst_p1_mult(result, ssk.key, 255)
proc generateAuthenticator(
    stream: SeekableStream,
    ssk: SecretKey,
    i: int64,
    s: int64,
    t: TauZero,
    ubase: seq[blst_scalar]): Future[blst_p1] =
  ## Dispatch to the selected tag-generator implementation.
  ##
  # A naive variant used to exist for cross-checking:
  #   let a = generateAuthenticatorNaive(i, s, t, f, ssk)
  #   doAssert(a.blst_p1_is_equal(b).bool)
  generateAuthenticatorOpt(stream, ssk, i, s, t, ubase)
proc generateQuery*(tau: Tau, l: int): seq[QElement] =
  ## Generate a random BLS query of the given size `l`.
  let blockCount = tau.t.n # number of blocks
  for _ in 0..<l:
    result.add(QElement(
      i: Rng.instance.rand(blockCount - 1), #TODO: dedup
      v: rndScalar()))                      #TODO: fix range
proc generateProof*(
    stream: SeekableStream,
    q: seq[QElement],
    authenticators: seq[blst_p1],
    s: int64
): Future[Proof] {.async.} =
  ## Generate a BLS proof for a given query
  ##
  # mu_j = SUM over query elements of v_i * m[i][j], computed per sector j
  var
    mu: seq[blst_scalar]
  for j in 0..<s:
    var
      muj: blst_fr
    for qelem in q:
      let
        sect = fromBytesBE((await stream.getSector(qelem.i, j, s)))
      var
        x, v, sector: blst_fr
      sector.blst_fr_from_scalar(sect)
      v.blst_fr_from_scalar(qelem.v)
      x.blst_fr_mul(v, sector)
      muj.blst_fr_add(muj, x)
    var
      mujs: blst_scalar
    mujs.blst_scalar_from_fr(muj)
    mu.add(mujs)

  # sigma = SUM over query elements of sigma_i^{v_i} (additive notation)
  var
    sigma: blst_p1
  for qelem in q:
    var
      prod: blst_p1
    prod.blst_p1_mult(authenticators[qelem.i], qelem.v, 255)
    sigma.blst_p1_add_or_double(sigma, prod)

  return Proof(mu: mu, sigma: sigma)
proc pairing(a: blst_p1, b: blst_p2): blst_fp12 =
  ## Compute the pairing e: G_1 x G_2 -> G_T.
  var
    affineA: blst_p1_affine
    affineB: blst_p2_affine
  blst_p1_to_affine(affineA, a)
  blst_p2_to_affine(affineB, b)

  var loop: blst_fp12
  blst_miller_loop(loop, affineB, affineA)
  blst_final_exp(result, loop)
proc verifyPairingsNaive(a1: blst_p1, a2: blst_p2, b1: blst_p1, b2: blst_p2): bool =
  ## Check e(a1, a2) == e(b1, b2) by evaluating both pairings in full.
  pairing(a1, a2) == pairing(b1, b2)
proc verifyPairings(a1: blst_p1, a2: blst_p2, b1: blst_p1, b2: blst_p2): bool =
  ## Wrapper selecting the pairing-equality implementation.
  ##
  # Alternative implementation: verifyPairingsNeg(a1, a2, b1, b2)
  verifyPairingsNaive(a1, a2, b1, b2)
proc verifyProof*(
    self: PoR,
    q: seq[QElement],
    mus: seq[blst_scalar],
    sigma: blst_p1): bool =
  ## Verify a BLS proof given a query
  ##
  # verify signature on Tau
  var signature: blscurve.Signature
  if not signature.fromBytes(self.tau.signature):
    return false
  if not verify(self.spk.signkey, $self.tau.t, signature):
    return false

  # first = SUM over query of H(name||i)^{v_i}
  var first: blst_p1
  for qelem in q:
    var prod: blst_p1
    prod.blst_p1_mult(hashNameI(self.tau.t.name, qelem.i), qelem.v, 255)
    first.blst_p1_add_or_double(first, prod)
  doAssert(blst_p1_on_curve(first).bool)

  # second = SUM over sectors of u_j^{mu_j}
  let us = self.tau.t.u
  var second: blst_p1
  for j in 0..<len(us):
    var prod: blst_p1
    prod.blst_p1_mult(us[j], mus[j], 255)
    second.blst_p1_add_or_double(second, prod)
  doAssert(blst_p1_on_curve(second).bool)

  var sum: blst_p1
  sum.blst_p1_add_or_double(first, second)

  var g: blst_p2
  g.blst_p2_from_affine(BLS12_381_G2)

  # accept iff e(first + second, v) == e(sigma, g), with v = g^alpha
  return verifyPairings(sum, self.spk.key, sigma, g)
proc init*(
    T: type PoR,
    stream: SeekableStream,
    ssk: SecretKey,
    spk: PublicKey,
    blockSize: int64
): Future[PoR] {.async.} =
  ## Set up the POR scheme by generating tags and metadata
  ##
  doAssert(
    (blockSize mod BytesPerSector) == 0,
    "Block size should be divisible by `BytesPerSector`")

  let
    s = blockSize div BytesPerSector  # sectors per block
    n = stream.sectorsCount(s)        # number of blocks

  # generate a random name
  var t = TauZero(n: n)
  for i in 0..<Namelen:
    t.name[i] = byte Rng.instance.rand(0xFF)

  # generate the coefficient vector for combining sectors of a block: U
  var ubase: seq[blst_scalar]
  for i in 0..<s:
    let (u, ub) = rndP1()
    t.u.add(u)
    ubase.add(ub)  # keep the scalars for the optimized tag generator

  #TODO: a better bytearray conversion of TauZero for the signature might be needed
  #      the current conversion using $t might be architecture dependent and not unique
  let
    signature = sign(ssk.signkey, $t)
    tau = Tau(t: t, signature: signature.exportRaw())

  # generate sigmas (one authenticator per block)
  var
    sigmas: seq[blst_p1]
  for i in 0..<n:
    sigmas.add((await stream.generateAuthenticator(ssk, i, s, t, ubase)))

  return PoR(
    ssk: ssk,
    spk: spk,
    tau: tau,
    authenticators: sigmas)

View File

@ -1,3 +0,0 @@
import ./serialization/serialization
export serialization

View File

@ -1,185 +0,0 @@
## Nim-Codex
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/questionable/results
import pkg/libp2p/protobuf/minprotobuf
type
  # wire form of TauZero: raw name bytes, block count, serialized u points
  TauZeroMessage* = object
    name*: seq[byte]
    n*: int64
    u*: seq[seq[byte]]

  # wire form of the signed metadata
  TauMessage* = object
    t*: TauZeroMessage
    signature*: seq[byte]

  # wire form of the combined public key
  PubKeyMessage* = object
    signkey*: seq[byte]
    key*: seq[byte]

  # wire form of a full PoR context
  PorMessage* = object
    tau*: TauMessage
    spk*: PubKeyMessage
    authenticators*: seq[seq[byte]]

  # wire form of a proof
  ProofMessage* = object
    mu*: seq[seq[byte]]
    sigma*: seq[byte]

  # envelope bundling PoR context and proof
  PoREnvelope* = object
    por*: PorMessage
    proof*: ProofMessage
func write*(pb: var ProtoBuffer, field: int, value: TauZeroMessage) =
  ## Write a TauZeroMessage as a nested protobuf field.
  var nested = initProtoBuffer()
  nested.write(1, value.name)
  nested.write(2, value.n.uint64)
  for coeff in value.u:
    nested.write(3, coeff)  # repeated field
  nested.finish()
  pb.write(field, nested)
func write*(pb: var ProtoBuffer, field: int, value: TauMessage) =
  ## Write a TauMessage as a nested protobuf field.
  var nested = initProtoBuffer()
  nested.write(1, value.t)
  nested.write(2, value.signature)
  nested.finish()
  pb.write(field, nested)
func write*(pb: var ProtoBuffer, field: int, value: PubKeyMessage) =
  ## Write a PubKeyMessage as a nested protobuf field.
  var nested = initProtoBuffer()
  nested.write(1, value.signkey)
  nested.write(2, value.key)
  nested.finish()
  pb.write(field, nested)
func write*(pb: var ProtoBuffer, field: int, value: PorMessage) =
  ## Write a PorMessage as a nested protobuf field.
  var nested = initProtoBuffer()
  nested.write(1, value.tau)
  nested.write(2, value.spk)
  for auth in value.authenticators:
    nested.write(3, auth)  # repeated field
  nested.finish()
  pb.write(field, nested)
func encode*(msg: PorMessage): seq[byte] =
  ## Encode a PorMessage to its wire bytes.
  var buf = initProtoBuffer()
  buf.write(1, msg.tau)
  buf.write(2, msg.spk)
  for auth in msg.authenticators:
    buf.write(3, auth)  # repeated field
  buf.finish()
  buf.buffer
func write*(pb: var ProtoBuffer, field: int, value: ProofMessage) =
  ## Write a ProofMessage as a nested protobuf field.
  var nested = initProtoBuffer()
  for mu in value.mu:
    nested.write(1, mu)  # repeated field
  nested.write(2, value.sigma)
  nested.finish()
  pb.write(field, nested)
func encode*(message: PoREnvelope): seq[byte] =
  ## Encode a PoREnvelope to its wire bytes.
  var buf = initProtoBuffer()
  buf.write(1, message.por)
  buf.write(2, message.proof)
  buf.finish()
  buf.buffer
proc decode*(_: type TauZeroMessage, pb: ProtoBuffer): ProtoResult[TauZeroMessage] =
  ## Decode a TauZeroMessage from a protobuf buffer.
  var msg = TauZeroMessage()
  discard ? pb.getField(1, msg.name)

  var blockCount: uint64
  discard ? pb.getField(2, blockCount)
  msg.n = blockCount.int64

  var coeffs: seq[seq[byte]]
  discard ? pb.getRepeatedField(3, coeffs)
  for coeff in coeffs:
    msg.u.add(coeff)
  ok(msg)
proc decode*(_: type TauMessage, pb: ProtoBuffer): ProtoResult[TauMessage] =
  ## Decode a TauMessage from a protobuf buffer.
  var
    msg = TauMessage()
    nested: ProtoBuffer
  discard ? pb.getField(1, nested)
  msg.t = ? TauZeroMessage.decode(nested)
  discard ? pb.getField(2, msg.signature)
  ok(msg)
proc decode*(_: type PubKeyMessage, pb: ProtoBuffer): ProtoResult[PubKeyMessage] =
  ## Decode a PubKeyMessage from a protobuf buffer.
  var msg = PubKeyMessage()
  discard ? pb.getField(1, msg.signkey)
  discard ? pb.getField(2, msg.key)
  ok(msg)
proc decode*(_: type PorMessage, pb: ProtoBuffer): ProtoResult[PorMessage] =
  ## Decode a PorMessage from a protobuf buffer.
  var
    msg = PorMessage()
    nested: ProtoBuffer
  discard ? pb.getField(1, nested)
  msg.tau = ? TauMessage.decode(nested)
  discard ? pb.getField(2, nested)
  msg.spk = ? PubKeyMessage.decode(nested)

  var auths: seq[seq[byte]]
  discard ? pb.getRepeatedField(3, auths)
  for auth in auths:
    msg.authenticators.add(auth)
  ok(msg)
proc decode*(_: type PorMessage, msg: seq[byte]): ProtoResult[PorMessage] =
  ## Decode a PorMessage directly from raw wire bytes.
  PorMessage.decode(initProtoBuffer(msg))
proc decode*(_: type ProofMessage, pb: ProtoBuffer): ProtoResult[ProofMessage] =
  ## Decode a ProofMessage from a protobuf buffer.
  ##
  ## `mu` is written as a *repeated* field (see the matching `write`
  ## above, which emits field 1 once per element), so it must be read
  ## back with `getRepeatedField`; the previous `getField` could not
  ## recover all elements of the repeated field.
  var value = ProofMessage()
  discard ? pb.getRepeatedField(1, value.mu)
  discard ? pb.getField(2, value.sigma)
  ok(value)
func decode*(_: type PoREnvelope, msg: openArray[byte]): ?!PoREnvelope =
  ## Decode a PoREnvelope from raw wire bytes.
  ##
  ## Bug fix: the previous code passed `? value.por.decode` — i.e. the
  ## *result* of decoding an empty message — as the output argument of
  ## `getField`, which can never populate `value`. Extract each nested
  ## sub-buffer first, then decode it into the envelope.
  var
    pb = initProtoBuffer(msg)
    porPb, proofPb: ProtoBuffer
  if pb.getField(1, porPb).isErr:
    return failure("Unable to decode PoREnvelope: bad por field")
  if pb.getField(2, proofPb).isErr:
    return failure("Unable to decode PoREnvelope: bad proof field")

  let
    por = PorMessage.decode(porPb)
    proof = ProofMessage.decode(proofPb)
  if por.isErr:
    return failure("Unable to decode PorMessage")
  if proof.isErr:
    return failure("Unable to decode ProofMessage")
  success PoREnvelope(por: por.get, proof: proof.get)

View File

@ -1,166 +0,0 @@
## Nim-Codex
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/sequtils
import pkg/stew/results
import pkg/stew/objects
import pkg/blscurve
import pkg/blscurve/blst/blst_abi
import ./messages
export messages
import ../por
func toMessage*(self: Proof): ProofMessage =
  ## Serialize a Proof into its wire representation.
  var message = ProofMessage()
  for mu in self.mu:
    var muBytes: array[32, byte]
    blst_bendian_from_scalar(muBytes, mu)
    message.mu.add(toSeq(muBytes))

  var sigmaBytes: array[96, byte]
  blst_p1_serialize(sigmaBytes, self.sigma)
  message.sigma = toSeq(sigmaBytes)
  message
func fromMessage*(self: ProofMessage): Result[Proof, string] =
  ## Deserialize a Proof from its wire representation.
  ## Fails if sigma is not a valid serialized G1 point.
  var
    proof = Proof()
    sigmaAffine: blst_p1_affine
  # sigma travels in 96-byte (uncompressed) serialized form
  if blst_p1_deserialize(sigmaAffine, toArray(96, self.sigma)) != BLST_SUCCESS:
    return err("Unable to decompress sigma")
  blst_p1_from_affine(proof.sigma, sigmaAffine)

  for mu in self.mu:
    var
      muScalar: blst_scalar
    blst_scalar_from_bendian(muScalar, toArray(32, mu))
    proof.mu.add(muScalar)

  ok(proof)
func toMessage*(self: TauZero): TauZeroMessage =
  ## Serialize TauZero metadata into its wire representation.
  var
    message = TauZeroMessage(
      name: toSeq(self.name),
      n: self.n)
  for u in self.u:
    var
      serialized: array[96, byte]
    # serializes the point in 96-byte (uncompressed) form — matches the
    # 96-byte deserialization in `fromMessage` below
    blst_p1_serialize(serialized, u)
    message.u.add(toSeq(serialized))
  message
func fromMessage*(self: TauZeroMessage): Result[TauZero, string] =
  ## Deserialize TauZero metadata from its wire representation.
  ## Fails if any u coefficient is not a valid serialized G1 point.
  var
    tauZero: TauZero
  # name is fixed-length (Namelen = 512 in por.nim)
  tauZero.name = toArray(512, self.name)
  tauZero.n = self.n

  for u in self.u:
    var
      uuAffine: blst_p1_affine
      uu: blst_p1
    if blst_p1_deserialize(uuAffine, toArray(96, u)) != BLST_SUCCESS:
      return err("Unable to decompress u")
    blst_p1_from_affine(uu, uuAffine)
    tauZero.u.add(uu)

  ok(tauZero)
func toMessage*(self: Tau): TauMessage =
  ## Serialize signed metadata; the signature is already in raw byte form.
  let inner = self.t.toMessage()
  TauMessage(
    t: inner,
    signature: toSeq(self.signature))
func fromMessage*(self: TauMessage): Result[Tau, string] =
  ## Deserialize signed metadata from its wire representation.
  let inner = ? self.t.fromMessage()
  ok(Tau(
    t: inner,
    signature: toArray(96, self.signature)))
func toMessage*(self: por.PublicKey): PubKeyMessage =
  ## Serialize the combined public key (signing key + PoS validation key).
  var posKey: array[192, byte]
  blst_p2_serialize(posKey, self.key)
  PubKeyMessage(
    signkey: toSeq(self.signkey.exportUncompressed()),
    key: toSeq(posKey))
func fromMessage*(self: PubKeyMessage): Result[por.PublicKey, string] =
  ## Deserialize the combined public key from its wire representation.
  ## Fails if either key component is malformed.
  var
    spk: por.PublicKey
    keyAffine: blst_p2_affine
  # signing key is 96 bytes uncompressed (see exportUncompressed above)
  if not spk.signkey.fromBytes(self.signkey.toOpenArray(0, 95)):
    return err("Unable to deserialize public key!")

  # PoS key is a 192-byte serialized G2 point
  if blst_p2_deserialize(keyAffine, toArray(192, self.key)) != BLST_SUCCESS:
    return err("Unable to decompress key!")
  blst_p2_from_affine(spk.key, keyAffine)

  ok(spk)
func toMessage*(self: PoR): PorMessage =
  ## Serialize a full PoR context into its wire representation.
  ## The secret key is intentionally not included.
  var message = PorMessage(
    tau: self.tau.toMessage(),
    spk: self.spk.toMessage())
  for authenticator in self.authenticators:
    var serialized: array[96, byte]
    blst_p1_serialize(serialized, authenticator)
    message.authenticators.add(toSeq(serialized))
  message
func fromMessage*(self: PorMessage): Result[PoR, string] =
  ## Deserialize a PoR context from its wire representation.
  ## The resulting PoR carries no secret key (ssk stays zero-initialized).
  var
    por = PoR(
      tau: ? self.tau.fromMessage(),
      spk: ? self.spk.fromMessage())
  for sigma in self.authenticators:
    var
      sigmaAffine: blst_p1_affine
      authenticator: blst_p1
    if blst_p1_deserialize(sigmaAffine, toArray(96, sigma)) != BLST_SUCCESS:
      return err("Unable to decompress sigma")
    blst_p1_from_affine(authenticator, sigmaAffine)
    por.authenticators.add(authenticator)

  return ok(por)

View File

@ -1,100 +0,0 @@
## Nim-Dagger
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/chronos
import pkg/chronicles
import pkg/questionable
import pkg/questionable/results
import pkg/contractabi/address as ca
import ../stores
import ../manifest
import ../streams
import ../utils
import ./por
import ./stpnetwork
import ./stpproto
import ./stpstore
export stpnetwork, stpstore, por, stpproto
type
  # Top-level coordinator tying together the block store, the
  # storage-proofs network protocol and the on-disk proof store.
  StorageProofs* = object
    store*: BlockStore   # source of file blocks
    network*: StpNetwork # protocol for exchanging tags
    stpStore*: StpStore  # filesystem persistence for proofs/tags
proc upload*(
    self: StorageProofs,
    cid: Cid,
    indexes: seq[int],
    host: ca.Address
): Future[?!void] {.async.} =
  ## Upload authenticators
  ##
  # fetch the previously stored PoR data for this cid, then push the
  # selected authenticator tags to the given host
  without por =? (await self.stpStore.retrieve(cid)):
    trace "Unable to retrieve por data from store", cid
    return failure("Unable to retrieve por data from store")

  return await self.network.uploadTags(
    cid,
    indexes,
    por.authenticators,
    host)
# proc proof*() =
# discard
# proc verify*() =
# discard
proc setupProofs*(
    self: StorageProofs,
    manifest: Manifest
): Future[?!void] {.async.} =
  ## Setup storage authentication
  ##
  without cid =? manifest.cid:
    return failure("Unable to retrieve Cid from manifest!")

  # generate fresh keys and run the PoR setup phase over the dataset,
  # then persist the resulting (public) PoR message
  let
    (spk, ssk) = keyGen()
    por = await PoR.init(
      StoreStream.new(self.store, manifest),
      ssk,
      spk,
      manifest.blockSize)

  return await self.stpStore.store(por.toMessage(), cid)
proc init*(
    T: type StorageProofs,
    network: StpNetwork,
    store: BlockStore,
    stpStore: StpStore
): StorageProofs =
  ## Create a StorageProofs instance and register its incoming-tags
  ## callback on the network.
  var
    self = T(
      store: store,
      stpStore: stpStore,
      network: network)

  proc tagsHandler(msg: TagsMessage) {.async, gcsafe.} =
    try:
      # Bug fix: StpStore.store takes (tags: seq[Tag], cid: Cid) — the
      # previous call passed (msg.cid, msg.tags), i.e. a raw seq[byte]
      # in the Cid position and the arguments swapped.
      let cid = Cid.init(msg.cid).tryGet()
      (await self.stpStore.store(msg.tags, cid)).tryGet()
      trace "Stored tags", cid = $cid, tags = msg.tags.len
    except CatchableError as exc:
      trace "Exception attempting to store tags", exc = exc.msg

  # Bug fix: the StpNetwork field is named `tagsHandle` (see the type
  # declaration in stpnetwork.nim); `tagsHandler` does not exist.
  self.network.tagsHandle = tagsHandler
  self

View File

@ -1,106 +0,0 @@
## Nim-Dagger
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/sequtils
import pkg/chronos
import pkg/libp2p
import pkg/chronicles
import pkg/questionable/results
import pkg/contractabi/address as ca
import ./stpproto
import ../discovery
import ../formats
const
  Codec* = "/dagger/storageproofs/1.0.0"
  MaxMessageSize* = 1 shl 22 # 4MB

logScope:
  topics = "dagger storageproofs network"

type
  # Callback invoked whenever a TagsMessage is received on the protocol
  TagsHandler* = proc(msg: TagsMessage):
    Future[void] {.raises: [Defect], gcsafe.}

  StpNetwork* = ref object of LPProtocol
    switch*: Switch          # libp2p switch used for dialing peers
    discovery*: Discovery    # host discovery service
    tagsHandle*: TagsHandler # note: field is `tagsHandle`, not `tagsHandler`
proc uploadTags*(
    self: StpNetwork,
    cid: Cid,
    indexes: seq[int],
    tags: seq[seq[byte]],
    host: ca.Address
): Future[?!void] {.async.} =
  # Upload the tags at `indexes` for `cid` to `host`
  #
  var msg = TagsMessage(cid: cid.data.buffer)
  for i in indexes:
    msg.tags.add(Tag(idx: i, tag: tags[i]))

  let
    peers = await self.discovery.find(host)
    # dial every discovered peer concurrently; proceed with the first
    # dial future that completes
    connFut = await one(peers.mapIt(
      self.switch.dial(
        it.data.peerId,
        it.data.addresses.mapIt( it.address ),
        @[Codec])))
    conn = await connFut

  try:
    await conn.writeLp(msg.encode)
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    trace "Exception submitting tags", cid, exc = exc.msg
    return failure(exc.msg)
  finally:
    # always release the connection, success or failure
    await conn.close()

  return success()
method init*(self: StpNetwork) =
  ## Perform protocol initialization
  ##
  proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
    try:
      let
        msg = await conn.readLp(MaxMessageSize)
        res = TagsMessage.decode(msg)

      # dispatch only successfully-decoded, non-empty messages, and only
      # when a handler has been registered
      if not self.tagsHandle.isNil:
        if res.isOk and res.get.tags.len > 0:
          await self.tagsHandle(res.get)
    except CatchableError as exc:
      trace "Exception handling Storage Proofs message", exc = exc.msg
    finally:
      await conn.close()

  self.handler = handle
  self.codec = Codec
proc new*(
    T: type StpNetwork,
    switch: Switch,
    discovery: Discovery
): StpNetwork =
  ## Create and initialize a new StpNetwork instance.
  let stp = StpNetwork(
    switch: switch,
    discovery: discovery)
  stp.init()
  stp

View File

@ -1,3 +0,0 @@
import ./stpproto/messages
export messages

View File

@ -1,66 +0,0 @@
## Nim-Codex
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import pkg/questionable/results
import pkg/libp2p/protobuf/minprotobuf
type
  # A single authenticator tag together with its block index
  Tag* = object
    idx*: int64
    tag*: seq[byte]

  # A batch of tags for the dataset identified by `cid`
  TagsMessage* = object
    cid*: seq[byte]
    tags*: seq[Tag]
func write*(pb: var ProtoBuffer, field: int, value: Tag) =
  ## Write a Tag as a nested protobuf field.
  var nested = initProtoBuffer()
  nested.write(1, value.idx.uint64)
  nested.write(2, value.tag)
  nested.finish()
  pb.write(field, nested)
func encode*(msg: TagsMessage): seq[byte] =
  ## Encode a TagsMessage to its wire bytes.
  var buf = initProtoBuffer()
  buf.write(1, msg.cid)
  for tag in msg.tags:
    buf.write(2, tag)  # repeated field
  buf.finish()
  buf.buffer
func decode*(_: type Tag, pb: ProtoBuffer): ProtoResult[Tag] =
  ## Decode a Tag from a protobuf buffer.
  var
    tag = Tag()
    rawIdx: uint64
  discard ? pb.getField(1, rawIdx)
  tag.idx = rawIdx.int64
  discard ? pb.getField(2, tag.tag)
  ok(tag)
func decode*(_: type TagsMessage, msg: openArray[byte]): ProtoResult[TagsMessage] =
  ## Decode a TagsMessage from raw wire bytes.
  var
    message = TagsMessage()
    pb = initProtoBuffer(msg)
  discard ? pb.getField(1, message.cid)

  var rawTags: seq[seq[byte]]
  discard ? pb.getRepeatedField(2, rawTags)
  for raw in rawTags:
    message.tags.add(? Tag.decode(initProtoBuffer(raw)))
  ok(message)

View File

@ -1,130 +0,0 @@
## Nim-Dagger
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/os
import std/strformat
import pkg/libp2p
import pkg/chronos
import pkg/chronicles
import pkg/stew/io2
import pkg/questionable
import pkg/questionable/results
import ../errors
import ../formats
import ./stpproto
import ./por
type
  # Filesystem-backed store for PoR messages and per-block tags
  StpStore* = object
    authDir*: string  # root directory for all stored proofs
    postfixLen*: int  # number of trailing Cid chars used for sharding

# Sharded path for a cid: <authDir>/<last postfixLen chars of cid>/<cid>
template stpPath*(self: StpStore, cid: Cid): string =
  self.authDir / ($cid)[^self.postfixLen..^1] / $cid
proc retrieve*(
    self: StpStore,
    cid: Cid
): Future[?!PorMessage] {.async.} =
  ## Retrieve authenticators from data store
  ##
  let path = self.stpPath(cid) / "por"
  var data: seq[byte]
  if (
    let res = io2.readFile(path, data);
    res.isErr):
    let error = io2.ioErrorMsg(res.error)
    trace "Cannot retrieve storage proof data from fs", path , error
    return failure("Cannot retrieve storage proof data from fs")

  # decode the protobuf payload; map ProtoError into a catchable failure
  return PorMessage.decode(data).mapFailure(CatchableError)
proc store*(
    self: StpStore,
    por: PorMessage,
    cid: Cid
): Future[?!void] {.async.} =
  ## Persist a PoR message for `cid` under <stpPath(cid)>/por.
  let
    dir = self.stpPath(cid)

  if io2.createPath(dir).isErr:
    trace "Unable to create storage proofs prefix dir", dir
    # Bug fix: strformat interpolation is `{dir}`, not `${dir}` — the
    # previous messages contained a stray literal `$`.
    return failure(&"Unable to create storage proofs prefix dir {dir}")

  let path = dir / "por"
  if (
    let res = io2.writeFile(path, por.encode());
    res.isErr):
    let error = io2.ioErrorMsg(res.error)
    trace "Unable to store storage proofs", path, cid, error
    return failure(
      &"Unable to store storage proofs - path = {path} cid = {cid} error = {error}")

  return success()
proc retrieve*(
    self: StpStore,
    cid: Cid,
    blocks: seq[int]
): Future[?!seq[Tag]] {.async.} =
  ## Read the per-block tags for `cid` from the filesystem.
  ## Fails as soon as any requested tag cannot be read.
  var tags: seq[Tag]
  for blockIdx in blocks:
    var tag = Tag(idx: blockIdx)
    let path = self.stpPath(cid) / $blockIdx
    if (
      let res = io2.readFile(path, tag.tag);
      res.isErr):
      let error = io2.ioErrorMsg(res.error)
      trace "Cannot retrieve tags from fs", path , error
      return failure("Cannot retrieve tags from fs")
    tags.add(tag)

  return tags.success
proc store*(
    self: StpStore,
    tags: seq[Tag],
    cid: Cid
): Future[?!void] {.async.} =
  ## Persist per-block tags for `cid`, one file per block index.
  let
    dir = self.stpPath(cid)

  if io2.createPath(dir).isErr:
    trace "Unable to create storage proofs prefix dir", dir
    # Bug fix: strformat interpolation is `{dir}`, not `${dir}` — the
    # previous messages contained a stray literal `$`.
    return failure(&"Unable to create storage proofs prefix dir {dir}")

  for t in tags:
    let path = dir / $t.idx
    if (
      let res = io2.writeFile(path, t.tag);
      res.isErr):
      let error = io2.ioErrorMsg(res.error)
      trace "Unable to store tags", path, cid, error
      return failure(
        &"Unable to store tags - path = {path} cid = {cid} error = {error}")

  return success()
proc init*(
    T: type StpStore,
    authDir: string,
    postfixLen: int = 2
): StpStore =
  ## Create an StpStore rooted at `authDir`.
  ## `postfixLen` controls how many trailing Cid characters form the
  ## sharding prefix directory (see `stpPath`).
  T(authDir: authDir, postfixLen: postfixLen)

View File

@ -1,117 +0,0 @@
import std/sequtils
import pkg/asynctest
import pkg/chronos
import pkg/libp2p/errors
import pkg/contractabi as ca
import pkg/codex/rng
import pkg/codex/chunker
import pkg/codex/storageproofs
import pkg/codex/discovery
import pkg/codex/manifest
import pkg/codex/stores
import pkg/codex/storageproofs as st
import pkg/codex/blocktype as bt
import pkg/codex/streams
import ../examples
import ../helpers
const
  BlockSize = 31'nb * 64
  DataSetSize = BlockSize * 100

asyncchecksuite "Storage Proofs Network":
  let
    hostAddr = ca.Address.example
    blocks = toSeq([1, 5, 10, 14, 20, 12, 22]) # TODO: maybe make them random

  var
    stpNetwork1: StpNetwork
    stpNetwork2: StpNetwork
    switch1: Switch
    switch2: Switch
    discovery1: MockDiscovery
    discovery2: MockDiscovery

    chunker: RandomChunker
    manifest: Manifest
    store: BlockStore
    ssk: st.SecretKey
    spk: st.PublicKey
    porMsg: PorMessage
    cid: Cid
    porStream: StoreStream
    por: PoR
    tags: seq[Tag]

  setup:
    # build a random dataset, derive PoR data and tags from it
    chunker = RandomChunker.new(Rng.instance(), size = DataSetSize.int, chunkSize = BlockSize)
    store = CacheStore.new(cacheSize = DataSetSize, chunkSize = BlockSize)
    manifest = Manifest.new(blockSize = BlockSize).tryGet()
    (spk, ssk) = st.keyGen()

    while (
      let chunk = await chunker.getBytes();
      chunk.len > 0):
      let blk = bt.Block.new(chunk).tryGet()
      manifest.add(blk.cid)
      (await store.putBlock(blk)).tryGet()

    cid = manifest.cid.tryGet()
    porStream = StoreStream.new(store, manifest)
    por = await PoR.init(
      porStream,
      ssk, spk,
      BlockSize.int)
    porMsg = por.toMessage()
    tags = blocks.mapIt(
      Tag(idx: it, tag: porMsg.authenticators[it]))

    # two independent hosts: network1 uploads, network2 receives
    switch1 = newStandardSwitch()
    switch2 = newStandardSwitch()
    discovery1 = MockDiscovery.new()
    discovery2 = MockDiscovery.new()
    stpNetwork1 = StpNetwork.new(switch1, discovery1)
    stpNetwork2 = StpNetwork.new(switch2, discovery2)

    switch1.mount(stpNetwork1)
    switch2.mount(stpNetwork2)

    await switch1.start()
    await switch2.start()

  teardown:
    await switch1.stop()
    await switch2.stop()
    await close(porStream)

  test "Should upload to host":
    var
      done = newFuture[void]()

    # discovery resolves the target host to switch2's peer record
    discovery1.findHostProvidersHandler = proc(d: MockDiscovery, host: ca.Address):
      Future[seq[SignedPeerRecord]] {.async, gcsafe.} =
      check hostAddr == host
      return @[switch2.peerInfo.signedPeerRecord]

    proc tagsHandler(msg: TagsMessage) {.async, gcsafe.} =
      check:
        Cid.init(msg.cid).tryGet() == cid
        msg.tags == tags
      done.complete()

    stpNetwork2.tagsHandle = tagsHandler
    (await stpNetwork1.uploadTags(
      cid,
      blocks,
      porMsg.authenticators,
      hostAddr)).tryGet()

    await done.wait(1.seconds)

View File

@ -1,175 +0,0 @@
import pkg/chronos
import pkg/asynctest
import pkg/blscurve/blst/blst_abi
import pkg/codex/streams
import pkg/codex/storageproofs as st
import pkg/codex/stores
import pkg/codex/manifest
import pkg/codex/chunker
import pkg/codex/rng
import pkg/codex/blocktype as bt
import ../helpers
const
  BlockSize = 31'nb * 4
  SectorSize = 31'nb
  SectorsPerBlock = BlockSize div SectorSize
  DataSetSize = BlockSize * 100

asyncchecksuite "BLS PoR":
  var
    chunker: RandomChunker
    manifest: Manifest
    store: BlockStore
    ssk: st.SecretKey
    spk: st.PublicKey
    porStream: StoreStream
    proofStream: StoreStream

  setup:
    # build a random dataset to run the PoR scheme over
    chunker = RandomChunker.new(Rng.instance(), size = DataSetSize.int, chunkSize = BlockSize)
    store = CacheStore.new(cacheSize = DataSetSize, chunkSize = BlockSize)
    manifest = Manifest.new(blockSize = BlockSize).tryGet()
    (spk, ssk) = st.keyGen()
    porStream = StoreStream.new(store, manifest)
    proofStream = StoreStream.new(store, manifest)

    while (
      let chunk = await chunker.getBytes();
      chunk.len > 0):
      let blk = bt.Block.new(chunk).tryGet()
      manifest.add(blk.cid)
      (await store.putBlock(blk)).tryGet()

  teardown:
    await close(porStream)
    await close(proofStream)

  proc createPor(): Future[PoR] =
    return PoR.init(
      porStream,
      ssk,
      spk,
      BlockSize.int)

  proc createProof(por: PoR, q: seq[QElement]): Future[Proof] =
    return generateProof(
      proofStream,
      q,
      por.authenticators,
      SectorsPerBlock)

  test "Test PoR without corruption":
    let
      por = await createPor()
      q = generateQuery(por.tau, 22)
      proof = await createProof(por, q)

    check por.verifyProof(q, proof.mu, proof.sigma)

  # Test name fixed: 30 blocks are corrupted (see the corruptBlocks call
  # and the `pos.len == 30` check below), not 300 as previously stated.
  test "Test PoR with corruption - query: 22, corrupted blocks: 30, bytes: 10":
    let
      por = await createPor()
      pos = await store.corruptBlocks(manifest, 30, 10)
      q = generateQuery(por.tau, 22)
      proof = await createProof(por, q)

    check pos.len == 30
    check not por.verifyProof(q, proof.mu, proof.sigma)
asyncchecksuite "Test Serialization":
  ## Round-trip (de)serialization checks for the PoR wire messages:
  ## public key, tau-zero, tau, authenticators and proof.
  var
    chunker: RandomChunker
    manifest: Manifest
    store: BlockStore
    ssk: st.SecretKey
    spk: st.PublicKey
    por: PoR
    q: seq[QElement]
    proof: Proof
    porStream: StoreStream
    proofStream: StoreStream

  setup:
    chunker = RandomChunker.new(Rng.instance(), size = DataSetSize.int, chunkSize = BlockSize)
    store = CacheStore.new(cacheSize = DataSetSize, chunkSize = BlockSize)
    manifest = Manifest.new(blockSize = BlockSize).tryGet()

    # Populate the store with random data.
    while (
      let chunk = await chunker.getBytes();
      chunk.len > 0):
      let blk = bt.Block.new(chunk).tryGet()
      manifest.add(blk.cid)
      (await store.putBlock(blk)).tryGet()

    (spk, ssk) = st.keyGen()
    porStream = StoreStream.new(store, manifest)
    por = await PoR.init(
      porStream,
      ssk,
      spk,
      BlockSize.int)

    q = generateQuery(por.tau, 22)
    proofStream = StoreStream.new(store, manifest)
    proof = await generateProof(
      proofStream,
      q,
      por.authenticators,
      SectorsPerBlock)

  teardown:
    await close(porStream)
    await close(proofStream)

  test "Serialize Public Key":
    var
      spkMessage = spk.toMessage()
      # Deserialize once and reuse, instead of decoding the message per
      # compared field as before.
      spk2 = spkMessage.fromMessage().tryGet()

    check:
      spk.signkey == spk2.signkey
      spk.key.blst_p2_is_equal(spk2.key).bool

  test "Serialize TauZero":
    var
      tauZeroMessage = por.tau.t.toMessage()
      tauZero = tauZeroMessage.fromMessage().tryGet()

    check:
      por.tau.t.name == tauZero.name
      por.tau.t.n == tauZero.n
      # Guard the indexed loop below against a short deserialized `u`.
      por.tau.t.u.len == tauZero.u.len

    for i in 0..<por.tau.t.u.len:
      check blst_p1_is_equal(por.tau.t.u[i], tauZero.u[i]).bool

  test "Serialize Tau":
    var
      tauMessage = por.tau.toMessage()
      tau = tauMessage.fromMessage().tryGet()

    check:
      por.tau.signature == tau.signature

  test "Serialize PoR":
    let
      porMessage = por.toMessage()
      ppor = porMessage.fromMessage().tryGet()

    # Guard the indexed loop against a short deserialized sequence.
    check por.authenticators.len == ppor.authenticators.len
    for i in 0..<por.authenticators.len:
      check blst_p1_is_equal(por.authenticators[i], ppor.authenticators[i]).bool

  test "Serialize Proof":
    let
      proofMessage = proof.toMessage()
      pproof = proofMessage.fromMessage().tryGet()

    check:
      proof.sigma.blst_p1_is_equal(pproof.sigma).bool
      proof.mu == pproof.mu

    # A deserialized proof must still verify against the original query.
    check por.verifyProof(q, pproof.mu, pproof.sigma)

View File

@ -1,84 +0,0 @@
import std/os
import std/sequtils
import pkg/chronos
import pkg/asynctest
import pkg/codex/rng
import pkg/codex/streams
import pkg/codex/storageproofs as st
import pkg/codex/blocktype as bt
import ../helpers
const
BlockSize = 31'nb * 64'nb # 64 sectors of 31 bytes per block
DataSetSize = BlockSize * 100 # 100 blocks of random test data
asyncchecksuite "Test PoR store":
  ## Persistence tests for storage proofs: store/retrieve the serialized
  ## PoR message and the per-block tags from an on-disk StpStore.
  let
    blocks = @[1, 5, 10, 14, 20, 12, 22] # TODO: maybe make them random

  var
    chunker: RandomChunker
    manifest: Manifest
    store: BlockStore
    ssk: st.SecretKey
    spk: st.PublicKey
    repoDir: string
    stpstore: st.StpStore
    porStream: StoreStream
    por: PoR
    porMsg: PorMessage
    cid: Cid
    tags: seq[Tag]

  setup:
    chunker = RandomChunker.new(Rng.instance(), size = DataSetSize.int, chunkSize = BlockSize)
    store = CacheStore.new(cacheSize = DataSetSize, chunkSize = BlockSize)
    manifest = Manifest.new(blockSize = BlockSize).tryGet()
    (spk, ssk) = st.keyGen()

    # Populate the store with random data.
    while (
      let chunk = await chunker.getBytes();
      chunk.len > 0):
      let blk = bt.Block.new(chunk).tryGet()
      manifest.add(blk.cid)
      (await store.putBlock(blk)).tryGet()

    cid = manifest.cid.tryGet()
    porStream = StoreStream.new(store, manifest)
    por = await PoR.init(
      porStream,
      ssk, spk,
      BlockSize.int)

    # Serialize once; every test below reuses this message.
    porMsg = por.toMessage()
    tags = blocks.mapIt(
      Tag(idx: it, tag: porMsg.authenticators[it]))

    repoDir = getAppDir() / "stp"
    createDir(repoDir)
    stpstore = st.StpStore.init(repoDir)

  teardown:
    await close(porStream)
    removeDir(repoDir)

  test "Should store Storage Proofs":
    check (await stpstore.store(porMsg, cid)).isOk
    check fileExists(stpstore.stpPath(cid) / "por")

  test "Should retrieve Storage Proofs":
    # Assert the prerequisite store succeeded instead of discarding it.
    check (await stpstore.store(porMsg, cid)).isOk
    check (await stpstore.retrieve(cid)).tryGet() == porMsg

  test "Should store tags":
    check (await stpstore.store(tags, cid)).isOk
    for t in tags:
      check fileExists(stpstore.stpPath(cid) / $t.idx)

  test "Should retrieve tags":
    # Assert the prerequisite store succeeded instead of discarding it.
    check (await stpstore.store(tags, cid)).isOk
    check (await stpstore.retrieve(cid, blocks)).tryGet() == tags

View File

@ -1,5 +0,0 @@
# Aggregates the storage-proofs test suites into a single entry point.
import ./storageproofs/teststpstore
import ./storageproofs/testpor
import ./storageproofs/testnetwork

# The modules above are imported only for their test side effects.
{.warning[UnusedImport]: off.}

View File

@ -1,6 +1,5 @@
import ./codex/teststores
import ./codex/testblockexchange
import ./codex/teststorageproofs
import ./codex/testasyncheapqueue
import ./codex/testchunking
import ./codex/testmanifest