Por serialize (#106)

* move por into storage proofs

* use SeekableStream

* adding serialization for por

* remove leftovers

* add empty block support

* add basic por test

* rename block exchange for consistency

* add storageproofstests

* moving timing to storageproofs

* fix imports

* fix imports

* fix imports

* add top level exports

* move delete blocks helper to helpers

* more import/export fixes

* cleanup

* more import fixes

* fix unused warnings

* detect corrupt blocks tests

* add serialization tests

* move init method around

* bump asynctest

* fix CID version

* get rid of warning

* wip: fix CI

* increase CI timeout
This commit is contained in:
Dmitriy Ryajov 2022-05-23 23:24:15 -06:00 committed by GitHub
parent 6ad7a6bb96
commit 56b80d6f6d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
30 changed files with 697 additions and 190 deletions

View File

@ -36,7 +36,7 @@ jobs:
builder: windows-2019 builder: windows-2019
name: '${{ matrix.target.os }}-${{ matrix.target.cpu }}-${{ matrix.branch }}' name: '${{ matrix.target.os }}-${{ matrix.target.cpu }}-${{ matrix.branch }}'
runs-on: ${{ matrix.builder }} runs-on: ${{ matrix.builder }}
timeout-minutes: 40 timeout-minutes: 60
steps: steps:
- name: Checkout nim-codex - name: Checkout nim-codex
uses: actions/checkout@v2 uses: actions/checkout@v2

View File

@ -59,7 +59,7 @@ template EmptyDigests*: untyped =
.get() .get()
}.toTable, }.toTable,
CIDv1: { CIDv1: {
multiCodec("sha2-256"): EmptyCid[CIDv0] multiCodec("sha2-256"): EmptyCid[CIDv1]
.catch .catch
.get()[multiCodec("sha2-256")] .get()[multiCodec("sha2-256")]
.catch .catch

View File

@ -1,5 +1,5 @@
import pkg/ethers import pkg/ethers
import ../por/timing/proofs import ../storageproofs/timing/proofs
import ./storage import ./storage
export proofs export proofs

View File

@ -1 +0,0 @@
Nim implementation of Proof of Storage related schemes

View File

@ -1,9 +0,0 @@
import times, strutils
export strutils.formatFloat
template benchmark*(benchmarkName: string, code: untyped) =
let t0 = epochTime()
code
let elapsed = epochTime() - t0
let elapsedStr = elapsed.formatFloat(format = ffDecimal, precision = 3)
echo "CPU Time [", benchmarkName, "] ", elapsedStr, "s"

View File

@ -1 +0,0 @@
The quick brown fox jumps over the lazy dog!

View File

@ -1,37 +0,0 @@
## Nim-POS
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import por
import benchmark
import strutils
const sectorsperblock = 1024.int64
const querylen = 22
proc testbls() : bool =
benchmark "Key generation":
let (spk, ssk) = por.keygen()
benchmark "Auth generation (s=" & $sectorsperblock & ")":
let (tau, authenticators) = por.setup(ssk, sectorsperblock, "example.txt")
#echo "Auth: ", authenticators
benchmark "Generating challenge (q=" & $querylen & ")":
let q = por.generateQuery(tau, spk, querylen)
#echo "Generated!" #, " q:", q
benchmark "Issuing proof":
let (mu, sigma) = por.generateProof(q, authenticators, spk, sectorsperblock, "example.txt")
#echo "Issued!" #, " mu:", mu, " sigma:", sigma
benchmark "Verifying proof":
result = por.verifyProof(tau, q, mu, sigma, spk)
echo "Result: ", result
let r = testbls()

View File

@ -2,11 +2,11 @@ import std/sets
import pkg/upraises import pkg/upraises
import pkg/questionable import pkg/questionable
import pkg/chronicles import pkg/chronicles
import ./por/timing/proofs import ./storageproofs
import ./clock import ./clock
export sets export sets
export proofs export storageproofs
type type
Proving* = ref object Proving* = ref object

View File

@ -47,10 +47,10 @@ proc decodeString(T: type Cid, value: string): Result[Cid, cstring] =
.init(value) .init(value)
.mapErr do(e: CidError) -> cstring: .mapErr do(e: CidError) -> cstring:
case e case e
of CidError.Incorrect: "Incorrect Cid" of CidError.Incorrect: "Incorrect Cid".cstring
of CidError.Unsupported: "Unsupported Cid" of CidError.Unsupported: "Unsupported Cid".cstring
of CidError.Overrun: "Overrun Cid" of CidError.Overrun: "Overrun Cid".cstring
else: "Error parsing Cid" else: "Error parsing Cid".cstring
proc encodeString(peerId: PeerID): Result[string, cstring] = proc encodeString(peerId: PeerID): Result[string, cstring] =
ok($peerId) ok($peerId)

4
codex/storageproofs.nim Normal file
View File

@ -0,0 +1,4 @@
import ./storageproofs/por
import ./storageproofs/timing
export por, timing

View File

@ -0,0 +1,4 @@
import ./por/serialization
import ./por/por
export por, serialization

View File

@ -39,7 +39,7 @@
# Our implementation uses additive cyclic groups instead of the multiplicative # Our implementation uses additive cyclic groups instead of the multiplicative
# cyclic group in the paper, thus changing the name of the group operation as in # cyclic group in the paper, thus changing the name of the group operation as in
# blscurve and blst. Thus, point multiplication becomes point addition, and scalar # blscurve and blst. Thus, point multiplication becomes point addition, and scalar
# exponentiation becomes scalar multiplication. # exponentiation becomes scalar multiplication.
# #
# Number of operations: # Number of operations:
# The following table summarizes the number of operations in different phases # The following table summarizes the number of operations in different phases
@ -77,77 +77,108 @@
# q * (8 + 48) bytes # q * (8 + 48) bytes
# The size of the proof is instead # The size of the proof is instead
# s * 32 + 48 bytes # s * 32 + 48 bytes
import std/endians
import blscurve import pkg/chronos
import blscurve/blst/blst_abi import pkg/blscurve
import ../rng import pkg/blscurve/blst/blst_abi
import endians
import ../../rng
import ../../streams
# sector size in bytes. Must be smaller than the subgroup order r # sector size in bytes. Must be smaller than the subgroup order r
# which is 255 bits long for BLS12-381 # which is 255 bits long for BLS12-381
const bytespersector = 31 const
BytesPerSector* = 31
# length in bytes of the unique (random) name # length in bytes of the unique (random) name
const namelen = 512 Namelen = 512
type type
# a single sector # a single sector
ZChar = array[bytespersector, byte] ZChar* = array[BytesPerSector, byte]
# secret key combining the metadata signing key and the POR generation key # secret key combining the metadata signing key and the POR generation key
SecretKey = object SecretKey* = object
signkey: blscurve.SecretKey signkey*: blscurve.SecretKey
key: blst_scalar key*: blst_scalar
# public key combining the metadata signing key and the POR validation key # public key combining the metadata signing key and the POR validation key
PublicKey = object PublicKey* = object
signkey: blscurve.PublicKey signkey*: blscurve.PublicKey
key: blst_p2 key*: blst_p2
# POR metadata (called "file tag t_0" in the original paper) # POR metadata (called "file tag t_0" in the original paper)
TauZero = object TauZero* = object
name: array[namelen, byte] name*: array[Namelen, byte]
n: int64 n*: int64
u: seq[blst_p1] u*: seq[blst_p1]
# signed POR metadata (called "signed file tag t" in the original paper) # signed POR metadata (called "signed file tag t" in the original paper)
Tau = object Tau* = object
t: TauZero t*: TauZero
signature: array[96, byte] signature*: array[96, byte]
Proof* = object
mu*: seq[blst_scalar]
sigma*: blst_p1
# PoR query element # PoR query element
QElement = object QElement* = object
I: int64 I*: int64
V: blst_scalar V*: blst_scalar
PoR* = object
ssk*: SecretKey
spk*: PublicKey
tau*: Tau
authenticators*: seq[blst_p1]
proc fromBytesBE(a: array[32, byte]): blst_scalar = proc fromBytesBE(a: array[32, byte]): blst_scalar =
## Convert data to blst native form ## Convert data to blst native form
##
blst_scalar_from_bendian(result, a) blst_scalar_from_bendian(result, a)
doAssert(blst_scalar_fr_check(result).bool) doAssert(blst_scalar_fr_check(result).bool)
proc fromBytesBE(a: openArray[byte]): blst_scalar = proc fromBytesBE(a: openArray[byte]): blst_scalar =
## Convert data to blst native form ## Convert data to blst native form
##
var b: array[32, byte] var b: array[32, byte]
doAssert(a.len <= b.len) doAssert(a.len <= b.len)
let d = b.len - a.len let d = b.len - a.len
for i in 0 ..< a.len: for i in 0..<a.len:
b[i+d] = a[i] b[i+d] = a[i]
blst_scalar_from_bendian(result, b) blst_scalar_from_bendian(result, b)
doAssert(blst_scalar_fr_check(result).bool) doAssert(blst_scalar_fr_check(result).bool)
proc getSector(f: File, blockid: int64, sectorid: int64, spb: int64): ZChar = proc getSector(
stream: SeekableStream,
blockId: int64,
sectorId: int64,
spb: int64): Future[ZChar] {.async.} =
## Read file sector at given <blockid, sectorid> position ## Read file sector at given <blockid, sectorid> position
f.setFilePos((blockid * spb + sectorid) * sizeof(result)) ##
let r = f.readBytes(result, 0, sizeof(result))
var res: ZChar
stream.setPos(((blockid * spb + sectorid) * ZChar.len).int)
discard await stream.readOnce(addr res[0], ZChar.len)
return res
proc rndScalar(): blst_scalar = proc rndScalar(): blst_scalar =
## Generate random scalar within the subgroup order r ## Generate random scalar within the subgroup order r
var scal{.noInit.}: array[32, byte] ##
var scalar{.noInit.}: blst_scalar
var scal {.noInit.}: array[32, byte]
var scalar {.noInit.}: blst_scalar
while true: while true:
for val in scal.mitems: for val in scal.mitems:
val = byte Rng.instance.rand(0xFF) val = byte Rng.instance.rand(0xFF)
scalar.blst_scalar_from_bendian(scal) scalar.blst_scalar_from_bendian(scal)
if blst_scalar_fr_check(scalar).bool: if blst_scalar_fr_check(scalar).bool:
break break
@ -156,55 +187,77 @@ proc rndScalar(): blst_scalar =
proc rndP2(): (blst_p2, blst_scalar) = proc rndP2(): (blst_p2, blst_scalar) =
## Generate random point on G2 ## Generate random point on G2
var x{.noInit.}: blst_p2 ##
var
x {.noInit.}: blst_p2
x.blst_p2_from_affine(BLS12_381_G2) # init from generator x.blst_p2_from_affine(BLS12_381_G2) # init from generator
let scalar = rndScalar()
let
scalar = rndScalar()
x.blst_p2_mult(x, scalar, 255) x.blst_p2_mult(x, scalar, 255)
return (x, scalar) return (x, scalar)
proc rndP1(): (blst_p1, blst_scalar) = proc rndP1(): (blst_p1, blst_scalar) =
## Generate random point on G1 ## Generate random point on G1
var x{.noInit.}: blst_p1 var
x {.noInit.}: blst_p1
x.blst_p1_from_affine(BLS12_381_G1) # init from generator x.blst_p1_from_affine(BLS12_381_G1) # init from generator
let scalar = rndScalar()
let
scalar = rndScalar()
x.blst_p1_mult(x, scalar, 255) x.blst_p1_mult(x, scalar, 255)
return (x, scalar) return (x, scalar)
proc posKeygen(): (blst_p2, blst_scalar) = template posKeygen(): (blst_p2, blst_scalar) =
## Generate POS key pair ## Generate POS key pair
##
rndP2() rndP2()
proc keygen*(): (PublicKey, SecretKey) = proc keyGen*(): (PublicKey, SecretKey) =
## Generate key pair for signing metadata and for POS tags ## Generate key pair for signing metadata and for POS tags
var pk: PublicKey ##
var sk: SecretKey
var ikm: array[32, byte] var
pk: PublicKey
sk: SecretKey
ikm: array[32, byte]
for b in ikm.mitems: for b in ikm.mitems:
b = byte Rng.instance.rand(0xFF) b = byte Rng.instance.rand(0xFF)
doAssert ikm.keyGen(pk.signkey, sk.signkey) doAssert ikm.keyGen(pk.signkey, sk.signkey)
(pk.key, sk.key) = posKeygen() (pk.key, sk.key) = posKeygen()
return (pk, sk) return (pk, sk)
proc split(f: File, s: int64): int64 = proc sectorsCount(stream: SeekableStream, s: int64): int64 =
## Calculate number of blocks for a file ## Calculate number of blocks for a file
let size = f.getFileSize() ##
let n = ((size - 1) div (s * sizeof(ZChar))) + 1
echo "File size=", size, " bytes", let
", blocks=", n, size = stream.size()
", sectors/block=", $s, n = ((size - 1) div (s * sizeof(ZChar))) + 1
", sectorsize=", $sizeof(ZChar), " bytes" # debugEcho "File size=", size, " bytes",
# ", blocks=", n,
# ", sectors/block=", $s,
# ", sectorsize=", $sizeof(ZChar), " bytes"
return n return n
proc hashToG1[T: byte|char](msg: openArray[T]): blst_p1 = proc hashToG1[T: byte|char](msg: openArray[T]): blst_p1 =
## Hash to curve with Codex specific domain separation ## Hash to curve with Dagger specific domain separation
const dst = "CODEX-PROOF-OF-CONCEPT" ##
const dst = "DAGGER-PROOF-OF-CONCEPT"
result.blst_hash_to_g1(msg, dst, aug = "") result.blst_hash_to_g1(msg, dst, aug = "")
proc hashNameI(name: array[namelen, byte], i: int64): blst_p1 = proc hashNameI(name: array[Namelen, byte], i: int64): blst_p1 =
## Calculate unique filname and block index based hash ## Calculate unique filename and block index based hash
##
# # naive implementation, hashing a long string representation # # naive implementation, hashing a long string representation
# # such as "[255, 242, 23]1" # # such as "[255, 242, 23]1"
@ -216,20 +269,33 @@ proc hashNameI(name: array[namelen, byte], i: int64): blst_p1 =
bigEndian64(addr(namei[sizeof(name)]), unsafeAddr(i)) bigEndian64(addr(namei[sizeof(name)]), unsafeAddr(i))
return hashToG1(namei) return hashToG1(namei)
proc generateAuthenticatorNaive(i: int64, s: int64, t: TauZero, f: File, ssk: SecretKey): blst_p1 = proc generateAuthenticatorNaive(
stream: SeekableStream,
ssk: SecretKey,
i: int64,
s: int64,
t: TauZero): Future[blst_p1] {.async.} =
## Naive implementation of authenticator as in the S&W paper. ## Naive implementation of authenticator as in the S&W paper.
## With the paper's multiplicative notation: ## With the paper's multiplicative notation:
## \sigmai=\(H(file||i)\cdot\prod{j=0}^{s-1}{uj^{m[i][j]}})^{\alpha} ## \sigmai=\(H(file||i)\cdot\prod{j=0}^{s-1}{uj^{m[i][j]}})^{\alpha}
##
var sum: blst_p1 var sum: blst_p1
for j in 0 ..< s: for j in 0..<s:
var prod: blst_p1 var prod: blst_p1
prod.blst_p1_mult(t.u[j], fromBytesBE(getSector(f, i, j, s)), 255) prod.blst_p1_mult(t.u[j], fromBytesBE((await stream.getSector(i, j, s))), 255)
sum.blst_p1_add_or_double(sum, prod) sum.blst_p1_add_or_double(sum, prod)
blst_p1_add_or_double(result, hashNameI(t.name, i), sum) blst_p1_add_or_double(result, hashNameI(t.name, i), sum)
result.blst_p1_mult(result, ssk.key, 255) result.blst_p1_mult(result, ssk.key, 255)
proc generateAuthenticatorOpt(i: int64, s: int64, t: TauZero, ubase: openArray[blst_scalar], f: File, ssk: SecretKey): blst_p1 = proc generateAuthenticatorOpt(
stream: SeekableStream,
ssk: SecretKey,
i: int64,
s: int64,
t: TauZero,
ubase: seq[blst_scalar]): Future[blst_p1] {.async.} =
## Optimized implementation of authenticator generation ## Optimized implementation of authenticator generation
## This implementation reduces the number of scalar multiplications ## This implementation reduces the number of scalar multiplications
## from s+1 to 1+1 , using knowledge about the scalars (r_j) ## from s+1 to 1+1 , using knowledge about the scalars (r_j)
@ -237,12 +303,14 @@ proc generateAuthenticatorOpt(i: int64, s: int64, t: TauZero, ubase: openArray[b
## ##
## With the paper's multiplicative notation, we use: ## With the paper's multiplicative notation, we use:
## (H(file||i)\cdot g^{\sum{j=0}^{s-1}{r_j \cdot m[i][j]}})^{\alpha} ## (H(file||i)\cdot g^{\sum{j=0}^{s-1}{r_j \cdot m[i][j]}})^{\alpha}
##
var sum: blst_fr var sum: blst_fr
var sums: blst_scalar var sums: blst_scalar
for j in 0 ..< s: for j in 0..<s:
var a, b, x: blst_fr var a, b, x: blst_fr
a.blst_fr_from_scalar(ubase[j]) a.blst_fr_from_scalar(ubase[j])
b.blst_fr_from_scalar(fromBytesBE(getSector(f, i, j, s))) b.blst_fr_from_scalar(fromBytesBE((await stream.getSector(i, j, s))))
x.blst_fr_mul(a, b) x.blst_fr_mul(a, b)
sum.blst_fr_add(sum, x) sum.blst_fr_add(sum, x)
sums.blst_scalar_from_fr(sum) sums.blst_scalar_from_fr(sum)
@ -253,88 +321,89 @@ proc generateAuthenticatorOpt(i: int64, s: int64, t: TauZero, ubase: openArray[b
result.blst_p1_add_or_double(result, hashNameI(t.name, i)) result.blst_p1_add_or_double(result, hashNameI(t.name, i))
result.blst_p1_mult(result, ssk.key, 255) result.blst_p1_mult(result, ssk.key, 255)
proc generateAuthenticator(i: int64, s: int64, t: TauZero, ubase: openArray[blst_scalar], f: File, ssk: SecretKey): blst_p1 = proc generateAuthenticator(
stream: SeekableStream,
ssk: SecretKey,
i: int64,
s: int64,
t: TauZero,
ubase: seq[blst_scalar]): Future[blst_p1] =
## Wrapper to select tag generator implementation ## Wrapper to select tag generator implementation
##
# let a = generateAuthenticatorNaive(i, s, t, f, ssk) # let a = generateAuthenticatorNaive(i, s, t, f, ssk)
let b = generateAuthenticatorOpt(i, s, t, ubase, f, ssk) return generateAuthenticatorOpt(stream, ssk, i, s, t, ubase)
# doAssert(a.blst_p1_is_equal(b).bool) # doAssert(a.blst_p1_is_equal(b).bool)
return b
proc setup*(ssk: SecretKey, s:int64, filename: string): (Tau, seq[blst_p1]) = proc generateQuery*(tau: Tau, l: int): seq[QElement] =
## Set up the POR scheme by generating tags and metadata ## Generate a random BLS query of given size
let file = open(filename) ##
let n = split(file, s)
var t = TauZero(n: n)
# generate a random name
for i in 0 ..< 512 :
t.name[i] = byte Rng.instance.rand(0xFF)
# generate the coefficient vector for combining sectors of a block: U
var ubase: seq[blst_scalar]
for i in 0 ..< s :
let (u, ub) = rndP1()
t.u.add(u)
ubase.add(ub)
#TODO: a better bytearray conversion of TauZero for the signature might be needed
# the current conversion using $t might be architecture dependent and not unique
let signature = sign(ssk.signkey, $t)
let tau = Tau(t: t, signature: signature.exportRaw())
#generate sigmas
var sigmas: seq[blst_p1]
for i in 0 ..< n :
sigmas.add(generateAuthenticator(i, s, t, ubase, file, ssk))
file.close()
result = (tau, sigmas)
proc generateQuery*(tau: Tau, spk: PublicKey, l: int): seq[QElement] =
## Generate a random BLS query of given size
let n = tau.t.n # number of blocks let n = tau.t.n # number of blocks
for i in 0 ..< l : for i in 0..<l:
var q: QElement var q: QElement
q.I = Rng.instance.rand(n-1) #TODO: dedup q.I = Rng.instance.rand(n-1) #TODO: dedup
q.V = rndScalar() #TODO: fix range q.V = rndScalar() #TODO: fix range
result.add(q) result.add(q)
proc generateProof*(q: openArray[QElement], authenticators: openArray[blst_p1], spk: PublicKey, s: int64, filename: string): (seq[blst_scalar], blst_p1) = proc generateProof*(
stream: SeekableStream,
q: seq[QElement],
authenticators: seq[blst_p1],
s: int64): Future[Proof] {.async.} =
## Generate BLS proofs for a given query ## Generate BLS proofs for a given query
let file = open(filename) ##
var
mu: seq[blst_scalar]
for j in 0..<s:
var
muj: blst_fr
for qelem in q:
let
sect = fromBytesBE((await stream.getSector(qelem.I, j, s)))
var
x, v, sector: blst_fr
var mu: seq[blst_scalar]
for j in 0 ..< s :
var muj: blst_fr
for qelem in q :
var x, v, sector: blst_fr
let sect = fromBytesBE(getSector(file, qelem.I, j, s))
sector.blst_fr_from_scalar(sect) sector.blst_fr_from_scalar(sect)
v.blst_fr_from_scalar(qelem.V) v.blst_fr_from_scalar(qelem.V)
x.blst_fr_mul(v, sector) x.blst_fr_mul(v, sector)
muj.blst_fr_add(muj, x) muj.blst_fr_add(muj, x)
var mujs: blst_scalar
var
mujs: blst_scalar
mujs.blst_scalar_from_fr(muj) mujs.blst_scalar_from_fr(muj)
mu.add(mujs) mu.add(mujs)
var sigma: blst_p1 var
sigma: blst_p1
for qelem in q: for qelem in q:
var prod: blst_p1 var
prod: blst_p1
prod.blst_p1_mult(authenticators[qelem.I], qelem.V, 255) prod.blst_p1_mult(authenticators[qelem.I], qelem.V, 255)
sigma.blst_p1_add_or_double(sigma, prod) sigma.blst_p1_add_or_double(sigma, prod)
file.close() return Proof(mu: mu, sigma: sigma)
return (mu, sigma)
proc pairing(a: blst_p1, b: blst_p2): blst_fp12 = proc pairing(a: blst_p1, b: blst_p2): blst_fp12 =
## Calculate pairing G_1,G_2 -> G_T ## Calculate pairing G_1,G_2 -> G_T
var aa: blst_p1_affine ##
var bb: blst_p2_affine
var
aa: blst_p1_affine
bb: blst_p2_affine
l: blst_fp12
blst_p1_to_affine(aa, a) blst_p1_to_affine(aa, a)
blst_p2_to_affine(bb, b) blst_p2_to_affine(bb, b)
var l: blst_fp12
blst_miller_loop(l, bb, aa) blst_miller_loop(l, bb, aa)
blst_final_exp(result, l) blst_final_exp(result, l)
@ -346,6 +415,8 @@ proc verifyPairingsNaive(a1: blst_p1, a2: blst_p2, b1: blst_p1, b2: blst_p2) : b
proc verifyPairingsNeg(a1: blst_p1, a2: blst_p2, b1: blst_p1, b2: blst_p2) : bool = proc verifyPairingsNeg(a1: blst_p1, a2: blst_p2, b1: blst_p1, b2: blst_p2) : bool =
## Faster pairing verification using 2 miller loops but only one final exponentiation ## Faster pairing verification using 2 miller loops but only one final exponentiation
## based on https://github.com/benjaminion/c-kzg/blob/main/src/bls12_381.c ## based on https://github.com/benjaminion/c-kzg/blob/main/src/bls12_381.c
##
var var
loop0, loop1, gt_point: blst_fp12 loop0, loop1, gt_point: blst_fp12
aa1, bb1: blst_p1_affine aa1, bb1: blst_p1_affine
@ -369,29 +440,37 @@ proc verifyPairingsNeg(a1: blst_p1, a2: blst_p2, b1: blst_p1, b2: blst_p2) : boo
proc verifyPairings(a1: blst_p1, a2: blst_p2, b1: blst_p1, b2: blst_p2) : bool = proc verifyPairings(a1: blst_p1, a2: blst_p2, b1: blst_p1, b2: blst_p2) : bool =
## Wrapper to select verify pairings implementation ## Wrapper to select verify pairings implementation
##
verifyPairingsNaive(a1, a2, b1, b2) verifyPairingsNaive(a1, a2, b1, b2)
#verifyPairingsNeg(a1, a2, b1, b2) #verifyPairingsNeg(a1, a2, b1, b2)
proc verifyProof*(tau: Tau, q: openArray[QElement], mus: openArray[blst_scalar], sigma: blst_p1, spk: PublicKey): bool = proc verifyProof*(
self: PoR,
q: seq[QElement],
mus: seq[blst_scalar],
sigma: blst_p1): bool =
## Verify a BLS proof given a query ## Verify a BLS proof given a query
##
# verify signature on Tau # verify signature on Tau
var signature: Signature var signature: blscurve.Signature
if not signature.fromBytes(tau.signature): if not signature.fromBytes(self.tau.signature):
return false return false
if not verify(spk.signkey, $tau.t, signature):
if not verify(self.spk.signkey, $self.tau.t, signature):
return false return false
var first: blst_p1 var first: blst_p1
for qelem in q : for qelem in q:
var prod: blst_p1 var prod: blst_p1
prod.blst_p1_mult(hashNameI(tau.t.name, qelem.I), qelem.V, 255) prod.blst_p1_mult(hashNameI(self.tau.t.name, qelem.I), qelem.V, 255)
first.blst_p1_add_or_double(first, prod) first.blst_p1_add_or_double(first, prod)
doAssert(blst_p1_on_curve(first).bool) doAssert(blst_p1_on_curve(first).bool)
let us = tau.t.u let us = self.tau.t.u
var second: blst_p1 var second: blst_p1
for j in 0 ..< len(us) : for j in 0..<len(us):
var prod: blst_p1 var prod: blst_p1
prod.blst_p1_mult(us[j], mus[j], 255) prod.blst_p1_mult(us[j], mus[j], 255)
second.blst_p1_add_or_double(second, prod) second.blst_p1_add_or_double(second, prod)
@ -400,7 +479,55 @@ proc verifyProof*(tau: Tau, q: openArray[QElement], mus: openArray[blst_scalar],
var sum: blst_p1 var sum: blst_p1
sum.blst_p1_add_or_double(first, second) sum.blst_p1_add_or_double(first, second)
var g{.noInit.}: blst_p2 var g {.noInit.}: blst_p2
g.blst_p2_from_affine(BLS12_381_G2) g.blst_p2_from_affine(BLS12_381_G2)
return verifyPairings(sum, spk.key, sigma, g) return verifyPairings(sum, self.spk.key, sigma, g)
proc init*(
T: type PoR,
stream: SeekableStream,
ssk: SecretKey,
spk: PublicKey,
blockSize: int64): Future[PoR] {.async.} =
## Set up the POR scheme by generating tags and metadata
##
doAssert(
(blockSize mod BytesPerSector) == 0,
"Block size should be divisible by `BytesPerSector`")
let
s = blockSize div BytesPerSector
n = stream.sectorsCount(s)
# generate a random name
var t = TauZero(n: n)
for i in 0..<Namelen:
t.name[i] = byte Rng.instance.rand(0xFF)
# generate the coefficient vector for combining sectors of a block: U
var ubase: seq[blst_scalar]
for i in 0..<s:
let (u, ub) = rndP1()
t.u.add(u)
ubase.add(ub)
#TODO: a better bytearray conversion of TauZero for the signature might be needed
# the current conversion using $t might be architecture dependent and not unique
let
signature = sign(ssk.signkey, $t)
tau = Tau(t: t, signature: signature.exportRaw())
# generate sigmas
var
sigmas: seq[blst_p1]
for i in 0..<n:
sigmas.add((await stream.generateAuthenticator(ssk, i, s, t, ubase)))
return PoR(
ssk: ssk,
spk: spk,
tau: tau,
authenticators: sigmas)

View File

@ -0,0 +1,3 @@
import ./serialization/serialization
export serialization

View File

@ -0,0 +1,33 @@
syntax = "proto3";
message PoREnvelope {
message TauZeroMessage {
bytes name = 1;
int64 n = 2;
repeated bytes u = 3;
}
message TauMessage {
TauZeroMessage t = 1;
bytes signature = 2;
}
message ProofMessage {
repeated bytes mu = 1;
bytes sigma = 2;
}
message PubKeyMessage {
bytes signkey = 1;
bytes key = 2;
}
message PorMessage {
TauMessage tau = 1;
PubKeyMessage spk = 2;
repeated bytes authenticators = 3;
}
PorMessage por = 1;
ProofMessage proof = 2;
}

View File

@ -0,0 +1,170 @@
## Nim-POS
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/sequtils
import pkg/protobuf_serialization
import pkg/stew/results
import pkg/stew/objects
import pkg/blscurve
import pkg/blscurve/blst/blst_abi
import_proto3 "por.proto"
export TauZeroMessage
export TauMessage
export ProofMessage
export PorMessage
export PoREnvelope
import ../por
func toMessage*(self: Proof): ProofMessage =
var
message = ProofMessage()
sigma: array[96, byte]
for mu in self.mu:
var
serialized: array[32, byte]
blst_bendian_from_scalar(serialized, mu)
message.mu.add(toSeq(serialized))
blst_p1_serialize(sigma, self.sigma)
message.sigma = toSeq(sigma)
message
func fromMessage*(self: ProofMessage): Result[Proof, string] =
var
proof = Proof()
sigmaAffine: blst_p1_affine
if blst_p1_deserialize(sigmaAffine, toArray(96, self.sigma)) != BLST_SUCCESS:
return err("Unable to decompress sigma")
blst_p1_from_affine(proof.sigma, sigmaAffine)
for mu in self.mu:
var
muScalar: blst_scalar
blst_scalar_from_bendian(muScalar, toArray(32, mu))
proof.mu.add(muScalar)
ok(proof)
func toMessage*(self: TauZero): TauZeroMessage =
var
message = TauZeroMessage(
name: toSeq(self.name),
n: self.n)
for u in self.u:
var
serialized: array[96, byte]
# serialized and compresses the points
blst_p1_serialize(serialized, u)
message.u.add(toSeq(serialized))
message
func fromMessage*(self: TauZeroMessage): Result[TauZero, string] =
var
tauZero: TauZero
tauZero.name = toArray(512, self.name)
tauZero.n = self.n
for u in self.u:
var
uuAffine: blst_p1_affine
uu: blst_p1
if blst_p1_deserialize(uuAffine, toArray(96, u)) != BLST_SUCCESS:
return err("Unable to decompress u")
blst_p1_from_affine(uu, uuAffine)
tauZero.u.add(uu)
ok(tauZero)
func toMessage*(self: Tau): TauMessage =
TauMessage(
t: self.t.toMessage(),
signature: toSeq(self.signature)) # signature is already in serialized form
func fromMessage*(self: TauMessage): Result[Tau, string] =
var
message = Tau(
t: ? self.t.fromMessage(),
signature: toArray(96, self.signature))
ok(message)
func toMessage*(self: por.PublicKey): PubKeyMessage =
var
signkey = toSeq(self.signkey.exportUncompressed())
message = PubKeyMessage(signkey: signkey)
key: array[192, byte]
blst_p2_serialize(key, self.key)
message.key = toSeq(key)
message
func fromMessage*(self: PubKeyMessage): Result[por.PublicKey, string] =
var
spk: por.PublicKey
keyAffine: blst_p2_affine
if not spk.signkey.fromBytes(self.signkey.toOpenArray(0, 95)):
return err("Unable to deserialize public key!")
if blst_p2_deserialize(keyAffine, toArray(192, self.key)) != BLST_SUCCESS:
return err("Unable to decompress key!")
blst_p2_from_affine(spk.key, keyAffine)
ok(spk)
func toMessage*(self: PoR): PorMessage =
var
message = PorMessage(
tau: self.tau.toMessage(),
spk: self.spk.toMessage())
for sigma in self.authenticators:
var
serialized: array[96, byte]
blst_p1_serialize(serialized, sigma)
message.authenticators.add(toSeq(serialized))
message
func fromMessage*(self: PorMessage): Result[PoR, string] =
var
por = PoR(
tau: ? self.tau.fromMessage(),
spk: ? self.spk.fromMessage())
for sigma in self.authenticators:
var
sigmaAffine: blst_p1_affine
authenticator: blst_p1
if blst_p1_deserialize(sigmaAffine, toArray(96, sigma)) != BLST_SUCCESS:
return err("Unable to decompress sigma")
blst_p1_from_affine(authenticator, sigmaAffine)
por.authenticators.add(authenticator)
return ok(por)

View File

@ -0,0 +1,4 @@
import ./timing/periods
import ./timing/proofs
export periods, proofs

View File

@ -1,4 +1,4 @@
## Nim-Codex ## Nim-Dagger
## Copyright (c) 2022 Status Research & Development GmbH ## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of ## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
@ -25,12 +25,13 @@ import ./seekablestream
export stores, blocktype, manifest, chronos export stores, blocktype, manifest, chronos
logScope: logScope:
topics = "codex storestream" topics = "dagger storestream"
type type
StoreStream* = ref object of SeekableStream StoreStream* = ref object of SeekableStream
store*: BlockStore store*: BlockStore
manifest*: Manifest manifest*: Manifest
emptyBlock*: seq[byte]
proc new*( proc new*(
T: type StoreStream, T: type StoreStream,
@ -39,7 +40,8 @@ proc new*(
result = T( result = T(
store: store, store: store,
manifest: manifest, manifest: manifest,
offset: 0) offset: 0,
emptyBlock: newSeq[byte](manifest.blockSize))
result.initStream() result.initStream()
@ -57,12 +59,12 @@ method readOnce*(
var var
read = 0 read = 0
trace "Reading from manifest", cid = self.manifest.cid.get(), blocks = self.manifest.len
while read < nbytes and not self.atEof: while read < nbytes and not self.atEof:
let let
pos = self.offset div self.manifest.blockSize pos = self.offset div self.manifest.blockSize
blk = (await self.store.getBlock(self.manifest[pos])).tryGet() blk = (await self.store.getBlock(self.manifest[pos])).tryGet()
let
blockOffset = blockOffset =
if self.offset >= self.manifest.blockSize: if self.offset >= self.manifest.blockSize:
self.offset mod self.manifest.blockSize self.offset mod self.manifest.blockSize
@ -75,7 +77,15 @@ method readOnce*(
else: else:
min(nbytes - read, self.manifest.blockSize) min(nbytes - read, self.manifest.blockSize)
copyMem(pbytes.offset(read), unsafeAddr blk.data[blockOffset], readBytes) trace "Reading bytes from store stream", pos, cid = blk.cid, bytes = readBytes, blockOffset = blockOffset
copyMem(
pbytes.offset(read),
if blk.isEmpty:
self.emptyBlock[blockOffset].addr
else:
blk.data[blockOffset].addr,
readBytes)
self.offset += readBytes self.offset += readBytes
read += readBytes read += readBytes

View File

@ -5,7 +5,6 @@ import std/tables
import pkg/asynctest import pkg/asynctest
import pkg/chronos import pkg/chronos
import pkg/chronicles
import pkg/libp2p import pkg/libp2p
import pkg/codex/rng import pkg/codex/rng

View File

@ -16,7 +16,6 @@ import pkg/codex/discovery
import pkg/codex/blocktype as bt import pkg/codex/blocktype as bt
import ../../helpers import ../../helpers
import ../../examples
suite "NetworkStore engine - 2 nodes": suite "NetworkStore engine - 2 nodes":
let let

View File

@ -1,6 +1,10 @@
import pkg/chronos
import pkg/libp2p import pkg/libp2p
import pkg/libp2p/varint import pkg/libp2p/varint
import pkg/codex/blocktype import pkg/codex/blocktype as bt
import pkg/codex/stores
import pkg/codex/manifest
import pkg/codex/rng
import ./helpers/nodeutils import ./helpers/nodeutils
import ./helpers/randomchunker import ./helpers/randomchunker
@ -10,8 +14,8 @@ export randomchunker, nodeutils
# NOTE: The meaning of equality for blocks # NOTE: The meaning of equality for blocks
# is changed here, because blocks are now `ref` # is changed here, because blocks are now `ref`
# types. This is only in tests!!! # types. This is only in tests!!!
func `==`*(a, b: Block): bool = func `==`*(a, b: bt.Block): bool =
(a.cid == b.cid) and (a.data == b.data) (a.cid == b.cid) and (a.data == b. data)
proc lenPrefix*(msg: openArray[byte]): seq[byte] = proc lenPrefix*(msg: openArray[byte]): seq[byte] =
## Write `msg` with a varint-encoded length prefix ## Write `msg` with a varint-encoded length prefix
@ -23,3 +27,34 @@ proc lenPrefix*(msg: openArray[byte]): seq[byte] =
buf[vbytes.len..<buf.len] = msg buf[vbytes.len..<buf.len] = msg
return buf return buf
proc corruptBlocks*(
store: BlockStore,
manifest: Manifest,
blks, bytes: int): Future[seq[int]] {.async.} =
var pos: seq[int]
while true:
if pos.len >= blks:
break
var i = -1
if (i = Rng.instance.rand(manifest.len - 1); pos.find(i) >= 0):
continue
pos.add(i)
var
blk = (await store.getBlock(manifest[i])).tryGet()
bytePos: seq[int]
while true:
if bytePos.len > bytes:
break
var ii = -1
if (ii = Rng.instance.rand(blk.data.len - 1); bytePos.find(ii) >= 0):
continue
bytePos.add(ii)
blk.data[ii] = byte 0
return pos

View File

@ -2,7 +2,7 @@ import std/sets
import std/tables import std/tables
import std/sequtils import std/sequtils
import pkg/upraises import pkg/upraises
import pkg/codex/por/timing/proofs import pkg/codex/storageproofs
type type
MockProofs* = ref object of Proofs MockProofs* = ref object of Proofs

View File

@ -0,0 +1,164 @@
import pkg/chronos
import pkg/asynctest
import pkg/blscurve/blst/blst_abi
import pkg/codex/streams
import pkg/codex/storageproofs as st
import pkg/codex/stores
import pkg/codex/manifest
import pkg/codex/chunker
import pkg/codex/rng
import pkg/codex/blocktype as bt
import ../helpers
const
SectorSize = 31
SectorsPerBlock = BlockSize div SectorSize
DataSetSize = BlockSize * 100
suite "BLS PoR":
var
chunker: RandomChunker
manifest: Manifest
store: BlockStore
ssk: st.SecretKey
spk: st.PublicKey
setup:
chunker = RandomChunker.new(Rng.instance(), size = DataSetSize, chunkSize = BlockSize)
store = CacheStore.new(cacheSize = DataSetSize, chunkSize = BlockSize)
manifest = Manifest.new(blockSize = BlockSize).tryGet()
(spk, ssk) = st.keyGen()
while (
let chunk = await chunker.getBytes();
chunk.len > 0):
let
blk = bt.Block.new(chunk).tryGet()
manifest.add(blk.cid)
if not (await store.putBlock(blk)):
raise newException(CatchableError, "Unable to store block " & $blk.cid)
test "Test PoR without corruption":
let
por = await PoR.init(
StoreStream.new(store, manifest),
ssk,
spk,
BlockSize)
q = generateQuery(por.tau, 22)
proof = await generateProof(
StoreStream.new(store, manifest),
q,
por.authenticators,
SectorsPerBlock)
check por.verifyProof(q, proof.mu, proof.sigma)
test "Test PoR with corruption - query: 22, corrupted blocks: 300, bytes: 10":
let
por = await PoR.init(
StoreStream.new(store, manifest),
ssk,
spk,
BlockSize)
pos = await store.corruptBlocks(manifest, 30, 10)
q = generateQuery(por.tau, 22)
proof = await generateProof(
StoreStream.new(store, manifest),
q,
por.authenticators,
SectorsPerBlock)
check pos.len == 30
check not por.verifyProof(q, proof.mu, proof.sigma)
suite "Test Serialization":
var
chunker: RandomChunker
manifest: Manifest
store: BlockStore
ssk: st.SecretKey
spk: st.PublicKey
por: PoR
q: seq[QElement]
proof: Proof
setupAll:
chunker = RandomChunker.new(Rng.instance(), size = DataSetSize, chunkSize = BlockSize)
store = CacheStore.new(cacheSize = DataSetSize, chunkSize = BlockSize)
manifest = Manifest.new(blockSize = BlockSize).tryGet()
while (
let chunk = await chunker.getBytes();
chunk.len > 0):
let
blk = bt.Block.new(chunk).tryGet()
manifest.add(blk.cid)
if not (await store.putBlock(blk)):
raise newException(CatchableError, "Unable to store block " & $blk.cid)
(spk, ssk) = st.keyGen()
por = await PoR.init(
StoreStream.new(store, manifest),
ssk,
spk,
BlockSize)
q = generateQuery(por.tau, 22)
proof = await generateProof(
StoreStream.new(store, manifest),
q,
por.authenticators,
SectorsPerBlock)
test "Serialize Public Key":
var
spkMessage = spk.toMessage()
check:
spk.signkey == spkMessage.fromMessage().tryGet().signkey
spk.key.blst_p2_is_equal(spkMessage.fromMessage().tryGet().key).bool
test "Serialize TauZero":
var
tauZeroMessage = por.tau.t.toMessage()
tauZero = tauZeroMessage.fromMessage().tryGet()
check:
por.tau.t.name == tauZero.name
por.tau.t.n == tauZero.n
for i in 0..<por.tau.t.u.len:
check blst_p1_is_equal(por.tau.t.u[i], tauZero.u[i]).bool
test "Serialize Tau":
var
tauMessage = por.tau.toMessage()
tau = tauMessage.fromMessage().tryGet()
check:
por.tau.signature == tau.signature
test "Serialize PoR":
let
porMessage = por.toMessage()
ppor = porMessage.fromMessage().tryGet()
for i in 0..<por.authenticators.len:
check blst_p1_is_equal(por.authenticators[i], ppor.authenticators[i]).bool
test "Serialize Proof":
let
proofMessage = proof.toMessage()
pproof = proofMessage.fromMessage().tryGet()
check:
proof.sigma.blst_p1_is_equal(pproof.sigma).bool
proof.mu == pproof.mu

View File

@ -0,0 +1,3 @@
import ./storageproofs/testpor
{.warning[UnusedImport]: off.}

View File

@ -8,7 +8,6 @@ import ./helpers
import pkg/codex/streams import pkg/codex/streams
import pkg/codex/stores import pkg/codex/stores
import pkg/codex/manifest import pkg/codex/manifest
import pkg/codex/rng
import pkg/codex/blocktype as bt import pkg/codex/blocktype as bt
suite "StoreStream": suite "StoreStream":

View File

@ -3,7 +3,7 @@ import pkg/chronos
import pkg/nimcrypto import pkg/nimcrypto
import codex/contracts import codex/contracts
import codex/contracts/testtoken import codex/contracts/testtoken
import codex/por/timing/periods import codex/storageproofs
import ../ethertest import ../ethertest
import ./examples import ./examples
import ./time import ./time

View File

@ -1,5 +1,6 @@
import ./codex/teststores import ./codex/teststores
import ./codex/testblockexc import ./codex/testblockexchange
import ./codex/teststorageproofs
import ./codex/testasyncheapqueue import ./codex/testasyncheapqueue
import ./codex/testchunking import ./codex/testchunking
import ./codex/testmanifest import ./codex/testmanifest

2
vendor/asynctest vendored

@ -1 +1 @@
Subproject commit 3882ed64ed3159578f796bc5ae0c6b13837fe798 Subproject commit 5347c59b4b057443a014722aa40800cd8bb95c69