mirror of https://github.com/status-im/nimbus-eth2.git
Add snappy and protobuf-serialization; Use the latest FastStreams API
This commit is contained in:
parent 778c2011e3
commit f34fd46e3a
.gitmodules | 10 ++++++++++
@@ -138,3 +138,13 @@
 	url = https://github.com/status-im/nim-testutils.git
 	ignore = dirty
 	branch = master
+[submodule "vendor/nim-snappy"]
+	path = vendor/nim-snappy
+	url = https://github.com/status-im/nim-snappy.git
+	ignore = dirty
+	branch = master
+[submodule "vendor/nim-protobuf-serialization"]
+	path = vendor/nim-protobuf-serialization
+	url = https://github.com/status-im/nim-protobuf-serialization.git
+	ignore = dirty
+	branch = master
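The two new submodules supply the libraries named in the commit title. As a rough illustration of what nim-snappy offers (a sketch only; the module path and exact proc names are assumptions, not taken from this diff):

  import snappy  # assumed module name from the nim-snappy package

  let payload = @[byte 1, 2, 3, 4]
  let compressed = snappy.encode(payload)        # assumed API
  doAssert snappy.decode(compressed) == payload  # round-trip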
@@ -353,7 +353,7 @@ proc readResponse(
   return await conn.readChunk(MsgType, true, deadline)
 
 proc encodeErrorMsg(responseCode: ResponseCode, errMsg: string): Bytes =
-  var s = init OutputStream
+  var s = memoryOutput()
   s.append byte(responseCode)
   s.appendVarint errMsg.len
   s.appendValue SSZ, errMsg
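The recurring change in this file swaps the old `init OutputStream` constructor for FastStreams' newer `memoryOutput()`. A minimal sketch of the write-then-collect pattern used throughout (only the import path is an assumption; the calls themselves all appear in the diff):

  import faststreams  # assumed import; the module layout varies by version

  var s = memoryOutput()       # in-memory output stream
  s.append byte(1)             # a single byte
  s.appendVarint 300'u64       # varint-encoded length prefix
  s.append @[byte 2, 3]        # raw bytes
  let bytes = s.getOutput      # everything written, as a byte sequence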
@@ -395,7 +395,7 @@ proc sendNotificationMsg(peer: Peer, protocolId: string, requestBytes: Bytes) {.
   defer:
     await safeClose(stream)
 
-  var s = init OutputStream
+  var s = memoryOutput()
   s.appendVarint requestBytes.len.uint64
   s.append requestBytes
   let bytes = s.getOutput
@@ -404,7 +404,7 @@ proc sendNotificationMsg(peer: Peer, protocolId: string, requestBytes: Bytes) {.
 # TODO There is too much duplication in the responder functions, but
 # I hope to reduce this when I increase the reliance on output streams.
 proc sendResponseChunkBytes(responder: UntypedResponder, payload: Bytes) {.async.} =
-  var s = init OutputStream
+  var s = memoryOutput()
   s.append byte(Success)
   s.appendVarint payload.len.uint64
   s.append payload
@@ -412,14 +412,14 @@ proc sendResponseChunkBytes(responder: UntypedResponder, payload: Bytes) {.async
   await responder.stream.write(bytes)
 
 proc sendResponseChunkObj(responder: UntypedResponder, val: auto) {.async.} =
-  var s = init OutputStream
+  var s = memoryOutput()
   s.append byte(Success)
   s.appendValue SSZ, sizePrefixed(val)
   let bytes = s.getOutput
   await responder.stream.write(bytes)
 
 proc sendResponseChunks[T](responder: UntypedResponder, chunks: seq[T]) {.async.} =
-  var s = init OutputStream
+  var s = memoryOutput()
   for chunk in chunks:
     s.append byte(Success)
     s.appendValue SSZ, sizePrefixed(chunk)
@@ -446,7 +446,7 @@ proc makeEth2Request(peer: Peer, protocolId: string, requestBytes: Bytes,
   await safeClose(stream)
 
   # Send the request
-  var s = init OutputStream
+  var s = memoryOutput()
   s.appendVarint requestBytes.len.uint64
   s.append requestBytes
   let bytes = s.getOutput
@@ -32,19 +32,22 @@ const
 
 type
   SszReader* = object
-    stream: ByteStreamVar
+    stream: InputStream
     maxObjectSize: int
 
   SszWriter* = object
-    stream: OutputStreamVar
+    stream: OutputStream
 
   BasicType = char|bool|SomeUnsignedInt|StUint|ValidatorIndex
 
-  SszChunksMerkelizer = ref object of RootObj
+  SszChunksMerkelizer = ref object
     combinedChunks: array[maxChunkTreeDepth, Eth2Digest]
     totalChunks: uint64
     limit: uint64
 
+  SszHashingStream = ref object of OutputStream
+    merkleizer: SszChunksMerkelizer
+
   TypeWithMaxLen*[T; maxLen: static int64] = distinct T
 
   SizePrefixed*[T] = distinct T
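With these type changes, SszWriter wraps an OutputStream directly instead of the old OutputStreamVar handle. A sketch of driving it end to end, using only procs that appear in this diff (the value is hypothetical):

  var stream = memoryOutput()
  var writer = init(SszWriter, stream)  # init* defined further down
  writer.writeValue 42'u64              # fixed-size value
  let encoded = stream.getOutput        # 8 little-endian bytes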
@@ -68,11 +71,11 @@ template sizePrefixed*[TT](x: TT): untyped =
   SizePrefixed[T](x)
 
 proc init*(T: type SszReader,
-           stream: ByteStreamVar,
+           stream: InputStream,
            maxObjectSize = defaultMaxObjectSize): T =
   T(stream: stream, maxObjectSize: maxObjectSize)
 
-proc mount*(F: type SSZ, stream: ByteStreamVar, T: type): T =
+proc mount*(F: type SSZ, stream: InputStream, T: type): T =
   mixin readValue
   var reader = init(SszReader, stream)
   reader.readValue(T)
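The reading side mirrors this: an InputStream feeds an SszReader, either explicitly or through mount. A self-contained sketch (memoryInput is an assumed FastStreams constructor for reading from a buffer):

  let sszBytes = @[byte 42, 0, 0, 0, 0, 0, 0, 0]  # SSZ uint64(42), little-endian
  var input = memoryInput(sszBytes)               # assumed constructor
  doAssert mount(SSZ, input, uint64) == 42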
@@ -108,7 +111,7 @@ template toSszType*(x: auto): auto =
   elif useListType and x is List: seq[x.T](x)
   else: x
 
-func writeFixedSized(c: var WriteCursor, x: auto) =
+proc writeFixedSized(c: var WriteCursor, x: auto) =
   mixin toSszType
 
   when x is byte:
@@ -136,14 +139,14 @@ func writeFixedSized(c: var WriteCursor, x: auto) =
   else:
     unsupported x.type
 
-template writeFixedSized(s: OutputStreamVar, x: auto) =
+template writeFixedSized(s: OutputStream, x: auto) =
   writeFixedSized(s.cursor, x)
 
 template supports*(_: type SSZ, T: type): bool =
   mixin toSszType
   anonConst compiles(fixedPortionSize toSszType(default(T)))
 
-func init*(T: type SszWriter, stream: OutputStreamVar): T =
+func init*(T: type SszWriter, stream: OutputStream): T =
   result.stream = stream
 
 template enumerateSubFields(holder, fieldVar, body: untyped) =
|
|||||||
else:
|
else:
|
||||||
enumInstanceSerializedFields(holder, _, fieldVar): body
|
enumInstanceSerializedFields(holder, _, fieldVar): body
|
||||||
|
|
||||||
func writeVarSizeType(w: var SszWriter, value: auto) {.gcsafe.}
|
proc writeVarSizeType(w: var SszWriter, value: auto) {.gcsafe.}
|
||||||
|
|
||||||
func beginRecord*(w: var SszWriter, TT: type): auto =
|
proc beginRecord*(w: var SszWriter, TT: type): auto =
|
||||||
type T = TT
|
type T = TT
|
||||||
when isFixedSize(T):
|
when isFixedSize(T):
|
||||||
FixedSizedWriterCtx()
|
FixedSizedWriterCtx()
|
||||||
@@ -190,7 +193,7 @@ template endRecord*(w: var SszWriter, ctx: var auto) =
   when ctx is VarSizedWriterCtx:
     finalize ctx.fixedParts
 
-func writeVarSizeType(w: var SszWriter, value: auto) =
+proc writeVarSizeType(w: var SszWriter, value: auto) =
   trs "STARTING VAR SIZE TYPE"
   mixin toSszType
   type T = type toSszType(value)
@@ -227,7 +230,7 @@ func writeVarSizeType(w: var SszWriter, value: auto) =
       writeField w, ctx, astToStr(field), field
     endRecord w, ctx
 
-func writeValue*(w: var SszWriter, x: auto) {.gcsafe.} =
+proc writeValue*(w: var SszWriter, x: auto) {.gcsafe.} =
   mixin toSszType
   type T = type toSszType(x)
 
@@ -238,7 +241,7 @@ func writeValue*(w: var SszWriter, x: auto) {.gcsafe.} =
   else:
     unsupported type(x)
 
-func writeValue*[T](w: var SszWriter, x: SizePrefixed[T]) =
+proc writeValue*[T](w: var SszWriter, x: SizePrefixed[T]) =
   var cursor = w.stream.delayVarSizeWrite(10)
   let initPos = w.stream.pos
   w.writeValue T(x)
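The magic number in delayVarSizeWrite(10) is the worst-case varint size: a LEB128-style varint carries 7 payload bits per byte, so a uint64 needs at most ceil(64 / 7) = 10 bytes. An illustrative helper (not part of the diff) makes the bound concrete:

  func varintLen(x: uint64): int =
    # number of 7-bit groups needed to encode x
    result = 1
    var v = x shr 7
    while v != 0:
      inc result
      v = v shr 7

  doAssert varintLen(high(uint64)) == 10  # hence delayVarSizeWrite(10)
  doAssert varintLen(300'u64) == 2        # 300 encodes as 0xAC 0x02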
@@ -260,21 +263,21 @@ template fromSszBytes*[T; N](_: type TypeWithMaxLen[T, N],
 proc readValue*[T](r: var SszReader, val: var T) =
   when isFixedSize(T):
     const minimalSize = fixedPortionSize(T)
-    if r.stream[].ensureBytes(minimalSize):
-      val = readSszValue(r.stream.readBytes(minimalSize), T)
+    if r.stream.readable(minimalSize):
+      val = readSszValue(r.stream.read(minimalSize), T)
     else:
       raise newException(MalformedSszError, "SSZ input of insufficient size")
   else:
     # TODO Read the fixed portion first and precisely measure the size of
     # the dynamic portion to consume the right number of bytes.
-    val = readSszValue(r.stream.readBytes(r.stream.endPos), T)
+    val = readSszValue(r.stream.read(r.stream.endPos), T)
 
 proc readValue*[T](r: var SszReader, val: var SizePrefixed[T]) =
   let length = r.stream.readVarint(uint64)
   if length > r.maxObjectSize:
     raise newException(SszMaxSizeExceeded,
                        "Maximum SSZ object size exceeded: " & $length)
-  val = readSszValue(r.stream.readBytes(length), T)
+  val = readSszValue(r.stream.read(length), T)
 
 const
   zeroChunk = default array[32, byte]
@@ -321,39 +324,39 @@ func getZeroHashWithoutSideEffect(idx: int): Eth2Digest =
   {.noSideEffect.}:
     zeroHashes[idx]
 
-func addChunk(merkelizer: SszChunksMerkelizer, data: openarray[byte]) =
+func addChunk(merkleizer: SszChunksMerkelizer, data: openarray[byte]) =
   doAssert data.len > 0 and data.len <= bytesPerChunk
 
-  if not getBitLE(merkelizer.totalChunks, 0):
-    let chunkStartAddr = addr merkelizer.combinedChunks[0].data[0]
+  if not getBitLE(merkleizer.totalChunks, 0):
+    let chunkStartAddr = addr merkleizer.combinedChunks[0].data[0]
     copyMem(chunkStartAddr, unsafeAddr data[0], data.len)
     zeroMem(chunkStartAddr.offset(data.len), bytesPerChunk - data.len)
-    trs "WROTE BASE CHUNK ", merkelizer.combinedChunks[0]
+    trs "WROTE BASE CHUNK ", merkleizer.combinedChunks[0]
   else:
-    var hash = mergeBranches(merkelizer.combinedChunks[0], data)
+    var hash = mergeBranches(merkleizer.combinedChunks[0], data)
 
-    for i in 1 .. high(merkelizer.combinedChunks):
+    for i in 1 .. high(merkleizer.combinedChunks):
       trs "ITERATING"
-      if getBitLE(merkelizer.totalChunks, i):
+      if getBitLE(merkleizer.totalChunks, i):
         trs "CALLING MERGE BRANCHES"
-        hash = mergeBranches(merkelizer.combinedChunks[i], hash)
+        hash = mergeBranches(merkleizer.combinedChunks[i], hash)
       else:
         trs "WRITING FRESH CHUNK AT ", i, " = ", hash
-        merkelizer.combinedChunks[i] = hash
+        merkleizer.combinedChunks[i] = hash
         break
 
-  inc merkelizer.totalChunks
+  inc merkleizer.totalChunks
 
-func getFinalHash(merkelizer: SszChunksMerkelizer): Eth2Digest =
-  let limit = merkelizer.limit
+func getFinalHash(merkleizer: SszChunksMerkelizer): Eth2Digest =
+  let limit = merkleizer.limit
 
-  if merkelizer.totalChunks == 0:
+  if merkleizer.totalChunks == 0:
     let limitHeight = if limit != 0: bitWidth(limit - 1) else: 0
     return getZeroHashWithoutSideEffect(limitHeight)
 
   let
-    bottomHashIdx = firstOne(merkelizer.totalChunks) - 1
-    submittedChunksHeight = bitWidth(merkelizer.totalChunks - 1)
+    bottomHashIdx = firstOne(merkleizer.totalChunks) - 1
+    submittedChunksHeight = bitWidth(merkleizer.totalChunks - 1)
     topHashIdx = if limit <= 1: submittedChunksHeight
                  else: max(submittedChunksHeight, bitWidth(limit - 1))
 
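Beyond the merkelizer -> merkleizer rename, the logic here is a binary counter over subtree roots: bit i of totalChunks records whether combinedChunks[i] holds the root of a finished subtree of 2^i chunks that is still waiting for a right sibling. A hypothetical trace (c1..c3 stand for 32-byte chunks):

  var m = SszChunksMerkelizer()
  m.addChunk c1   # totalChunks 0 -> 1 (0b01): c1 parked at level 0
  m.addChunk c2   # totalChunks 1 -> 2 (0b10): hash(c1, c2) parked at level 1
  m.addChunk c3   # totalChunks 2 -> 3 (0b11): c3 parked at level 0
  # getFinalHash merges the pending levels, padding with precomputed
  # zero-subtree hashes up to the height implied by `limit`.
  let root = m.getFinalHash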
@@ -364,12 +367,12 @@ func getFinalHash(merkelizer: SszChunksMerkelizer): Eth2Digest =
   if bottomHashIdx != submittedChunksHeight:
     # Our tree is not finished. We must complete the work in progress
     # branches and then extend the tree to the right height.
-    result = mergeBranches(merkelizer.combinedChunks[bottomHashIdx],
+    result = mergeBranches(merkleizer.combinedChunks[bottomHashIdx],
                            getZeroHashWithoutSideEffect(bottomHashIdx))
 
     for i in bottomHashIdx + 1 ..< topHashIdx:
-      if getBitLE(merkelizer.totalChunks, i):
-        result = mergeBranches(merkelizer.combinedChunks[i], result)
+      if getBitLE(merkleizer.totalChunks, i):
+        result = mergeBranches(merkleizer.combinedChunks[i], result)
         trs "COMBINED"
       else:
         result = mergeBranches(result, getZeroHashWithoutSideEffect(i))
@@ -377,52 +380,49 @@ func getFinalHash(merkelizer: SszChunksMerkelizer): Eth2Digest =
 
   elif bottomHashIdx == topHashIdx:
     # We have a perfect tree (chunks == 2**n) at just the right height!
-    result = merkelizer.combinedChunks[bottomHashIdx]
+    result = merkleizer.combinedChunks[bottomHashIdx]
   else:
     # We have a perfect tree of user chunks, but we have more work to
     # do - we must extend it to reach the desired height
-    result = mergeBranches(merkelizer.combinedChunks[bottomHashIdx],
+    result = mergeBranches(merkleizer.combinedChunks[bottomHashIdx],
                            getZeroHashWithoutSideEffect(bottomHashIdx))
 
     for i in bottomHashIdx + 1 ..< topHashIdx:
       result = mergeBranches(result, getZeroHashWithoutSideEffect(i))
 
-let HashingStreamVTable = OutputStreamVTable(
-  writePage: proc (s: OutputStreamVar, data: openarray[byte])
+let SszHashingStreamVTable = OutputStreamVTable(
+  writePageSync: proc (s: OutputStream, data: openarray[byte])
                   {.nimcall, gcsafe, raises: [Defect, IOError].} =
     trs "ADDING STREAM CHUNK ", data
-    SszChunksMerkelizer(s.outputDevice).addChunk(data)
+    SszHashingStream(s).merkleizer.addChunk(data)
   ,
-  flush: proc (s: OutputStreamVar) {.nimcall, gcsafe.} =
+  flushSync: proc (s: OutputStream) {.nimcall, gcsafe.} =
     discard
 )
 
-func getVtableAddresWithoutSideEffect: ptr OutputStreamVTable =
-  # TODO this is a work-around for the somewhat broken side
-  # effects analysis of Nim - reading from global let variables
-  # is considered a side-effect.
-  {.noSideEffect.}:
-    unsafeAddr HashingStreamVTable
-
-func newSszHashingStream(merkelizer: SszChunksMerkelizer): ref OutputStream =
-  new result
-  result.initWithSinglePage(pageSize = bytesPerChunk,
-                            maxWriteSize = bytesPerChunk,
-                            minWriteSize = bytesPerChunk)
-  result.outputDevice = merkelizer
-  result.vtable = getVtableAddresWithoutSideEffect()
+func newSszHashingStream(merkleizer: SszChunksMerkelizer): OutputStream =
+  result = SszHashingStream(vtable: vtableAddr SszHashingStreamVTable,
+                            pageSize: bytesPerChunk,
+                            maxWriteSize: bytesPerChunk,
+                            minWriteSize: bytesPerChunk,
+                            merkleizer: merkleizer)
+  result.initWithSinglePage()
 
 func mixInLength(root: Eth2Digest, length: int): Eth2Digest =
   var dataLen: array[32, byte]
   dataLen[0..<8] = uint64(length).toBytesLE()
   hash(root.data, dataLen)
 
-func merkelizeSerializedChunks(merkelizer: SszChunksMerkelizer,
+func merkelizeSerializedChunks(merkleizer: SszChunksMerkelizer,
                                obj: auto): Eth2Digest =
-  var hashingStream = newSszHashingStream merkelizer
-  hashingStream.writeFixedSized obj
-  hashingStream.flush
-  merkelizer.getFinalHash
+  var hashingStream = newSszHashingStream merkleizer
+  {.noSideEffect.}:
+    # We assume there are no side-effects here, because the
+    # SszHashingStream is keeping all of its output in memory.
+    hashingStream.writeFixedSized obj
+    hashingStream.flush
+  merkleizer.getFinalHash
 
 func merkelizeSerializedChunks(obj: auto): Eth2Digest =
   merkelizeSerializedChunks(SszChunksMerkelizer(), obj)
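The rewritten stream plugs into FastStreams' synchronous vtable interface; because pageSize, minWriteSize and maxWriteSize are all pinned to bytesPerChunk (32), each writePageSync call hands the merkleizer exactly one SSZ chunk. A sketch of the end-to-end flow, mirroring merkelizeSerializedChunks above (the payload is hypothetical):

  var m = SszChunksMerkelizer()
  let hs = newSszHashingStream(m)  # 32-byte pages: one chunk per page write
  hs.writeFixedSized 42'u64        # serialized bytes go straight to the hasher
  hs.flush
  let root = m.getFinalHash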
@@ -430,12 +430,12 @@ func merkelizeSerializedChunks(obj: auto): Eth2Digest =
 func hash_tree_root*(x: auto): Eth2Digest {.gcsafe.}
 
 template merkelizeFields(body: untyped): Eth2Digest {.dirty.} =
-  var merkelizer {.inject.} = SszChunksMerkelizer()
+  var merkleizer {.inject.} = SszChunksMerkelizer()
 
   template addField(field) =
     let hash = hash_tree_root(field)
     trs "MERKLEIZING FIELD ", astToStr(field), " = ", hash
-    addChunk(merkelizer, hash.data)
+    addChunk(merkleizer, hash.data)
     trs "CHUNK ADDED"
 
   template addField2(field) {.used.}=
@@ -448,10 +448,10 @@ template merkelizeFields(body: untyped): Eth2Digest {.dirty.} =
 
   body
 
-  merkelizer.getFinalHash
+  merkleizer.getFinalHash
 
-func bitlistHashTreeRoot(merkelizer: SszChunksMerkelizer, x: BitSeq): Eth2Digest =
-  trs "CHUNKIFYING BIT SEQ WITH LIMIT ", merkelizer.limit
+func bitlistHashTreeRoot(merkleizer: SszChunksMerkelizer, x: BitSeq): Eth2Digest =
+  trs "CHUNKIFYING BIT SEQ WITH LIMIT ", merkleizer.limit
 
   var
     totalBytes = ByteList(x).len
@@ -461,8 +461,8 @@ func bitlistHashTreeRoot(merkelizer: SszChunksMerkelizer, x: BitSeq): Eth2Digest
   if totalBytes == 1:
     # This is an empty bit list.
     # It should be hashed as a tree containing all zeros:
-    let treeHeight = if merkelizer.limit == 0: 0
-                     else: log2trunc(merkelizer.limit)
+    let treeHeight = if merkleizer.limit == 0: 0
+                     else: log2trunc(merkleizer.limit)
     return mergeBranches(getZeroHashWithoutSideEffect(treeHeight),
                          getZeroHashWithoutSideEffect(0)) # this is the mixed length
 
@@ -485,7 +485,7 @@ func bitlistHashTreeRoot(merkelizer: SszChunksMerkelizer, x: BitSeq): Eth2Digest
       chunkStartPos = i * bytesPerChunk
       chunkEndPos = chunkStartPos + bytesPerChunk - 1
 
-    merkelizer.addChunk ByteList(x).toOpenArray(chunkEndPos, chunkEndPos)
+    merkleizer.addChunk ByteList(x).toOpenArray(chunkEndPos, chunkEndPos)
 
   var
     lastChunk: array[bytesPerChunk, byte]
@@ -496,8 +496,8 @@ func bitlistHashTreeRoot(merkelizer: SszChunksMerkelizer, x: BitSeq): Eth2Digest
 
   lastChunk[bytesInLastChunk - 1] = lastCorrectedByte
 
-  merkelizer.addChunk lastChunk.toOpenArray(0, bytesInLastChunk - 1)
-  let contentsHash = merkelizer.getFinalHash
+  merkleizer.addChunk lastChunk.toOpenArray(0, bytesInLastChunk - 1)
+  let contentsHash = merkleizer.getFinalHash
   mixInLength contentsHash, x.len
 
 func hashTreeRootImpl[T](x: T): Eth2Digest =
@@ -555,19 +555,19 @@ func hash_tree_root*(x: auto): Eth2Digest =
     const maxLen = x.maxLen
     type T = type valueOf(x)
     const limit = maxChunksCount(T, maxLen)
-    var merkelizer = SszChunksMerkelizer(limit: uint64(limit))
+    var merkleizer = SszChunksMerkelizer(limit: uint64(limit))
 
     when T is BitList:
-      result = merkelizer.bitlistHashTreeRoot(BitSeq valueOf(x))
+      result = merkleizer.bitlistHashTreeRoot(BitSeq valueOf(x))
     elif T is seq:
       type E = ElemType(T)
       let contentsHash = when E is BasicType:
-        merkelizeSerializedChunks(merkelizer, valueOf(x))
+        merkelizeSerializedChunks(merkleizer, valueOf(x))
       else:
         for elem in valueOf(x):
           let elemHash = hash_tree_root(elem)
-          merkelizer.addChunk(elemHash.data)
-        merkelizer.getFinalHash()
+          merkleizer.addChunk(elemHash.data)
+        merkleizer.getFinalHash()
       result = mixInLength(contentsHash, valueOf(x).len)
     else:
       unsupported T # This should never happen
@@ -583,7 +583,7 @@ iterator hash_tree_roots_prefix*[T](lst: openarray[T], limit: auto):
   # Eth1 deposit case is the only notable example -- the usual uses of a
   # list involve, at some point, tree-hashing it -- finalized hashes are
   # the only abstraction that escapes from this module this way.
-  var merkelizer = SszChunksMerkelizer(limit: uint64(limit))
+  var merkleizer = SszChunksMerkelizer(limit: uint64(limit))
   for i, elem in lst:
-    merkelizer.addChunk(hash_tree_root(elem).data)
-    yield mixInLength(merkelizer.getFinalHash(), i + 1)
+    merkleizer.addChunk(hash_tree_root(elem).data)
+    yield mixInLength(merkleizer.getFinalHash(), i + 1)
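This iterator lets a caller obtain the list root after every prefix without rehashing from scratch; each step adds one chunk and re-mixes the running root with the new length. A hypothetical use for incremental deposit roots (the names are placeholders):

  var count = 0
  for root in hash_tree_roots_prefix(deposits, depositContractLimit):
    inc count
    echo "root of first ", count, " deposits: ", root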
@@ -34,14 +34,14 @@ type
     discard
 
   jsonPrinter: proc (m: MemRange,
-                     outStream: OutputStreamVar,
-                     pretty: bool) {.gcsafe, noSideEffect.}
+                     outStream: OutputStream,
+                     pretty: bool) {.gcsafe.}
 
   DynamicSszNavigator* = object
     m: MemRange
     typ: TypeInfo
 
-func jsonPrinterImpl[T](m: MemRange, outStream: OutputStreamVar, pretty: bool) =
+proc jsonPrinterImpl[T](m: MemRange, outStream: OutputStream, pretty: bool) =
   var typedNavigator = sszMount(m, T)
   var jsonWriter = init(JsonWriter, outStream, pretty)
   # TODO: it should be possible to serialize the navigator object
@@ -141,11 +141,17 @@ func init*(T: type DynamicSszNavigator,
   T(m: MemRange(startAddr: unsafeAddr bytes[0], length: bytes.len),
     typ: typeInfo(Navigated))
 
-func writeJson*(n: DynamicSszNavigator, outStream: OutputStreamVar, pretty = true) =
+proc writeJson*(n: DynamicSszNavigator, outStream: OutputStream, pretty = true) =
   n.typ.jsonPrinter(n.m, outStream, pretty)
 
 func toJson*(n: DynamicSszNavigator, pretty = true): string =
-  var outStream = init OutputStream
-  writeJson(n, outStream, pretty)
+  var outStream = memoryOutput()
+  {.noSideEffect.}:
+    # We are assuming that there are no side-effects here
+    # because we are using a `memoryOutput`. The computed
+    # side-effects are coming from the fact that the dynamic
+    # dispatch mechanisms used in faststreams may be reading
+    # from a file or a network device.
+    writeJson(n, outStream, pretty)
   outStream.getOutput(string)
 
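A hypothetical use of the navigator, pieced together from the init and toJson signatures above (the byte source and the navigated type are placeholders):

  let nav = DynamicSszNavigator.init(sszBytes, BeaconState)
  echo nav.toJson(pretty = true)  # renders the raw SSZ blob as JSON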
vendor/nim-chronicles | 2 +-
@@ -1 +1 @@
-Subproject commit fb8af46311965fd076412e6e071dda571d282024
+Subproject commit 2f0896a29ed9a7e42cf377020ea2b65affbfd6df

vendor/nim-eth | 2 +-
@@ -1 +1 @@
-Subproject commit 99c68d40f71a3bbb6b3e4024390cc7fde7c31ecb
+Subproject commit 8f3bf360540f7aaeba21b0a4296ae70af6734c21

vendor/nim-faststreams | 2 +-
@@ -1 +1 @@
-Subproject commit 8a3cf6778d483a9d701534dfc2f14f3a4dfc4ab8
+Subproject commit 2c87f53c03b75da401aae625896721a88c626999

vendor/nim-json-serialization | 2 +-
@@ -1 +1 @@
-Subproject commit f52683b2ee92501915ef062528c9e3ecd253d412
+Subproject commit 96a337d334fe7fcfa847ba5e0a38cf5f141a0eea

vendor/nim-protobuf-serialization (new submodule) | 1 +
@@ -0,0 +1 @@
+Subproject commit 08c6db775f3daee35fee098c72c0e36b8ec57833

vendor/nim-serialization | 2 +-
@@ -1 +1 @@
-Subproject commit 68e9ef790138b6644a87229afdd2e217bd801ad5
+Subproject commit 67bb6a77bb9dce9cd3b79fb1fcc0bdb333331e22

vendor/nim-snappy (new submodule) | 1 +
@@ -0,0 +1 @@
+Subproject commit 80cff583e33a026ac944ca3b6ea9ed95a3fc7e0e

vendor/nim-stew | 2 +-
@@ -1 +1 @@
-Subproject commit 805ef4f1b2267d0f6323d92a8eae943834a04cb9
+Subproject commit 8528ce28b4c53351f70f62bbe56e92336efe42e4