Moved rlp and trie to eth

This commit is contained in:
Yuriy Glukhov 2019-02-05 14:01:10 +02:00
parent 3917447084
commit 32998f0dc1
56 changed files with 5848 additions and 9 deletions

View File

@ -1,6 +1,9 @@
version: '{build}'
cache:
- sqlite-dll-win32-x86-3240000.zip -> .appveyor.yml
- sqlite-dll-win64-x64-3240000.zip -> .appveyor.yml
- nimbus-deps.zip -> .appveyor.yml
- x86_64-4.9.2-release-win32-seh-rt_v4-rev4.7z
- i686-4.9.2-release-win32-dwarf-rt_v4-rev4.7z
- Nim
@ -19,23 +22,54 @@ install:
- IF "%PLATFORM%" == "x86" (
SET "MINGW_ARCHIVE=i686-4.9.2-release-win32-dwarf-rt_v4-rev4.7z" &
SET "MINGW_URL=https://sourceforge.net/projects/mingw-w64/files/Toolchains%%20targetting%%20Win32/Personal%%20Builds/mingw-builds/4.9.2/threads-win32/dwarf/i686-4.9.2-release-win32-dwarf-rt_v4-rev4.7z" &
SET "MINGW_DIR=mingw32"
SET "MINGW_DIR=mingw32" &
SET "SQLITE_URL=https://www.sqlite.org/2018/sqlite-dll-win32-x86-3240000.zip" &
SET "SQLITE_ARCHIVE=sqlite-dll-win32-x86-3240000.zip" &
SET "ROCKSDB_URL=https://github.com/status-im/nimbus-deps/releases/download/nimbus-deps/nimbus-deps.zip" &
SET "ROCKSDB_ARCHIVE=nimbus-deps.zip"
) ELSE (
IF "%PLATFORM%" == "x64" (
SET "MINGW_ARCHIVE=x86_64-4.9.2-release-win32-seh-rt_v4-rev4.7z" &
SET "MINGW_URL=https://sourceforge.net/projects/mingw-w64/files/Toolchains%%20targetting%%20Win64/Personal%%20Builds/mingw-builds/4.9.2/threads-win32/seh/x86_64-4.9.2-release-win32-seh-rt_v4-rev4.7z" &
SET "MINGW_DIR=mingw64"
SET "MINGW_DIR=mingw64" &
SET "SQLITE_URL=https://www.sqlite.org/2018/sqlite-dll-win64-x64-3240000.zip" &
SET "SQLITE_ARCHIVE=sqlite-dll-win64-x64-3240000.zip" &
SET "ROCKSDB_URL=https://github.com/status-im/nimbus-deps/releases/download/nimbus-deps/nimbus-deps.zip" &
SET "ROCKSDB_ARCHIVE=nimbus-deps.zip"
) else (
echo "Unknown platform"
)
)
- SET PATH=%CD%\%MINGW_DIR%\bin;%CD%\Nim\bin;%PATH%
- MKDIR %CD%\bin
- SET PATH=%CD%\%MINGW_DIR%\bin;%CD%\bin;%CD%\Nim\bin;%PATH%
# Unpack mingw
- IF NOT EXIST "%MINGW_ARCHIVE%" appveyor DownloadFile "%MINGW_URL%" -FileName "%MINGW_ARCHIVE%"
- 7z x -y "%MINGW_ARCHIVE%" > nul
# Unpack sqlite
- IF not exist "%SQLITE_ARCHIVE%" appveyor DownloadFile "%SQLITE_URL%" -FileName "%SQLITE_ARCHIVE%"
- 7z x -y "%SQLITE_ARCHIVE%" > nul
- IF "%PLATFORM%" == "x64" ( copy %CD%\sqlite3.dll %CD%\bin\sqlite3_64.dll ) ELSE ( copy %CD%\sqlite3.dll %CD%\bin\sqlite3_32.dll )
# Unpack rocksdb
- IF not exist "%ROCKSDB_ARCHIVE%" appveyor DownloadFile "%ROCKSDB_URL%" -FileName "%ROCKSDB_ARCHIVE%"
- 7z x -y "%ROCKSDB_ARCHIVE%" > nul
- IF "%PLATFORM%" == "x64" ( copy %CD%\x64\librocksdb.dll %CD%\bin\librocksdb.dll ) ELSE ( copy %CD%\x86\librocksdb.dll %CD%\bin\librocksdb.dll )
# download and build lmdb
- SET "LMDB_URL=https://github.com/LMDB/lmdb/archive"
- SET "LMDB_VER=0.9.22"
- SET "LMDB_ARCHIVE=LMDB_%LMDB_VER%.tar.gz"
- SET "LMDB_PATH=lmdb-LMDB_%LMDB_VER%\libraries\liblmdb"
# URLs use forward slashes, and appveyor's flag is "-FileName" (no space);
# the previous "\" separator and "- FileName" would make the download fail.
- appveyor DownloadFile "%LMDB_URL%/%LMDB_ARCHIVE%" -FileName "%LMDB_ARCHIVE%"
- tar xvf %LMDB_ARCHIVE%
- cd %LMDB_PATH%
- gcc -shared -o lmdb.dll mdb.c midl.c -lntdll
- cd ..\..\..
- copy %LMDB_PATH%\lmdb.dll %CD%\bin\lmdb.dll
# build nim from our own branch - this to avoid the day-to-day churn and
# regressions of the fast-paced Nim development while maintaining the
# flexibility to apply patches

View File

@ -9,10 +9,19 @@ sudo: false
cache:
directories:
- nim
- rocksdb
os:
- linux
- osx
matrix:
include:
- os: linux
sudo: required
before_install:
- export INSTALL_PATH=/usr
- export NPROC=$(nproc)
- os: osx
before_install:
- export INSTALL_PATH=$HOME # included in DYLD_FALLBACK_LIBRARY_PATH
- export NPROC=$(sysctl -n hw.ncpu)
install:
# build nim from our own branch - this to avoid the day-to-day churn and
@ -34,6 +43,23 @@ install:
}"
- "export PATH=$PWD/nim/$NIMVER/bin:$PATH"
# build our own rocksdb to test with a fixed version that we think works
- "export ROCKSDBVER=5.14.2"
- "[ -f rocksdb/rocksdb-$ROCKSDBVER/Makefile ] || { rm -rf rocksdb ; mkdir -p rocksdb; cd rocksdb; wget https://github.com/facebook/rocksdb/archive/v$ROCKSDBVER.tar.gz && tar xvf v$ROCKSDBVER.tar.gz; cd ..; }"
- cd rocksdb/rocksdb-$ROCKSDBVER
- "[ -f util/build_version.cc ] || { make util/build_version.cc ; }" # use cached version if possible
- export NO_UPDATE_BUILD_VERSION=1
- make shared_lib -j$NPROC && sudo make install-shared
- cd ../..
# no need to cache, lmdb is small and compile very fast
- "export LMDBVER=0.9.22"
- "wget https://github.com/LMDB/lmdb/archive/LMDB_$LMDBVER.tar.gz && tar xvf LMDB_$LMDBVER.tar.gz;"
- cd lmdb-LMDB_$LMDBVER/libraries/liblmdb && make
- if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then sed -i 's| liblmdb.a||' Makefile && sudo make prefix=/usr install; fi
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then sudo cp liblmdb.so /usr/local/lib/liblmdb.dylib; fi
- cd ../../..
script:
- nimble install -y
- nimble test

View File

@ -1,6 +1,6 @@
import
endians, options, times,
stint, nimcrypto, rlp, eth_trie/[defs, db]
stint, nimcrypto, eth/rlp, eth/trie/[trie_defs, db]
export
stint, read, append, KeccakHash

View File

@ -1,5 +1,5 @@
import
eth_trie/[defs, db, hexary], rlp,
eth/trie/[defs, db, hexary], rlp,
eth_types
proc getAccount*(db: TrieDatabaseRef,

473
eth/rlp.nim Normal file
View File

@ -0,0 +1,473 @@
## This module implements RLP encoding and decoding as
## defined in Appendix B of the Ethereum Yellow Paper:
## https://ethereum.github.io/yellowpaper/paper.pdf
import
macros, strutils, parseutils,
rlp/[types, writer, object_serialization],
rlp/priv/defs
export
types, writer, object_serialization
type
# A cursor over an RLP-encoded byte range: `bytes` is the encoded data,
# `position` is the offset of the element currently being inspected.
Rlp* = object
bytes: BytesRange
position: int
# RLP has exactly two node kinds: byte strings (blobs) and lists.
RlpNodeType* = enum
rlpBlob
rlpList
# Fully decoded RLP tree node (see `toNodes`/`decode`).
RlpNode* = object
case kind*: RlpNodeType
of rlpBlob:
bytes*: BytesRange
of rlpList:
elems*: seq[RlpNode]
# Error hierarchy: all decoding failures derive from RlpError.
RlpError* = object of Exception
MalformedRlpError* = object of RlpError
UnsupportedRlpError* = object of RlpError
RlpTypeMismatch* = object of RlpError
# Wraps an existing byte range in an Rlp cursor positioned at offset 0.
proc rlpFromBytes*(data: BytesRange): Rlp =
result.bytes = data
result.position = 0
# An Rlp value over an empty range; `hasData` is false for it.
const zeroBytesRlp* = Rlp()
proc rlpFromHex*(input: string): Rlp =
  ## Creates an `Rlp` cursor over bytes parsed from a hex string.
  ## `input` may optionally start with an "0x" prefix and must contain
  ## an even number of hex digits (two characters per byte).
  ## Asserts (rather than raises) on malformed input.
  doAssert input.len mod 2 == 0,
    "rlpFromHex expects a string with even number of characters (assuming two characters per byte)"

  # Skip an optional "0x" prefix; it is two characters, so the parity
  # check above is unaffected.
  let startByte = if input.len >= 2 and input[0] == '0' and input[1] == 'x': 2
                  else: 0

  let totalBytes = (input.len - startByte) div 2
  var backingStore = newSeq[byte](totalBytes)

  for i in 0 ..< totalBytes:
    var nextByte: int
    if parseHex(input, nextByte, startByte + i*2, 2) == 2:
      backingStore[i] = byte(nextByte)
    else:
      # Fixed typo in the assertion message ("hexademical" -> "hexadecimal").
      doAssert false, "rlpFromHex expects a hexadecimal string, but the input contains non hexadecimal characters"

  result.bytes = backingStore.toRange()
{.this: self.}
# True while the cursor has not reached the end of the underlying bytes.
proc hasData*(self: Rlp): bool =
position < bytes.len
proc currentElemEnd(self: Rlp): int {.gcsafe.}
# Raw encoded bytes of the current element (marker + payload).
proc rawData*(self: Rlp): BytesRange =
return self.bytes[position ..< self.currentElemEnd]
# Current element is a byte string (marker below the list range).
proc isBlob*(self: Rlp): bool =
hasData() and bytes[position] < LIST_START_MARKER
proc isEmpty*(self: Rlp): bool =
## Contains a blob or a list of zero length
hasData() and (bytes[position] == BLOB_START_MARKER or
bytes[position] == LIST_START_MARKER)
# Current element is a list (marker >= 0xc0).
proc isList*(self: Rlp): bool =
hasData() and bytes[position] >= LIST_START_MARKER
# Raised when a read would run past the end of the stream.
template eosError =
raise newException(MalformedRlpError, "Read past the end of the RLP stream")
# Guard used by operations that are meaningless on an empty stream.
template requireData {.dirty.} =
if not hasData():
raise newException(MalformedRlpError, "Illegal operation over an empty RLP stream")
# Kind of the current element; raises on an empty stream.
proc getType*(self: Rlp): RlpNodeType =
requireData()
return if isBlob(): rlpBlob else: rlpList
# Number of explicit length bytes following the marker (0 for short forms).
proc lengthBytesCount(self: Rlp): int =
var marker = bytes[position]
if isBlob() and marker > LEN_PREFIXED_BLOB_MARKER:
return int(marker - LEN_PREFIXED_BLOB_MARKER)
if isList() and marker > LEN_PREFIXED_LIST_MARKER:
return int(marker - LEN_PREFIXED_LIST_MARKER)
return 0
# A single byte encoded as itself (value < 0x80, no marker byte).
proc isSingleByte*(self: Rlp): bool =
hasData() and bytes[position] < BLOB_START_MARKER
proc getByteValue*(self: Rlp): byte =
assert self.isSingleByte()
return bytes[position]
# Offset from `position` to the first payload byte: 0 for a single byte,
# otherwise 1 marker byte plus any length-prefix bytes.
proc payloadOffset(self: Rlp): int =
if isSingleByte(): 0 else: 1 + lengthBytesCount()
# Bounds check before consuming `numberOfBytes` of payload after `position`.
template readAheadCheck(numberOfBytes) =
if position + numberOfBytes >= bytes.len: eosError()
# Canonical RLP requires the shortest possible encoding for numbers.
template nonCanonicalNumberError =
raise newException(MalformedRlpError, "Small number encoded in a non-canonical way")
# Decodes the payload length of the current element (blob or list),
# enforcing canonical-form rules along the way.
proc payloadBytesCount(self: Rlp): int =
if not hasData():
return 0
var marker = bytes[position]
if marker < BLOB_START_MARKER:
# Single byte encoded as itself: payload length is 1.
return 1
if marker <= LEN_PREFIXED_BLOB_MARKER:
# Short blob: length is encoded directly in the marker.
result = int(marker - BLOB_START_MARKER)
readAheadCheck(result)
if result == 1:
# A one-byte blob whose byte is < 0x80 should have been encoded
# as the byte itself.
if bytes[position + 1] < BLOB_START_MARKER:
nonCanonicalNumberError()
return
# Long form: the marker is followed by a big-endian length field.
template readInt(startMarker, lenPrefixMarker) =
var
lengthBytes = int(marker - lenPrefixMarker)
remainingBytes = self.bytes.len - self.position
if remainingBytes <= lengthBytes:
eosError()
if remainingBytes > 1 and self.bytes[self.position + 1] == 0:
raise newException(MalformedRlpError, "Number encoded with a leading zero")
if lengthBytes > sizeof(result):
raise newException(UnsupportedRlpError, "Message too large to fit in memory")
for i in 1 .. lengthBytes:
result = (result shl 8) or int(self.bytes[self.position + i])
# Long-form lengths must be at least THRESHOLD_LIST_LEN (56),
# otherwise the short form should have been used.
if result < THRESHOLD_LIST_LEN:
nonCanonicalNumberError()
if marker < LIST_START_MARKER:
readInt(BLOB_START_MARKER, LEN_PREFIXED_BLOB_MARKER)
elif marker <= LEN_PREFIXED_LIST_MARKER:
result = int(marker - LIST_START_MARKER)
else:
readInt(LIST_START_MARKER, LEN_PREFIXED_LIST_MARKER)
# NOTE(review): this final check does not account for the length-prefix
# bytes themselves; verify it is strict enough against malformed input.
readAheadCheck(result)
# Payload length of the current blob, or 0 for lists/empty streams.
proc blobLen*(self: Rlp): int =
if isBlob(): payloadBytesCount() else: 0
# True when the current element is a canonical non-negative integer
# (a blob with no leading zero byte).
proc isInt*(self: Rlp): bool =
if not hasData():
return false
var marker = bytes[position]
if marker < BLOB_START_MARKER:
# Single byte: zero must be encoded as the empty blob, not 0x00.
return marker != 0
if marker == BLOB_START_MARKER:
# Empty blob encodes the integer 0.
return true
if marker <= LEN_PREFIXED_BLOB_MARKER:
return bytes[position + 1] != 0
if marker < LIST_START_MARKER:
let offset = position + int(marker + 1 - LEN_PREFIXED_BLOB_MARKER)
if offset >= bytes.len: eosError()
return bytes[offset] != 0
return false
# Maximum number of payload bytes that fit the given integer type.
template maxBytes*(o: type[Ordinal | uint64 | uint]): int = sizeof(o)
# Decodes the current blob as a big-endian integer of type `IntType`.
proc toInt*(self: Rlp, IntType: type): IntType =
# XXX: work-around a Nim issue with type parameters
type OutputType = IntType
mixin maxBytes, to
# XXX: self insertions are not working in generic procs
# https://github.com/nim-lang/Nim/issues/5053
if not self.hasData():
raise newException(RlpTypeMismatch, "Attempt to read an Int value past the RLP end")
if self.isList():
raise newException(RlpTypeMismatch, "Int expected, but found a List")
let
payloadStart = self.payloadOffset()
payloadSize = self.payloadBytesCount()
if payloadSize > maxBytes(IntType):
raise newException(RlpTypeMismatch, "The RLP contains a larger than expected Int value")
for i in payloadStart ..< (payloadStart + payloadSize):
result = (result shl 8) or OutputType(self.bytes[self.position + i])
# Decodes the current blob as a string; raises RlpTypeMismatch for lists.
proc toString*(self: Rlp): string =
if not isBlob():
raise newException(RlpTypeMismatch, "String expected, but the source RLP is not a blob")
let
payloadOffset = payloadOffset()
payloadLen = payloadBytesCount()
remainingBytes = bytes.len - position - payloadOffset
if payloadLen > remainingBytes:
eosError()
result = newString(payloadLen)
for i in 0 ..< payloadLen:
# XXX: switch to copyMem here
result[i] = char(bytes[position + payloadOffset + i])
# Returns the payload of the current blob as a sub-range (no copy).
proc toBytes*(self: Rlp): BytesRange =
if not isBlob():
raise newException(RlpTypeMismatch,
"Bytes expected, but the source RLP in not a blob")
let payloadLen = payloadBytesCount()
if payloadLen > 0:
let
payloadOffset = payloadOffset()
ibegin = position + payloadOffset
iend = ibegin + payloadLen - 1
result = bytes.slice(ibegin, iend)
# Absolute offset of the first byte after the current element.
proc currentElemEnd(self: Rlp): int =
assert hasData()
result = position
if isSingleByte():
result += 1
elif isBlob() or isList():
result += payloadOffset() + payloadBytesCount()
# Moves the cursor to the first element inside the current list.
proc enterList*(self: var Rlp) =
assert isList()
position += payloadOffset()
# Advances the cursor past the current element.
proc skipElem*(rlp: var Rlp) =
rlp.position = rlp.currentElemEnd
# Iterates the elements of the current list. The same `Rlp` object is
# yielded each time with `position` moved to the next element; mutating
# it inside the loop body will affect iteration.
iterator items*(self: var Rlp): var Rlp =
assert isList()
var
payloadOffset = payloadOffset()
payloadEnd = position + payloadOffset + payloadBytesCount()
if payloadEnd > bytes.len:
raise newException(MalformedRlpError, "List length extends past the end of the stream")
position += payloadOffset
while position < payloadEnd:
let elemEnd = currentElemEnd()
yield self
position = elemEnd
# Returns a cursor positioned at list element `i` (zero-based).
# Stops early when the list is shorter than `i`.
proc listElem*(self: Rlp, i: int): Rlp =
let payload = bytes.slice(position + payloadOffset())
result = rlpFromBytes payload
var pos = 0
while pos < i and result.hasData:
result.position = result.currentElemEnd()
inc pos
# Number of elements in the current list (0 for blobs); walks the list.
proc listLen*(self: Rlp): int =
if not isList():
return 0
var rlp = self
for elem in rlp:
inc result
# The readImpl overloads below implement `rlp.read(T)` for the basic
# types; each decodes the current element and advances the cursor.
proc readImpl(rlp: var Rlp, T: type string): string =
result = rlp.toString
rlp.skipElem
proc readImpl(rlp: var Rlp, T: type Integer): Integer =
result = rlp.toInt(T)
rlp.skipElem
# Enums are encoded by ordinal value.
proc readImpl(rlp: var Rlp, T: type[enum]): T =
result = type(result)(rlp.toInt(int))
rlp.skipElem
# Bools are encoded as 0/1 integers.
proc readImpl(rlp: var Rlp, T: type bool): T =
result = rlp.toInt(int) != 0
rlp.skipElem
proc readImpl(rlp: var Rlp, T: type float64): T =
# This is not covered in the RLP spec, but Geth uses Go's
# `math.Float64bits`, which is defined here:
# https://github.com/gopherjs/gopherjs/blob/master/compiler/natives/src/math/math.go
let uint64bits = rlp.toInt(uint64)
var uint32parts = [uint32(uint64bits), uint32(uint64bits shr 32)]
return cast[ptr float64](unsafeAddr uint32parts)[]
proc readImpl[R, E](rlp: var Rlp, T: type array[R, E]): T =
  ## Reads a fixed-size array. Byte/char arrays are decoded from a single
  ## blob of exactly matching length; other element types are decoded
  ## from a list with exactly `result.len` elements.
  ## Raises `RlpTypeMismatch` on any kind or length mismatch.
  mixin read
  when E is (byte or char):
    if not rlp.isBlob:
      raise newException(RlpTypeMismatch, "Bytes array expected, but the source RLP is not a blob.")
    var bytes = rlp.toBytes
    if result.len != bytes.len:
      # Fixed typo in the error message ("lenght" -> "length").
      raise newException(RlpTypeMismatch, "Fixed-size array expected, but the source RLP contains a blob of different length")
    copyMem(addr result[0], bytes.baseAddr, bytes.len)
    rlp.skipElem
  else:
    if not rlp.isList:
      raise newException(RlpTypeMismatch, "List expected, but the source RLP is not a list.")
    if result.len != rlp.listLen:
      raise newException(RlpTypeMismatch, "Fixed-size array expected, but the source RLP contains a list of different length")
    var i = 0
    for elem in rlp:
      result[i] = rlp.read(E)
      inc i
# Reads a seq: byte/char seqs come from a blob, everything else from a list.
proc readImpl[E](rlp: var Rlp, T: type seq[E]): T =
mixin read
when E is (byte or char):
var bytes = rlp.toBytes
if bytes.len != 0:
result = newSeq[byte](bytes.len)
copyMem(addr result[0], bytes.baseAddr, bytes.len)
rlp.skipElem
else:
if not rlp.isList:
raise newException(RlpTypeMismatch, "Sequence expected, but the source RLP is not a list.")
result = newSeqOfCap[E](rlp.listLen)
for elem in rlp:
result.add rlp.read(E)
# openarray results are materialized as a seq.
proc readImpl[E](rlp: var Rlp, T: type openarray[E]): seq[E] =
result = readImpl(rlp, seq[E])
# Reads an object/tuple field by field via `enumerateRlpFields`.
# When `wrappedInList` is true, the record is expected to be wrapped
# in an enclosing RLP list (the default, see `wrapObjsInList`).
proc readImpl(rlp: var Rlp, T: type[object|tuple],
wrappedInList = wrapObjsInList): T =
mixin enumerateRlpFields, read
if wrappedInList:
var
payloadOffset = rlp.payloadOffset()
payloadEnd = rlp.position + payloadOffset + rlp.payloadBytesCount()
rlp.position += payloadOffset
template op(field) =
when hasCustomPragma(field, rlpCustomSerialization):
# Custom serializers get a reference to the enclosing object too.
field = rlp.read(result, type(field))
else:
field = rlp.read(type(field))
enumerateRlpFields(result, op)
# Recursively materializes the current element as an RlpNode tree,
# advancing the cursor past it.
proc toNodes*(self: var Rlp): RlpNode =
requireData()
if isList():
result.kind = rlpList
newSeq result.elems, 0
for e in self:
result.elems.add e.toNodes
else:
assert isBlob()
result.kind = rlpBlob
result.bytes = toBytes()
position = currentElemEnd()
# We define a single `read` template with a pretty low specifity
# score in order to facilitate easier overloading with user types:
template read*(rlp: var Rlp, T: type): auto =
readImpl(rlp, T)
# Stream-style alias for `read` that assigns into `location`.
proc `>>`*[T](rlp: var Rlp, location: var T) =
mixin read
location = rlp.read(T)
# Like `read` for records, with explicit control over list wrapping.
template readRecordType*(rlp: var Rlp, T: type, wrappedInList: bool): auto =
readImpl(rlp, T, wrappedInList)
# Decodes a whole byte buffer into an RlpNode tree.
proc decode*(bytes: openarray[byte]): RlpNode =
var
bytesCopy = @bytes
rlp = rlpFromBytes(bytesCopy.toRange())
return rlp.toNodes
# decode overloads: parse `bytes` as a single value of type T.
template decode*(bytes: BytesRange, T: type): untyped =
mixin read
var rlp = rlpFromBytes(bytes)
rlp.read(T)
template decode*(bytes: openarray[byte], T: type): T =
var bytesCopy = @bytes
decode(bytesCopy.toRange, T)
template decode*(bytes: seq[byte], T: type): untyped =
decode(bytes.toRange, T)
# Appends an already-encoded RLP element verbatim to a writer.
proc append*(writer: var RlpWriter; rlp: Rlp) =
appendRawBytes(writer, rlp.rawData)
proc isPrintable(s: string): bool =
  ## True when every character of `s` is printable 7-bit ASCII
  ## (codes 32..127). Used by `inspect` to decide whether a blob is
  ## rendered as a quoted string or as a byte dump.
  result = true
  for ch in s:
    let code = ord(ch)
    if code < 32 or code > 127:
      return false
# Recursive worker for `inspect`: pretty-prints the current element into
# `output` at the given indentation depth, consuming the cursor.
proc inspectAux(self: var Rlp, depth: int, hexOutput: bool, output: var string) =
if not hasData():
return
template indent =
for i in 0..<depth:
output.add "  "
indent()
if self.isSingleByte:
output.add "byte "
output.add $bytes[position]
elif self.isBlob:
let str = self.toString
if str.isPrintable:
# Printable blobs are shown as quoted strings.
output.add '"'
output.add str
output.add '"'
else:
# Binary blobs are shown as a comma-separated byte dump,
# hex or decimal depending on `hexOutput`.
output.add "blob(" & $str.len & ") ["
for c in str:
if hexOutput:
output.add toHex(int(c), 2)
else:
output.add $ord(c)
output.add ","
if hexOutput:
output.add ']'
else:
# Overwrite the trailing comma with the closing bracket.
output[^1] = ']'
else:
# Lists are rendered as an indented { ... } block.
output.add "{\n"
for subitem in self:
inspectAux(subitem, depth + 1, hexOutput, output)
output.add "\n"
indent()
output.add "}"
# Human-readable dump of an RLP stream, mainly for debugging.
proc inspect*(self: Rlp, indent = 0, hexOutput = true): string =
var rlpCopy = self
result = newStringOfCap(bytes.len)
inspectAux(rlpCopy, indent, hexOutput, result)

View File

@ -0,0 +1,7 @@
# Tiny command-line helper: pretty-prints the RLP value given as a
# hex-encoded string in the first program argument.
import os, ../../rlp
if paramCount() > 0:
echo rlpFromHex(paramStr(1)).inspect
else:
echo "Please provide an hex-encoded RLP string as an input"

View File

@ -0,0 +1,44 @@
import macros
template rlpIgnore* {.pragma.}
## Specifies that a certain field should be ignored for the purposes
## of RLP serialization
template rlpInline* {.pragma.}
## This can be specified on a record field in order to avoid the
## default behavior of wrapping the record in a RLP list.
template rlpCustomSerialization* {.pragma.}
## This pragma can be applied to a record field to enable the
## use of custom `read` and `append` overloads that also take
## a reference to the object holding the field.
# Applies `op` to every serializable field of `x`, skipping fields
# marked with {.rlpIgnore.}. Overridable per-type via `rlpFields`.
template enumerateRlpFields*[T](x: T, op: untyped) =
for f in fields(x):
when not hasCustomPragma(f, rlpIgnore):
op(f)
# Compile-time count of the fields `enumerateRlpFields` visits for T;
# used by the writer to size the enclosing RLP list.
proc rlpFieldsCount*(T: type): int =
mixin enumerateRlpFields
proc helper: int =
var dummy: T
template countFields(x) = inc result
enumerateRlpFields(dummy, countFields)
const res = helper()
return res
# Generates an `enumerateRlpFields` overload limited to the listed
# fields, overriding the default all-fields behavior for type T.
macro rlpFields*(T: typedesc, fields: varargs[untyped]): untyped =
var body = newStmtList()
let
ins = genSym(nskParam, "instance")
op = genSym(nskParam, "op")
for field in fields:
body.add quote do: `op`(`ins`.`field`)
result = quote do:
template enumerateRlpFields*(`ins`: `T`, `op`: untyped) {.inject.} =
`body`

14
eth/rlp/priv/defs.nim Normal file
View File

@ -0,0 +1,14 @@
import
../types
const
# RLP wire-format constants (Yellow Paper, Appendix B).
MAX_LENGTH_BYTES* = 8
# 0x80: marker for the empty blob / start of blob length range.
BLOB_START_MARKER* = byte(128)
# 0xc0: marker for the empty list / start of list length range.
LIST_START_MARKER* = byte(192)
# Payloads shorter than 56 bytes use the short (single-marker) form.
THRESHOLD_LIST_LEN* = 56
LEN_PREFIXED_BLOB_MARKER* = byte(BLOB_START_MARKER + THRESHOLD_LIST_LEN - 1) # 183
LEN_PREFIXED_LIST_MARKER* = byte(LIST_START_MARKER + THRESHOLD_LIST_LEN - 1) # 247

6
eth/rlp/types.nim Normal file
View File

@ -0,0 +1,6 @@
import ranges
export ranges
type
# Owned byte buffer used by the RLP writer.
Bytes* = seq[byte]
# Non-owning view over bytes (from the `ranges` package).
BytesRange* = Range[byte]

299
eth/rlp/writer.nim Normal file
View File

@ -0,0 +1,299 @@
import
macros, types,
ranges/[memranges, ptr_arith],
object_serialization, priv/defs
export
memranges
type
# Streaming RLP encoder. `pendingLists` tracks lists opened with
# `startList` that still await items: for each, the number of items
# left and the output offset where the list's bytes begin.
RlpWriter* = object
pendingLists: seq[tuple[remainingItems, outBytes: int]]
output: Bytes
# Raised by `finish` when a started list was not fully populated.
PrematureFinalizationError* = object of Exception
# Structural requirements for integer-like types the writer can encode.
IntLike* = concept x, y
type T = type(x)
# arithmetic ops
x + y is T
x * y is T
x - y is T
x div y is T
x mod y is T
# some int compatibility required for big endian encoding:
x shr int is T
x shl int is T
x and 0xff is int
x < 128 is bool
Integer* = SomeInteger # or IntLike
const
# Records are wrapped in an RLP list by default (see appendRecordType).
wrapObjsInList* = true
# Number of bytes in the minimal big-endian encoding of `num` (0 for 0).
proc bytesNeeded(num: Integer): int =
type IntType = type(num)
var n = num
while n != IntType(0):
inc result
n = n shr 8
# Writes `number` big-endian into pre-existing bytes, ending at lastByteIdx.
proc writeBigEndian(outStream: var Bytes, number: Integer,
lastByteIdx: int, numberOfBytes: int) =
mixin `and`, `shr`
var n = number
for i in countdown(lastByteIdx, lastByteIdx - int(numberOfBytes) + 1):
outStream[i] = byte(n and 0xff)
n = n shr 8
# Convenience overload: grows the stream and appends the encoding.
proc writeBigEndian(outStream: var Bytes, number: Integer,
numberOfBytes: int) {.inline.} =
outStream.setLen(outStream.len + numberOfBytes)
outStream.writeBigEndian(number, outStream.len - 1, numberOfBytes)
# Emits an RLP length header: short form (marker+count) for counts
# below 56, otherwise a length-of-length marker followed by the count.
proc writeCount(bytes: var Bytes, count: int, baseMarker: byte) =
if count < THRESHOLD_LIST_LEN:
bytes.add(baseMarker + byte(count))
else:
let
origLen = bytes.len
lenPrefixBytes = count.bytesNeeded
bytes.setLen(origLen + int(lenPrefixBytes) + 1)
bytes[origLen] = baseMarker + (THRESHOLD_LIST_LEN - 1) + byte(lenPrefixBytes)
bytes.writeBigEndian(count, bytes.len - 1, lenPrefixBytes)
# Appends a BytesRange to a Bytes buffer.
proc add(outStream: var Bytes, newChunk: BytesRange) =
let prevLen = outStream.len
outStream.setLen(prevLen + newChunk.len)
# XXX: Use copyMem here
for i in 0 ..< newChunk.len:
outStream[prevLen + i] = newChunk[i]
{.this: self.}
{.experimental.}
using
self: var RlpWriter
# Creates an empty writer with no pending lists.
proc initRlpWriter*: RlpWriter =
newSeq(result.pendingLists, 0)
newSeq(result.output, 0)
# Decrement-and-return helper used to count down pending list items.
proc decRet(n: var int, delta: int): int =
n -= delta
return n
# Called after every appended item: closes any lists whose final item
# has just been written by retro-fitting the list length prefix in
# front of the bytes emitted since `startList`.
proc maybeClosePendingLists(self) =
while pendingLists.len > 0:
let lastListIdx = pendingLists.len - 1
assert pendingLists[lastListIdx].remainingItems >= 1
if decRet(pendingLists[lastListIdx].remainingItems, 1) == 0:
# A list have been just finished. It was started in `startList`.
let listStartPos = pendingLists[lastListIdx].outBytes
pendingLists.setLen lastListIdx
# How many bytes were written since the start?
let listLen = output.len - listStartPos
# Compute the number of bytes required to write down the list length
let totalPrefixBytes = if listLen < int(THRESHOLD_LIST_LEN): 1
else: int(listLen.bytesNeeded) + 1
# Shift the written data to make room for the prefix length
output.setLen(output.len + totalPrefixBytes)
let outputBaseAddr = output.baseAddr
moveMem(outputBaseAddr.shift(listStartPos + totalPrefixBytes),
outputBaseAddr.shift(listStartPos),
listLen)
# Write out the prefix length
if listLen < THRESHOLD_LIST_LEN:
output[listStartPos] = LIST_START_MARKER + byte(listLen)
else:
let listLenBytes = totalPrefixBytes - 1
output[listStartPos] = LEN_PREFIXED_LIST_MARKER + byte(listLenBytes)
output.writeBigEndian(listLen, listStartPos + listLenBytes, listLenBytes)
else:
# The currently open list is not finished yet. Nothing to do.
return
# Appends pre-encoded list payload bytes with a fresh list header.
proc appendRawList(self; bytes: BytesRange) =
output.writeCount(bytes.len, LIST_START_MARKER)
output.add(bytes)
maybeClosePendingLists()
# Appends already-encoded RLP bytes verbatim (counts as one item).
proc appendRawBytes*(self; bytes: BytesRange) =
output.add(bytes)
maybeClosePendingLists()
# Opens a list of exactly `listSize` items; it is closed automatically
# once that many items have been appended.
proc startList*(self; listSize: int) =
if listSize == 0:
appendRawList(BytesRange())
else:
pendingLists.add((listSize, output.len))
# Core blob emitter: single small byte is emitted as itself, otherwise
# a length header followed by the raw data.
template appendBlob(self; data, startMarker) =
mixin baseAddr
if data.len == 1 and byte(data[0]) < BLOB_START_MARKER:
self.output.add byte(data[0])
else:
self.output.writeCount(data.len, startMarker)
let startPos = output.len
self.output.setLen(startPos + data.len)
copyMem(shift(baseAddr(self.output), startPos),
baseAddr(data),
data.len)
maybeClosePendingLists()
# appendImpl/appendBlob overloads route every blob-like type through
# the template above.
proc appendImpl(self; data: string) =
appendBlob(self, data, BLOB_START_MARKER)
proc appendBlob(self; data: openarray[byte]) =
appendBlob(self, data, BLOB_START_MARKER)
proc appendBlob(self; data: openarray[char]) =
appendBlob(self, data, BLOB_START_MARKER)
proc appendBytesRange(self; data: BytesRange) =
appendBlob(self, data, BLOB_START_MARKER)
proc appendImpl(self; data: MemRange) =
appendBlob(self, data, BLOB_START_MARKER)
# Encodes an integer: 0 -> empty blob, <128 -> the byte itself,
# otherwise a length header plus minimal big-endian bytes.
proc appendInt(self; i: Integer) =
# this is created as a separate proc as an extra precaution against
# any overloading resolution problems when matching the IntLike concept.
type IntType = type(i)
if i == IntType(0):
self.output.add BLOB_START_MARKER
elif i < BLOB_START_MARKER.Integer:
self.output.add byte(i)
else:
let bytesNeeded = i.bytesNeeded
self.output.writeCount(bytesNeeded, BLOB_START_MARKER)
self.output.writeBigEndian(i, bytesNeeded)
self.maybeClosePendingLists()
proc appendFloat(self; data: float64) =
# This is not covered in the RLP spec, but Geth uses Go's
# `math.Float64bits`, which is defined here:
# https://github.com/gopherjs/gopherjs/blob/master/compiler/natives/src/math/math.go
let uintWords = cast[ptr UncheckedArray[uint32]](unsafeAddr data)
let uint64bits = (uint64(uintWords[1]) shl 32) or uint64(uintWords[0])
self.appendInt(uint64bits)
template appendImpl(self; i: Integer) =
appendInt(self, i)
# Enums and bools are encoded by their integer value.
template appendImpl(self; e: enum) =
appendImpl(self, int(e))
template appendImpl(self; b: bool) =
appendImpl(self, int(b))
# openarrays of bytes/chars are blobs; other element types become lists.
proc appendImpl[T](self; listOrBlob: openarray[T]) =
mixin append
# TODO: This append proc should be overloaded by `openarray[byte]` after
# nim bug #7416 is fixed.
when T is (byte or char):
self.appendBlob(listOrBlob)
else:
self.startList listOrBlob.len
for i in 0 ..< listOrBlob.len:
self.append listOrBlob[i]
# Encodes a record field by field via `enumerateRlpFields`, optionally
# (by default) wrapped in an RLP list sized at compile time.
proc appendRecordType*(self; obj: object|tuple, wrapInList = wrapObjsInList) =
mixin enumerateRlpFields, append
if wrapInList:
self.startList(static obj.type.rlpFieldsCount)
template op(field) =
when hasCustomPragma(field, rlpCustomSerialization):
# Custom serializers also receive the enclosing object.
append(self, obj, field)
else:
append(self, field)
enumerateRlpFields(obj, op)
proc appendImpl(self; data: object) {.inline.} =
# TODO: This append proc should be overloaded by `BytesRange` after
# nim bug #7416 is fixed.
when data is BytesRange:
self.appendBytesRange(data)
else:
self.appendRecordType(data)
proc appendImpl(self; data: tuple) {.inline.} =
self.appendRecordType(data)
# We define a single `append` template with a pretty low specifity
# score in order to facilitate easier overloading with user types:
template append*[T](w: var RlpWriter; data: T) =
when data is float64:
# XXX: This works around an overloading bug.
# Apparently, integer literals will be converted to `float64`
# values with higher precedence than the generic match to Integer
appendFloat(w, data)
else:
appendImpl(w, data)
# Convenience constructor: a writer with a list of `listSize` already open.
proc initRlpList*(listSize: int): RlpWriter =
result = initRlpWriter()
startList(result, listSize)
# TODO: This should return a lent value
# Returns the accumulated encoding; raises if a started list is unfinished.
proc finish*(self): Bytes =
if pendingLists.len > 0:
raise newException(PrematureFinalizationError,
"Insufficient number of elements written to a started list")
result = output
# One-shot encoding of any appendable value.
proc encode*[T](v: T): Bytes =
mixin append
var writer = initRlpWriter()
writer.append(v)
return writer.finish
# One-shot integer encoding (avoids overload-resolution surprises).
proc encodeInt*(i: Integer): Bytes =
var writer = initRlpWriter()
writer.appendInt(i)
return writer.finish
# Encodes all arguments as a single RLP list.
macro encodeList*(args: varargs[untyped]): Bytes =
var
listLen = args.len
writer = genSym(nskVar, "rlpWriter")
body = newStmtList()
append = bindSym("append", brForceOpen)
for arg in args:
body.add quote do:
`append`(`writer`, `arg`)
result = quote do:
var `writer` = initRlpList(`listLen`)
`body`
finish(`writer`)
when false:
# XXX: Currently fails with a malformed AST error on the args.len expression
template encodeList*(args: varargs[untyped]): BytesRange =
mixin append
var writer = initRlpList(args.len)
for arg in args:
writer.append(arg)
writer.finish

6
eth/trie.nim Normal file
View File

@ -0,0 +1,6 @@
import
trie/[hexary, sparse_binary]
export
hexary, sparse_binary

View File

@ -0,0 +1,18 @@
type
  StorageError* = object of Exception
    ## Raised whenever a storage backend fails to initialise or to
    ## read, write, search for, or delete a key.

template raiseStorageInitError* =
  ## Signals that the storage backend could not be set up.
  raise newException(StorageError, "failure to initialize storage")

template raiseKeyReadError*(key: auto) =
  ## Signals a failed read of `key`.
  raise newException(StorageError, "failed to read key " & $key)

template raiseKeyWriteError*(key: auto) =
  ## Signals a failed write of `key`.
  raise newException(StorageError, "failed to write key " & $key)

template raiseKeySearchError*(key: auto) =
  ## Signals a failed existence lookup for `key`.
  raise newException(StorageError, "failure during search for key " & $key)

template raiseKeyDeletionError*(key: auto) =
  ## Signals a failed deletion of `key`.
  raise newException(StorageError, "failure to delete key " & $key)

View File

@ -0,0 +1,45 @@
import
ranges, tables, sets,
eth/trie/db
type
  CachingDB* = ref object of RootObj
    ## Write-back cache layered over a `TrieDatabaseRef`. Reads consult
    ## the cache first; writes and deletions are buffered until `commit`
    ## flushes them to the backing store.
    backing: TrieDatabaseRef
    changed: Table[seq[byte], seq[byte]]  # pending writes, keyed by raw bytes
    deleted: HashSet[seq[byte]]           # pending deletions

proc newCachingDB*(backing: TrieDatabaseRef): CachingDB =
  ## Creates an empty cache layer over `backing`.
  result.new()
  result.backing = backing
  result.changed = initTable[seq[byte], seq[byte]]()
  result.deleted = initSet[seq[byte]]()

proc get*(db: CachingDB, key: openarray[byte]): seq[byte] =
  ## Returns the buffered value for `key`, falling back to the backing
  ## store when the key is neither buffered nor marked as deleted.
  let key = @key
  if key in db.changed:
    # Bug fix: the previous code tested `result.len == 0` and fell
    # through to the backing store, so an explicitly stored empty
    # value was shadowed by stale backing data.
    result = db.changed[key]
  elif key notin db.deleted:
    result = db.backing.get(key)

proc put*(db: CachingDB, key, value: openarray[byte]) =
  ## Buffers a write; cancels any pending deletion of the same key.
  let key = @key
  db.deleted.excl(key)
  db.changed[key] = @value

proc contains*(db: CachingDB, key: openarray[byte]): bool =
  ## True when `key` is buffered, or exists in the backing store and
  ## is not marked as deleted.
  let key = @key
  result = key in db.changed
  if not result and key notin db.deleted:
    result = db.backing.contains(key)

proc del*(db: CachingDB, key: openarray[byte]) =
  ## Buffers a deletion; drops any pending write for the same key.
  let key = @key
  db.changed.del(key)
  db.deleted.incl(key)

proc commit*(db: CachingDB) =
  ## Flushes pending deletions, then pending writes, to the backing store.
  for k in db.deleted:
    db.backing.del(k)
  for k, v in db.changed:
    db.backing.put(k, v)

View File

@ -0,0 +1,164 @@
import os, ranges, eth/trie/[trie_defs, db_tracing]
import backend_defs
# Platform-specific name of the LMDB shared library loaded at runtime.
when defined(windows):
const Lib = "lmdb.dll"
elif defined(macosx):
const Lib = "liblmdb.dylib"
else:
const Lib = "liblmdb.so"
const
# MDB_NOSUBDIR: treat the path as a file, not a directory.
MDB_NOSUBDIR = 0x4000
# LMDB's "key not found" return code.
MDB_NOTFOUND = -30798
when defined(cpu64):
const LMDB_MAP_SIZE = 1024'u64 * 1024'u64 * 1024'u64 * 10'u64 # 10TB enough?
else:
const LMDB_MAP_SIZE = 1024'u64 * 1024'u64 * 1024'u64 # 32bit limitation
type
MDB_Env = distinct pointer
MDB_Txn = distinct pointer
MDB_Dbi = distinct cuint
# Key/value descriptor passed to the C API (length + data pointer).
MDB_val = object
mv_size: csize
mv_data: pointer
# this is only a subset of LMDB API needed in nimbus
proc mdb_env_create(env: var MDB_Env): cint {.cdecl, dynlib: Lib, importc: "mdb_env_create".}
proc mdb_env_open(env: MDB_Env, path: cstring, flags: cuint, mode: cint): cint {.cdecl, dynlib: Lib, importc: "mdb_env_open".}
proc mdb_txn_begin(env: MDB_Env, parent: MDB_Txn, flags: cuint, txn: var MDB_Txn): cint {.cdecl, dynlib: Lib, importc: "mdb_txn_begin".}
proc mdb_txn_commit(txn: MDB_Txn): cint {.cdecl, dynlib: Lib, importc: "mdb_txn_commit".}
proc mdb_dbi_open(txn: MDB_Txn, name: cstring, flags: cuint, dbi: var MDB_Dbi): cint {.cdecl, dynlib: Lib, importc: "mdb_dbi_open".}
proc mdb_dbi_close(env: MDB_Env, dbi: MDB_Dbi) {.cdecl, dynlib: Lib, importc: "mdb_dbi_close".}
proc mdb_env_close(env: MDB_Env) {.cdecl, dynlib: Lib, importc: "mdb_env_close".}
proc mdb_get(txn: MDB_Txn, dbi: MDB_Dbi, key: var MDB_val, data: var MDB_val): cint {.cdecl, dynlib: Lib, importc: "mdb_get".}
proc mdb_del(txn: MDB_Txn, dbi: MDB_Dbi, key: var MDB_val, data: ptr MDB_val): cint {.cdecl, dynlib: Lib, importc: "mdb_del".}
proc mdb_put(txn: MDB_Txn, dbi: MDB_Dbi, key: var MDB_val, data: var MDB_val, flags: cuint): cint {.cdecl, dynlib: Lib, importc: "mdb_put".}
proc mdb_env_set_mapsize(env: MDB_Env, size: uint64): cint {.cdecl, dynlib: Lib, importc: "mdb_env_set_mapsize".}
type
# LMDB-backed key/value store for the trie database.
LmdbChainDB* = ref object of RootObj
env: MDB_Env
txn: MDB_Txn
dbi: MDB_Dbi
# When true, the user controls the transaction via txBegin/txCommit.
manualCommit: bool
ChainDB* = LmdbChainDB
# call txBegin and txCommit if you want to disable auto-commit
# Starts a transaction and opens the default DBI. With the default
# argument the user takes over commit control; internal callers pass
# `false` so a pending manual transaction is reused.
proc txBegin*(db: ChainDB, manualCommit = true): bool =
result = true
if manualCommit:
db.manualCommit = true
else:
# Inside a manual transaction: nothing to begin.
if db.manualCommit: return
result = mdb_txn_begin(db.env, MDB_Txn(nil), 0, db.txn) == 0
result = result and mdb_dbi_open(db.txn, nil, 0, db.dbi) == 0
# Commits the current transaction; mirror image of txBegin.
proc txCommit*(db: ChainDB, manualCommit = true): bool =
result = true
if manualCommit:
db.manualCommit = false
else:
if db.manualCommit: return
result = mdb_txn_commit(db.txn) == 0
mdb_dbi_close(db.env, db.dbi)
# Wraps a byte buffer in an MDB_val. NOTE(review): takes the address of
# val[0] — callers must guard against empty input (they currently do).
proc toMdbVal(val: openArray[byte]): MDB_Val =
result.mv_size = val.len
result.mv_data = unsafeAddr val[0]
# Reads `key`; returns an empty seq when absent. Raises StorageError
# (via raiseKeyReadError) on any other LMDB failure.
proc get*(db: ChainDB, key: openarray[byte]): seq[byte] =
if key.len == 0: return
var
dbKey = toMdbVal(key)
dbVal: MDB_val
if not db.txBegin(false):
raiseKeyReadError(key)
var errCode = mdb_get(db.txn, db.dbi, dbKey, dbVal)
if not(errCode == 0 or errCode == MDB_NOTFOUND):
raiseKeyReadError(key)
if dbVal.mv_size > 0 and errCode == 0:
result = newSeq[byte](dbVal.mv_size.int)
copyMem(result[0].addr, dbVal.mv_data, result.len)
else:
result = @[]
traceGet key, result
if not db.txCommit(false):
raiseKeyReadError(key)
proc put*(db: ChainDB, key, value: openarray[byte]) =
  ## Store `value` under `key`. Empty keys or values are silently ignored.
  ## Raises KeyWriteError on any LMDB/transaction failure.
  tracePut key, value
  if key.len == 0 or value.len == 0: return
  var
    dbKey = toMdbVal(key)
    dbVal = toMdbVal(value)
  if not db.txBegin(false):
    raiseKeyWriteError(key)
  var ok = mdb_put(db.txn, db.dbi, dbKey, dbVal, 0) == 0
  if not ok:
    raiseKeyWriteError(key)
  if not db.txCommit(false):
    raiseKeyWriteError(key)
proc contains*(db: ChainDB, key: openarray[byte]): bool =
  ## True when `key` is present; false for an empty key or a missing entry.
  ## Fix: previously any LMDB error was silently reported as "not present".
  ## Now, mirroring `get`/`del`, an error code other than MDB_NOTFOUND
  ## raises KeySearchError instead of masking a backend failure.
  if key.len == 0: return
  var
    dbKey = toMdbVal(key)
    dbVal: MDB_val
  if not db.txBegin(false):
    raiseKeySearchError(key)
  let errCode = mdb_get(db.txn, db.dbi, dbKey, dbVal)
  if not(errCode == 0 or errCode == MDB_NOTFOUND):
    raiseKeySearchError(key)
  result = errCode == 0
  if not db.txCommit(false):
    raiseKeySearchError(key)
proc del*(db: ChainDB, key: openarray[byte]) =
  ## Delete `key` (no-op for an empty key; missing keys are tolerated via
  ## MDB_NOTFOUND). Raises KeyDeletionError on any other LMDB failure.
  traceDel key
  if key.len == 0: return
  var
    dbKey = toMdbVal(key)
  if not db.txBegin(false):
    raiseKeyDeletionError(key)
  var errCode = mdb_del(db.txn, db.dbi, dbKey, nil)
  if not(errCode == 0 or errCode == MDB_NOTFOUND):
    raiseKeyDeletionError(key)
  if not db.txCommit(false):
    raiseKeyDeletionError(key)
proc close*(db: ChainDB) =
  ## Tear down the LMDB environment; the handle must not be used afterwards.
  mdb_env_close(db.env)
proc newChainDB*(basePath: string): ChainDB =
  ## Create/open an LMDB-backed chain DB at `basePath/nimbus.db` and seed it
  ## with the canonical empty-RLP entry. Raises StorageInitError on failure.
  result.new()
  let dataDir = basePath / "nimbus.db"
  var ok = mdb_env_create(result.env) == 0
  if not ok: raiseStorageInitError()
  ok = mdb_env_set_mapsize(result.env, LMDB_MAP_SIZE) == 0
  if not ok: raiseStorageInitError()
  # file mode ignored on windows
  ok = mdb_env_open(result.env, dataDir, MDB_NOSUBDIR.cuint, 0o664.cint) == 0
  if not ok: raiseStorageInitError()
  result.put(emptyRlpHash.data, emptyRlp)

View File

@ -0,0 +1,51 @@
import os, rocksdb, ranges, eth/trie/[trie_defs, db_tracing]
import backend_defs
type
  RocksChainDB* = ref object of RootObj
    ## Chain database backed by RocksDB.
    store: RocksDBInstance  # underlying RocksDB handle

  ChainDB* = RocksChainDB   # backend-neutral alias used by the rest of the code
proc get*(db: ChainDB, key: openarray[byte]): seq[byte] =
  ## Fetch the value stored under `key`. A missing key (ok=false with an
  ## empty error string) yields an empty seq; any real backend error
  ## raises KeyReadError.
  let lookup = db.store.getBytes(key)
  if lookup.ok:
    result = lookup.value
    traceGet key, result
  elif lookup.error.len != 0:
    raiseKeyReadError(key)
proc put*(db: ChainDB, key, value: openarray[byte]) =
  ## Store `value` under `key`; raises KeyWriteError if the backend rejects it.
  tracePut key, value
  let status = db.store.put(key, value)
  if not status.ok:
    raiseKeyWriteError(key)
proc contains*(db: ChainDB, key: openarray[byte]): bool =
  ## True when `key` exists; raises KeySearchError if the lookup itself fails.
  let status = db.store.contains(key)
  if status.ok:
    result = status.value
  else:
    raiseKeySearchError(key)
proc del*(db: ChainDB, key: openarray[byte]) =
  ## Remove `key` from the store; raises KeyDeletionError on backend failure.
  traceDel key
  let status = db.store.del(key)
  if not status.ok:
    raiseKeyDeletionError(key)
proc close*(db: ChainDB) =
  ## Close the underlying RocksDB instance.
  db.store.close
proc newChainDB*(basePath: string): ChainDB =
  ## Open a RocksDB-backed chain DB under `basePath` (data/ and backups/
  ## subdirectories are created) and seed it with the empty-RLP entry.
  ## Raises StorageInitError when the store cannot be initialized.
  result.new()
  let
    dataDir = basePath / "data"
    backupsDir = basePath / "backups"
  createDir(dataDir)
  createDir(backupsDir)
  let s = result.store.init(dataDir, backupsDir)
  if not s.ok: raiseStorageInitError()
  put(result, emptyRlpHash.data, emptyRlp)

View File

@ -0,0 +1,131 @@
import
os, sqlite3, ranges, ranges/ptr_arith, eth/trie/[db_tracing, trie_defs],
backend_defs
type
  SqliteChainDB* = ref object of RootObj
    ## Chain database backed by a single SQLite table of (key, value) blobs.
    store: PSqlite3                           # sqlite connection handle
    selectStmt, insertStmt, deleteStmt: PStmt # prepared statements, reused per call

  ChainDB* = SqliteChainDB  # backend-neutral alias used by the rest of the code

# forward declaration: newChainDB seeds the store via put
proc put*(db: ChainDB, key, value: openarray[byte])
proc newChainDB*(basePath: string, inMemory = false): ChainDB =
  ## Open (or create) the SQLite chain DB at `basePath/nimbus.db`, or an
  ## in-memory database when `inMemory` is true. Creates the trie_nodes
  ## table, prepares the select/insert/delete statements and seeds the
  ## empty-RLP entry. Raises StorageInitError on any sqlite failure.
  result.new()
  let dbPath = if inMemory: ":memory:" else: basePath / "nimbus.db"
  var s = sqlite3.open(dbPath, result.store)
  if s != SQLITE_OK:
    raiseStorageInitError()

  # NOTE: these templates close over `result` (the DB under construction).
  template execQuery(q: string) =
    var s: Pstmt
    if prepare_v2(result.store, q, q.len.int32, s, nil) == SQLITE_OK:
      if step(s) != SQLITE_DONE or finalize(s) != SQLITE_OK:
        raiseStorageInitError()
    else:
      raiseStorageInitError()

  # TODO: check current version and implement schema versioning
  execQuery "PRAGMA user_version = 1;"
  execQuery """
    CREATE TABLE IF NOT EXISTS trie_nodes(
      key BLOB PRIMARY KEY,
      value BLOB
    );
  """

  template prepare(q: string): PStmt =
    var s: Pstmt
    if prepare_v2(result.store, q, q.len.int32, s, nil) != SQLITE_OK:
      raiseStorageInitError()
    s

  result.selectStmt = prepare "SELECT value FROM trie_nodes WHERE key = ?;"

  # UPSERT syntax requires sqlite >= 3.24.0; fall back to INSERT OR REPLACE
  if sqlite3.libversion_number() < 3024000:
    result.insertStmt = prepare """
      INSERT OR REPLACE INTO trie_nodes(key, value) VALUES (?, ?);
    """
  else:
    result.insertStmt = prepare """
      INSERT INTO trie_nodes(key, value) VALUES (?, ?)
      ON CONFLICT(key) DO UPDATE SET value = excluded.value;
    """

  result.deleteStmt = prepare "DELETE FROM trie_nodes WHERE key = ?;"
  put(result, emptyRlpHash.data, emptyRlp)
proc bindBlob(s: Pstmt, n: int, blob: openarray[byte]): int32 =
  ## Bind `blob` to the n-th (1-based) parameter of a prepared statement.
  ## The nil destructor means sqlite does NOT copy the data (SQLITE_STATIC
  ## semantics) — the buffer must stay alive until the statement is stepped.
  sqlite3.bind_blob(s, n.int32, blob.baseAddr, blob.len.int32, nil)
proc get*(db: ChainDB, key: openarray[byte]): seq[byte] =
  ## Look up `key` via the prepared select statement; returns an empty seq
  ## when no row matches. Raises KeyReadError on any sqlite error.
  template check(op) =
    let status = op
    if status != SQLITE_OK: raiseKeyReadError(key)

  check reset(db.selectStmt)
  check clearBindings(db.selectStmt)
  check bindBlob(db.selectStmt, 1, key)

  case step(db.selectStmt)
  of SQLITE_ROW:
    # copy the column blob out before the statement is reset/re-stepped
    var
      resStart = columnBlob(db.selectStmt, 0)
      resLen = columnBytes(db.selectStmt, 0)
    result = newSeq[byte](resLen)
    copyMem(result.baseAddr, resStart, resLen)
    traceGet key, result
  of SQLITE_DONE:
    discard
  else:
    raiseKeyReadError(key)
proc put*(db: ChainDB, key, value: openarray[byte]) =
  ## Insert or update `key` → `value` via the prepared upsert statement.
  ## Raises KeyWriteError on any sqlite error.
  tracePut key, value
  template check(op) =
    let status = op
    if status != SQLITE_OK: raiseKeyWriteError(key)

  check reset(db.insertStmt)
  check clearBindings(db.insertStmt)
  check bindBlob(db.insertStmt, 1, key)
  check bindBlob(db.insertStmt, 2, value)

  if step(db.insertStmt) != SQLITE_DONE:
    raiseKeyWriteError(key)
proc contains*(db: ChainDB, key: openarray[byte]): bool =
  ## True when a row for `key` exists. Raises KeySearchError on sqlite error.
  template check(op) =
    let status = op
    if status != SQLITE_OK: raiseKeySearchError(key)

  check reset(db.selectStmt)
  check clearBindings(db.selectStmt)
  check bindBlob(db.selectStmt, 1, key)

  case step(db.selectStmt)
  of SQLITE_ROW: result = true
  of SQLITE_DONE: result = false
  else: raiseKeySearchError(key)
proc del*(db: ChainDB, key: openarray[byte]) =
  ## Delete the row for `key` (no error if it does not exist).
  ## Raises KeyDeletionError on any sqlite error.
  traceDel key
  template check(op) =
    let status = op
    if status != SQLITE_OK: raiseKeyDeletionError(key)

  check reset(db.deleteStmt)
  check clearBindings(db.deleteStmt)
  check bindBlob(db.deleteStmt, 1, key)

  if step(db.deleteStmt) != SQLITE_DONE:
    raiseKeyDeletionError(key)
proc close*(db: ChainDB) =
  ## Close the sqlite connection and zero out the object so stale statement
  ## handles cannot be reused accidentally.
  ## NOTE(review): prepared statements are not finalized before close —
  ## sqlite3_close may return SQLITE_BUSY in that case; the result is discarded.
  discard sqlite3.close(db.store)
  reset(db[])

143
eth/trie/binaries.nim Normal file
View File

@ -0,0 +1,143 @@
import
sequtils,
ranges/[ptr_arith, bitranges], eth/rlp/types
type
  TrieNodeKind* = enum
    ## Node discriminator — also the first byte of every serialized node.
    KV_TYPE = 0
    BRANCH_TYPE = 1
    LEAF_TYPE = 2

  TrieNodeKey* = BytesRange    # 32-byte hash referencing a stored node
  TrieBitRange* = BitRange     # key path as a sequence of bits

  TrieNode* = object
    ## Parsed form of a serialized binary-trie node.
    case kind*: TrieNodeKind
    of KV_TYPE:
      keyPath*: TrieBitRange   # bit path compressed into this node
      child*: TrieNodeKey      # hash of the single child
    of BRANCH_TYPE:
      leftChild*: TrieNodeKey  # hash of the 0-bit child
      rightChild*: TrieNodeKey # hash of the 1-bit child
    of LEAF_TYPE:
      value*: BytesRange       # stored payload

  InvalidNode* = object of Exception
  ValidationError* = object of Exception
# ----------------------------------------------
template sliceToEnd*(r: TrieBitRange, index: int): TrieBitRange =
  ## Bits of `r` from `index` onward; an empty range when `index` is past the end.
  if r.len <= index: TrieBitRange() else: r[index .. ^1]
proc decodeToBinKeypath*(path: BytesRange): TrieBitRange =
  ## Decodes bytes into a sequence of 0s and 1s
  ## Used in decoding key path of a KV-NODE
  ## Layout (see encodeKVNode): a set top bit of the first nibble marks the
  ## long form whose first 4 bits are skipped; bits 2-3 then encode
  ## `len mod 4` so the padding bits in front of the payload can be dropped.
  var path = MutByteRange(path).bits
  if path[0]:
    # long form: skip the flag nibble entirely
    path = path[4..^1]
  assert path[0] == false
  assert path[1] == false
  # bits 2..3 hold the original key length modulo 4
  var bits = path[2].int shl 1
  bits = bits or path[3].int
  if path.len > 4:
    # skip the prefix nibble plus the (4 - bits) mod 4 padding bits
    result = path[4+((4 - bits) mod 4)..^1]
  else:
    result = BitRange()
proc parseNode*(node: BytesRange): TrieNode =
  ## Parse a serialized binary-trie node. The first byte selects the node
  ## kind; raises InvalidNode for empty input, unknown kinds or wrong sizes.
  # Input: a serialized node
  if node.len == 0:
    raise newException(InvalidNode, "Blank node is not a valid node type in Binary Trie")
  if node[0].ord < low(TrieNodeKind).ord or node[0].ord > high(TrieNodeKind).ord:
    raise newException(InvalidNode, "Invalid node type")
  let nodeType = node[0].TrieNodeKind
  case nodeType
  of BRANCH_TYPE:
    # 1 type byte + two 32-byte child hashes
    if node.len != 65:
      raise newException(InvalidNode, "Invalid branch node, both child node should be 32 bytes long each")
    # Output: node type, left child, right child
    result = TrieNode(kind: BRANCH_TYPE, leftChild: node[1..<33], rightChild: node[33..^1])
    assert(result.leftChild.len == 32)
    assert(result.rightChild.len == 32)
    return result
  of KV_TYPE:
    # needs at least one keypath byte before the trailing 32-byte child hash
    if node.len <= 33:
      raise newException(InvalidNode, "Invalid kv node, short of key path or child node hash")
    # Output: node type, keypath, child
    return TrieNode(kind: KV_TYPE, keyPath: decodeToBinKeypath(node[1..^33]), child: node[^32..^1])
  of LEAF_TYPE:
    if node.len == 1:
      raise newException(InvalidNode, "Invalid leaf node, can not contain empty value")
    # Output: node type, value
    return TrieNode(kind: LEAF_TYPE, value: node[1..^1])
proc encodeKVNode*(keyPath: TrieBitRange, childHash: TrieNodeKey): Bytes =
  ## Serializes a key/value node
  ## Layout: KV type byte, the bit path packed to whole bytes with a
  ## length-mod-4 prefix nibble (decoded by decodeToBinKeypath), then the
  ## 32-byte child hash. Raises ValidationError on empty path / bad hash size.
  if keyPath.len == 0:
    raise newException(ValidationError, "Key path can not be empty")
  if childHash.len != 32:
    raise newException(ValidationError, "Invalid hash len")
  # Encodes a sequence of 0s and 1s into tightly packed bytes
  # Used in encoding key path of a KV-NODE
  # KV-NODE = KV-TYPE-PREFIX + encoded keypath + 32 bytes hash
  let
    len = keyPath.len
    padding = ((not len) + 1) and 3 # modulo 4 padding
    paddedBinLen = len + padding
    prefix = len mod 4
  result = newSeq[byte](((len + padding) div 8) + 34)
  result[0] = KV_TYPE.byte
  if paddedBinLen mod 8 == 4:
    # short form: prefix nibble and the first key bits share the second byte
    var nbits = 4 - padding
    result[1] = byte(prefix shl 4) or byte.fromBits(keyPath, 0, nbits)
    for i in 0..<(len div 8):
      result[i+2] = byte.fromBits(keyPath, nbits, 8)
      inc(nbits, 8)
  else:
    # long form: flag byte (top bit set) carries the prefix on its own
    var nbits = 8 - padding
    result[1] = byte(0b1000_0000) or byte(prefix)
    result[2] = byte.fromBits(keyPath, 0, nbits)
    for i in 0..<((len-1) div 8):
      result[i+3] = byte.fromBits(keyPath, nbits, 8)
      inc(nbits, 8)
  copyMem(result[^32].addr, childHash.baseAddr, 32)
proc encodeKVNode*(keyPath: bool, childHash: TrieNodeKey): Bytes =
  ## Single-bit key path specialization: emits the fixed short-form header
  ## (prefix nibble 1) with the bit folded into the second byte.
  result = newSeq[byte](34)
  result[0] = KV_TYPE.byte
  result[1] = byte(16) or byte(keyPath)
  copyMem(result[^32].addr, childHash.baseAddr, 32)
proc encodeBranchNode*(leftChildHash, rightChildHash: TrieNodeKey): Bytes =
  ## Serializes a branch node
  ## Layout: BRANCH type byte + 32-byte left hash + 32-byte right hash.
  const
    BRANCH_TYPE_PREFIX = @[BRANCH_TYPE.byte]
  if leftChildHash.len != 32 or rightChildHash.len != 32:
    raise newException(ValidationError, "encodeBranchNode: Invalid hash len")
  result = BRANCH_TYPE_PREFIX.concat(leftChildHash, rightChildHash)
proc encodeLeafNode*(value: BytesRange | Bytes): Bytes =
  ## Serializes a leaf node
  ## Layout: LEAF type byte + payload. Empty values are rejected.
  const
    LEAF_TYPE_PREFIX = @[LEAF_TYPE.byte]
  if value.len == 0:
    raise newException(ValidationError, "Value of leaf node can not be empty")
  result = LEAF_TYPE_PREFIX.concat(value)
proc getCommonPrefixLength*(a, b: TrieBitRange): int =
  ## Number of leading bits shared by `a` and `b`.
  let limit = min(a.len, b.len)
  result = limit
  for idx in 0 ..< limit:
    if a[idx] != b[idx]:
      return idx

287
eth/trie/binary.nim Normal file
View File

@ -0,0 +1,287 @@
import
ranges/[ptr_arith, typedranges, bitranges], eth/rlp/types,
trie_defs, db, binaries, trie_utils
export
types, trie_utils
type
  DB = TrieDatabaseRef

  BinaryTrie* = object
    ## Binary (bit-keyed) Merkle trie over a key/value node store.
    db: DB                 # backing node database
    rootHash: TrieNodeKey  # hash of the current root node (zero = empty trie)

  NodeOverrideError* = object of Exception

let
  zeroHash* = zeroBytesRange  # sentinel root hash for the empty trie
proc init*(x: typedesc[BinaryTrie], db: DB,
           rootHash: BytesContainer | KeccakHash = zeroHash): BinaryTrie =
  ## Construct a trie over `db` rooted at `rootHash` (zero hash = empty trie).
  checkValidHashZ(rootHash)
  result.db = db
  result.rootHash = toRange(rootHash)

proc getDB*(t: BinaryTrie): auto = t.db
  ## Accessor for the backing database.

proc initBinaryTrie*(db: DB, rootHash: BytesContainer | KeccakHash): BinaryTrie =
  ## Convenience wrapper around `init` with an explicit root.
  init(BinaryTrie, db, rootHash)

proc initBinaryTrie*(db: DB): BinaryTrie =
  ## Convenience wrapper: an empty trie over `db`.
  init(BinaryTrie, db, zeroHash)

proc getRootHash*(self: BinaryTrie): TrieNodeKey {.inline.} =
  self.rootHash
template fetchNode(self: BinaryTrie, nodeHash: TrieNodeKey): TrieNode =
  ## Load and parse the node stored under a 32-byte hash.
  assert(nodeHash.len == 32)
  parseNode self.db.get(nodeHash.toOpenArray).toRange
proc getAux(self: BinaryTrie, nodeHash: TrieNodeKey, keyPath: TrieBitRange): BytesRange =
  ## Recursive lookup: descend from `nodeHash` along `keyPath`; returns
  ## zeroBytesRange when the key is absent or the path mismatches.
  # Empty trie
  if isZeroHash(nodeHash):
    return zeroBytesRange

  let node = self.fetchNode(nodeHash)

  # Key-value node descend
  if node.kind == LEAF_TYPE:
    # a leaf terminates the path; leftover key bits mean "not found"
    if keyPath.len != 0: return zeroBytesRange
    return node.value
  elif node.kind == KV_TYPE:
    # keyPath too short
    if keyPath.len == 0: return zeroBytesRange
    let sliceLen = min(node.keyPath.len, keyPath.len)
    if keyPath[0..<sliceLen] == node.keyPath:
      return self.getAux(node.child, keyPath.sliceToEnd(node.keyPath.len))
    else:
      return zeroBytesRange
  # Branch node descend
  elif node.kind == BRANCH_TYPE:
    # keyPath too short
    if keyPath.len == 0: return zeroBytesRange
    if keyPath[0]: # first bit == 1
      return self.getAux(node.rightChild, keyPath.sliceToEnd(1))
    else:
      return self.getAux(node.leftChild, keyPath.sliceToEnd(1))
proc get*(self: BinaryTrie, key: BytesContainer): BytesRange {.inline.} =
  ## Look up `key` (converted to a bit path); zeroBytesRange when absent.
  var keyBits = MutByteRange(key.toRange).bits
  return self.getAux(self.rootHash, keyBits)
proc hashAndSave*(self: BinaryTrie, node: BytesRange | Bytes): TrieNodeKey =
  ## Persist a serialized node under its keccak hash and return the hash.
  result = keccakHash(node)
  self.db.put(result.toOpenArray, node.toRange.toOpenArray)

# Shorthands: encode a node of each kind and persist it in one step.
template saveKV(self: BinaryTrie, keyPath: TrieBitRange | bool, child: BytesRange): untyped =
  self.hashAndsave(encodeKVNode(keyPath, child))

template saveLeaf(self: BinaryTrie, value: BytesRange): untyped =
  self.hashAndsave(encodeLeafNode(value))

template saveBranch(self: BinaryTrie, L, R: BytesRange): untyped =
  self.hashAndsave(encodeBranchNode(L, R))
# Forward declarations for the mutually recursive insert/delete helpers.
proc setBranchNode(self: BinaryTrie, keyPath: TrieBitRange, node: TrieNode,
                   value: BytesRange, deleteSubtrie = false): TrieNodeKey
proc setKVNode(self: BinaryTrie, keyPath: TrieBitRange, nodeHash: TrieNodeKey,
               node: TrieNode, value: BytesRange, deleteSubtrie = false): TrieNodeKey

const
  overrideErrorMsg =
    "Fail to set the value because the prefix of it's key is the same as existing key"
proc setAux(self: BinaryTrie, nodeHash: TrieNodeKey, keyPath: TrieBitRange,
            value: BytesRange, deleteSubtrie = false): TrieNodeKey =
  ## If deleteSubtrie is set to True, what it will do is that it take in a keyPath
  ## and traverse til the end of keyPath, then delete the whole subtrie of that node.
  ## Note: keyPath should be in binary array format, i.e., encoded by encode_to_bin()
  ## Returns the hash of the (possibly new) node replacing `nodeHash`;
  ## an empty `value` acts as deletion.
  template checkBadKeyPath(): untyped =
    # keyPath too short
    if keyPath.len == 0:
      if deleteSubtrie: return zeroHash
      else: raise newException(NodeOverrideError, overrideErrorMsg)

  template ifGoodValue(body: untyped): untyped =
    if value.len != 0: body
    else: return zeroHash

  # Empty trie
  if isZeroHash(nodeHash):
    ifGoodValue:
      return self.saveKV(keyPath, self.saveLeaf(value))

  let node = self.fetchNode(nodeHash)
  case node.kind
  of LEAF_TYPE: # Node is a leaf node
    # keyPath must match, there should be no remaining keyPath
    if keyPath.len != 0:
      raise newException(NodeOverrideError, overrideErrorMsg)
    if deleteSubtrie: return zeroHash
    ifGoodValue:
      return self.saveLeaf(value)
  of KV_TYPE: # node is a key-value node
    checkBadKeyPath()
    return self.setKVNode(keyPath, nodeHash, node, value, deleteSubtrie)
  of BRANCH_TYPE: # node is a branch node
    checkBadKeyPath()
    return self.setBranchNode(keyPath, node, value, deleteSubtrie)
  else:
    raise newException(Exception, "Invariant: This shouldn't ever happen")
proc set*(self: var BinaryTrie, key, value: distinct BytesContainer) {.inline.} =
  ## Sets the value at the given keyPath from the given node
  ## Key will be encoded into binary array format first.
  var keyBits = bits MutByteRange(key.toRange)
  self.rootHash = self.setAux(self.rootHash, keyBits, toRange(value))
proc setBranchNode(self: BinaryTrie, keyPath: TrieBitRange, node: TrieNode,
                   value: BytesRange, deleteSubtrie = false): TrieNodeKey =
  ## Update one child of a branch node, then re-normalize: a branch with a
  ## single remaining child collapses back into a KV node.
  # Which child node to update? Depends on first bit in keyPath
  var newLeftChild, newRightChild: TrieNodeKey
  if keyPath[0]: # first bit == 1
    newRightChild = self.setAux(node.rightChild, keyPath[1..^1], value, deleteSubtrie)
    newLeftChild = node.leftChild
  else:
    newLeftChild = self.setAux(node.leftChild, keyPath[1..^1], value, deleteSubtrie)
    newRightChild = node.rightChild

  let blankLeft = isZeroHash(newLeftChild)

  # Compress branch node into kv node
  if blankLeft or isZeroHash(newRightChild):
    let childNode = if blankLeft: newRightChild else: newLeftChild
    var subNode = self.fetchNode(childNode)
    # Compress (k1, (k2, NODE)) -> (k1 + k2, NODE)
    if subNode.kind == KV_TYPE:
      # exploit subNode.keyPath unused prefix bit
      # to avoid bitVector concat
      subNode.keyPath.pushFront(blankLeft)
      result = self.saveKV(subNode.keyPath, subNode.child)
    # kv node pointing to a branch node
    elif subNode.kind in {BRANCH_TYPE, LEAF_TYPE}:
      result = self.saveKV(blankLeft, childNode)
  else:
    result = self.saveBranch(newLeftChild, newRightChild)
proc setKVNode(self: BinaryTrie, keyPath: TrieBitRange, nodeHash: TrieNodeKey,
               node: TrieNode, value: BytesRange, deleteSubtrie = false): TrieNodeKey =
  ## Insert/delete under a KV node. On a path-prefix match, recurse into the
  ## child and re-compress; on mismatch, split the KV node around the first
  ## diverging bit into a branch (cases enumerated in the comment below).
  # keyPath prefixes match
  if deleteSubtrie:
    if keyPath.len < node.keyPath.len and keyPath == node.keyPath[0..<keyPath.len]:
      return zeroHash

  let sliceLen = min(node.keyPath.len, keyPath.len)

  if keyPath[0..<sliceLen] == node.keyPath:
    # Recurse into child
    let subNodeHash = self.setAux(node.child,
      keyPath.sliceToEnd(node.keyPath.len), value, deleteSubtrie)

    # If child is empty
    if isZeroHash(subNodeHash):
      return zeroHash

    let subNode = self.fetchNode(subNodeHash)

    # If the child is a key-value node, compress together the keyPaths
    # into one node
    if subNode.kind == KV_TYPE:
      return self.saveKV(node.keyPath & subNode.keyPath, subNode.child)
    else:
      return self.saveKV(node.keyPath, subNodeHash)

  # keyPath prefixes don't match. Here we will be converting a key-value node
  # of the form (k, CHILD) into a structure of one of the following forms:
  # 1. (k[:-1], (NEWCHILD, CHILD))
  # 2. (k[:-1], ((k2, NEWCHILD), CHILD))
  # 3. (k1, ((k2, CHILD), NEWCHILD))
  # 4. (k1, ((k2, CHILD), (k2', NEWCHILD))
  # 5. (CHILD, NEWCHILD)
  # 6. ((k[1:], CHILD), (k', NEWCHILD))
  # 7. ((k[1:], CHILD), NEWCHILD)
  # 8. (CHILD, (k[1:], NEWCHILD))
  else:
    let
      commonPrefixLen = getCommonPrefixLength(node.keyPath, keyPath[0..<sliceLen])
      cplenPlusOne = commonPrefixLen + 1

    # New key-value pair can not contain empty value
    # Or one can not delete non-exist subtrie
    if value.len == 0 or deleteSubtrie: return nodeHash

    var valNode, oldNode, newSub: TrieNodeKey
    # valnode: the child node that has the new value we are adding
    # Case 1: keyPath prefixes almost match, so we are in case (1), (2), (5), (6)
    if keyPath.len == cplenPlusOne:
      valNode = self.saveLeaf(value)
    # Case 2: keyPath prefixes mismatch in the middle, so we need to break
    # the keyPath in half. We are in case (3), (4), (7), (8)
    else:
      if keyPath.len <= commonPrefixLen:
        raise newException(NodeOverrideError, overrideErrorMsg)
      valNode = self.saveKV(keyPath[cplenPlusOne..^1], self.saveLeaf(value))

    # oldnode: the child node the has the old child value
    # Case 1: (1), (3), (5), (6)
    if node.keyPath.len == cplenPlusOne:
      oldNode = node.child
    # (2), (4), (6), (8)
    else:
      oldNode = self.saveKV(node.keyPath[cplenPlusOne..^1], node.child)

    # Create the new branch node (because the key paths diverge, there has to
    # be some "first bit" at which they diverge, so there must be a branch
    # node somewhere)
    if keyPath[commonPrefixLen]: # first bit == 1
      newSub = self.saveBranch(oldNode, valNode)
    else:
      newSub = self.saveBranch(valNode, oldNode)

    # Case 1: keyPath prefixes match in the first bit, so we still need
    # a kv node at the top
    # (1) (2) (3) (4)
    if commonPrefixLen != 0:
      return self.saveKV(node.keyPath[0..<commonPrefixLen], newSub)
    # Case 2: keyPath prefixes diverge in the first bit, so we replace the
    # kv node with a branch node
    # (5) (6) (7) (8)
    else:
      return newSub
template exists*(self: BinaryTrie, key: BytesContainer): bool =
  ## True when `key` has a (non-empty) value in the trie.
  self.get(toRange(key)) != zeroBytesRange

proc delete*(self: var BinaryTrie, key: BytesContainer) {.inline.} =
  ## Equals to setting the value to zeroBytesRange
  var keyBits = bits MutByteRange(key.toRange)
  self.rootHash = self.setAux(self.rootHash, keyBits, zeroBytesRange)

proc deleteSubtrie*(self: var BinaryTrie, key: BytesContainer) {.inline.} =
  ## Given a key prefix, delete the whole subtrie that starts with the key prefix.
  ## Key will be encoded into binary array format first.
  ## It will call `setAux` with `deleteSubtrie` set to true.
  var keyBits = bits MutByteRange(key.toRange)
  self.rootHash = self.setAux(self.rootHash, keyBits, zeroBytesRange, true)

# Convenience
proc rootNode*(self: BinaryTrie): BytesRange {.inline.} =
  ## Serialized bytes of the current root node.
  self.db.get(self.rootHash.toOpenArray).toRange

proc rootNode*(self: var BinaryTrie, node: BytesContainer) {.inline.} =
  ## Replace the root with `node` (persisted under its hash).
  self.rootHash = self.hashAndSave(toRange(node))

# Dictionary API
template `[]`*(self: BinaryTrie, key: BytesContainer): BytesRange =
  self.get(key)

template `[]=`*(self: var BinaryTrie, key, value: distinct BytesContainer) =
  self.set(key, value)

template contains*(self: BinaryTrie, key: BytesContainer): bool =
  self.exists(key)

176
eth/trie/branches.nim Normal file
View File

@ -0,0 +1,176 @@
import
eth/rlp/types, ranges/bitranges,
trie_defs, binary, binaries, db, trie_utils
type
  DB = TrieDatabaseRef
  InvalidKeyError* = object of Exception

template query(db: DB, nodeHash: TrieNodeKey): BytesRange =
  ## Fetch the serialized node stored under `nodeHash`.
  db.get(nodeHash.toOpenArray).toRange
proc checkIfBranchExistImpl(db: DB; nodeHash: TrieNodeKey; keyPrefix: TrieBitRange): bool =
  ## Recursive worker: does any key under `nodeHash` start with `keyPrefix`?
  if nodeHash == zeroHash:
    return false

  let node = parseNode(db.query(nodeHash))

  case node.kind:
  of LEAF_TYPE:
    # a leaf matches only when the whole prefix has been consumed
    if keyPrefix.len != 0: return false
    return true
  of KV_TYPE:
    if keyPrefix.len == 0: return true
    if keyPrefix.len < node.keyPath.len:
      # prefix ends inside this node's compressed path
      if keyPrefix == node.keyPath[0..<keyPrefix.len]: return true
      return false
    else:
      if keyPrefix[0..<node.keyPath.len] == node.keyPath:
        return checkIfBranchExistImpl(db, node.child, keyPrefix.sliceToEnd(node.keyPath.len))
      return false
  of BRANCH_TYPE:
    if keyPrefix.len == 0: return true
    if keyPrefix[0] == false:
      return checkIfBranchExistImpl(db, node.leftChild, keyPrefix.sliceToEnd(1))
    else:
      return checkIfBranchExistImpl(db, node.rightChild, keyPrefix.sliceToEnd(1))
  else:
    raise newException(Exception, "Invariant: unreachable code path")
proc checkIfBranchExist*(db: DB; rootHash: BytesContainer | KeccakHash, keyPrefix: BytesContainer): bool =
  ## Given a key prefix, return whether this prefix is
  ## the prefix of an existing key in the trie.
  checkValidHashZ(rootHash)
  var keyPrefixBits = bits MutByteRange(keyPrefix.toRange)
  checkIfBranchExistImpl(db, toRange(rootHash), keyPrefixBits)
proc getBranchImpl(db: DB; nodeHash: TrieNodeKey, keyPath: TrieBitRange, output: var seq[BytesRange]) =
  ## Recursive worker: append to `output` every serialized node on the path
  ## from `nodeHash` to the key described by `keyPath`.
  if nodeHash == zeroHash: return

  let nodeVal = db.query(nodeHash)
  let node = parseNode(nodeVal)

  case node.kind
  of LEAF_TYPE:
    if keyPath.len == 0:
      output.add nodeVal
    else:
      raise newException(InvalidKeyError, "Key too long")
  of KV_TYPE:
    if keyPath.len == 0:
      raise newException(InvalidKeyError, "Key too short")

    output.add nodeVal
    let sliceLen = min(keyPath.len, node.keyPath.len)
    if keyPath[0..<sliceLen] == node.keyPath:
      getBranchImpl(db, node.child, keyPath.sliceToEnd(sliceLen), output)
  of BRANCH_TYPE:
    if keyPath.len == 0:
      raise newException(InvalidKeyError, "Key too short")

    output.add nodeVal
    if keyPath[0] == false:
      getBranchImpl(db, node.leftChild, keyPath.sliceToEnd(1), output)
    else:
      getBranchImpl(db, node.rightChild, keyPath.sliceToEnd(1), output)
  else:
    raise newException(Exception, "Invariant: unreachable code path")
proc getBranch*(db: DB; rootHash: BytesContainer | KeccakHash; key: BytesContainer): seq[BytesRange] =
  ## Get a long-format Merkle branch
  checkValidHashZ(rootHash)
  result = @[]
  var keyBits = bits MutByteRange(key.toRange)
  getBranchImpl(db, toRange(rootHash), keyBits, result)
proc isValidBranch*(branch: seq[BytesRange], rootHash: BytesContainer | KeccakHash, key, value: BytesContainer): bool =
  ## Verify a Merkle branch: rebuild a throw-away trie from the branch nodes
  ## and check that looking up `key` from `rootHash` yields `value`.
  checkValidHashZ(rootHash)
  # branch must not be empty
  assert(branch.len != 0)

  var db = newMemoryDB()
  for node in branch:
    assert(node.len != 0)
    let nodeHash = keccakHash(node)
    db.put(nodeHash.toOpenArray, node.toOpenArray)

  var trie = initBinaryTrie(db, rootHash)
  result = trie.get(key) == toRange(value)
proc getTrieNodesImpl(db: DB; nodeHash: TrieNodeKey, output: var seq[BytesRange]): bool =
  ## Get full trie of a given root node
  ## Appends every reachable serialized node to `output`; returns false when
  ## `nodeHash` is zero or the node is not present in `db`.
  if nodeHash.isZeroHash(): return false

  var nodeVal: BytesRange
  if nodeHash.toOpenArray in db:
    nodeVal = db.query(nodeHash)
  else:
    return false

  let node = parseNode(nodeVal)

  case node.kind
  of KV_TYPE:
    output.add nodeVal
    result = getTrieNodesImpl(db, node.child, output)
  of BRANCH_TYPE:
    output.add nodeVal
    # NOTE(review): the right-subtree result overwrites the left one, so a
    # missing left subtree is not reflected in the return value — confirm
    # whether callers rely on the return value here.
    result = getTrieNodesImpl(db, node.leftChild, output)
    result = getTrieNodesImpl(db, node.rightChild, output)
  of LEAF_TYPE:
    output.add nodeVal
  else:
    raise newException(Exception, "Invariant: unreachable code path")
proc getTrieNodes*(db: DB; nodeHash: BytesContainer | KeccakHash): seq[BytesRange] =
  ## All serialized nodes reachable from `nodeHash`.
  checkValidHashZ(nodeHash)
  result = @[]
  discard getTrieNodesImpl(db, toRange(nodeHash), result)
proc getWitnessImpl*(db: DB; nodeHash: TrieNodeKey; keyPath: TrieBitRange; output: var seq[BytesRange]) =
  ## Recursive worker for getWitness: collect nodes along `keyPath` and,
  ## once the path is exhausted, the whole subtrie below the last node.
  if keyPath.len == 0:
    # NOTE(review): when getTrieNodesImpl succeeds, execution falls through
    # to the per-kind handling below with an empty keyPath — confirm this
    # double traversal is intended.
    if not getTrieNodesImpl(db, nodeHash, output): return

  if nodeHash.isZeroHash(): return

  var nodeVal: BytesRange
  if nodeHash.toOpenArray in db:
    nodeVal = db.query(nodeHash)
  else:
    return

  let node = parseNode(nodeVal)

  case node.kind
  of LEAF_TYPE:
    if keyPath.len != 0:
      raise newException(InvalidKeyError, "Key too long")
  of KV_TYPE:
    output.add nodeVal
    if keyPath.len < node.keyPath.len and node.keyPath[0..<keyPath.len] == keypath:
      if not getTrieNodesImpl(db, node.child, output): return
    # NOTE(review): this elif slices keyPath to node.keyPath.len — when
    # keyPath is shorter and its prefix mismatched above, the slice exceeds
    # keyPath's length; verify the slicing semantics of BitRange here.
    elif keyPath[0..<node.keyPath.len] == node.keyPath:
      getWitnessImpl(db, node.child, keyPath.sliceToEnd(node.keyPath.len), output)
  of BRANCH_TYPE:
    output.add nodeVal
    if keyPath[0] == false:
      getWitnessImpl(db, node.leftChild, keyPath.sliceToEnd(1), output)
    else:
      getWitnessImpl(db, node.rightChild, keyPath.sliceToEnd(1), output)
  else:
    raise newException(Exception, "Invariant: unreachable code path")
proc getWitness*(db: DB; nodeHash: BytesContainer | KeccakHash; key: BytesContainer): seq[BytesRange] =
  ## Get all witness given a keyPath prefix.
  ## Include
  ##
  ## 1. witness along the keyPath and
  ## 2. witness in the subtrie of the last node in keyPath
  checkValidHashZ(nodeHash)
  result = @[]
  var keyBits = bits MutByteRange(key.toRange)
  getWitnessImpl(db, toRange(nodeHash), keyBits, result)

229
eth/trie/db.nim Normal file
View File

@ -0,0 +1,229 @@
import
tables, hashes, sets,
nimcrypto/[hash, keccak], eth/rlp,
trie_defs, db_tracing
type
  MemDBRec = object
    ## One in-memory record: trie nodes are reference counted so identical
    ## nodes shared by several tries are kept while any reference remains.
    refCount: int
    value: Bytes

  MemoryLayer* = ref object of RootObj
    ## One transaction's pending changes: upserts in `records`,
    ## tombstones in `deleted`.
    records: Table[Bytes, MemDBRec]
    deleted: HashSet[Bytes]

  TrieDatabaseConcept* = concept DB
    mixin put, del, get
    put(var DB, KeccakHash, BytesRange)
    del(var DB, KeccakHash)
    get(DB, KeccakHash) is Bytes
    contains(DB, KeccakHash) is bool

  # XXX: poor's man vtref types
  PutProc = proc (db: RootRef, key, val: openarray[byte]) {.gcsafe.}
  GetProc = proc (db: RootRef, key: openarray[byte]): Bytes {.gcsafe.} # Must return empty seq if not found
  DelProc = proc (db: RootRef, key: openarray[byte]) {.gcsafe.}
  ContainsProc = proc (db: RootRef, key: openarray[byte]): bool {.gcsafe.}

  TrieDatabaseRef* = ref object
    ## Type-erased database handle: `obj` plus the four procs form a manual
    ## vtable; transactions stack via `mostInnerTransaction`.
    obj: RootRef
    putProc: PutProc
    getProc: GetProc
    delProc: DelProc
    containsProc: ContainsProc
    mostInnerTransaction: DbTransaction

  DbTransaction* = ref object
    ## A nested transaction: buffered changes live in `modifications` until
    ## commit (applied to the parent) or rollback (dropped).
    db: TrieDatabaseRef
    parentTransaction: DbTransaction
    modifications: MemoryLayer
    committed: bool
# Forward declarations (the bodies appear after the MemoryLayer helpers).
proc put*(db: TrieDatabaseRef, key, val: openarray[byte]) {.gcsafe.}
proc get*(db: TrieDatabaseRef, key: openarray[byte]): Bytes {.gcsafe.}
proc del*(db: TrieDatabaseRef, key: openarray[byte]) {.gcsafe.}
proc beginTransaction*(db: TrieDatabaseRef): DbTransaction {.gcsafe.}

proc keccak*(r: BytesRange): KeccakHash =
  ## Keccak-256 of the range's bytes.
  keccak256.digest r.toOpenArray
proc get*(db: MemoryLayer, key: openarray[byte]): Bytes =
  ## Value stored in this layer, or an empty seq when absent
  ## (getOrDefault yields a zero MemDBRec whose value is empty).
  result = db.records.getOrDefault(@key).value
  traceGet key, result
proc del*(db: MemoryLayer, key: openarray[byte]) =
  ## Drop one reference to `key`; the record is removed and tombstoned only
  ## when its refcount reaches zero. The empty-RLP key is never deleted.
  traceDel key

  # The database should ensure that the empty key is always active:
  if key != emptyRlpHash.data:
    # TODO: This is quite inefficient and it won't be necessary once
    # https://github.com/nim-lang/Nim/issues/7457 is developed.
    let key = @key

    db.records.withValue(key, v):
      dec v.refCount
      if v.refCount <= 0:
        db.records.del(key)
        db.deleted.incl(key)
proc contains*(db: MemoryLayer, key: openarray[byte]): bool =
  ## True when this layer holds a record for `key`.
  let lookupKey = @key
  result = db.records.hasKey(lookupKey)
proc put*(db: MemoryLayer, key, val: openarray[byte]) =
  ## Store `val` under `key`, clearing any tombstone. 32-byte keys are
  ## treated as trie-node hashes and reference counted; other keys are
  ## plain mutable entries.
  tracePut key, val

  # TODO: This is quite inefficient and it won't be necessary once
  # https://github.com/nim-lang/Nim/issues/7457 is developed.
  let key = @key

  db.deleted.excl(key)

  if key.len != 32:
    # This is not a Trie key, but a regular system mutable key
    # (e.g. the cannonical head hash). We don't need to ref count such keys.
    db.records[key] = MemDBRec(refCount: 1, value: @val)
  else:
    db.records.withValue(key, v) do:
      inc v.refCount
      if v.value != val: v.value = @val
    do:
      db.records[key] = MemDBRec(refCount: 1, value: @val)
proc newMemoryLayer: MemoryLayer =
  ## Fresh, empty modification layer.
  result.new
  result.records = initTable[Bytes, MemDBRec]()
  result.deleted = initSet[Bytes]()
proc commit(memDb: MemoryLayer, db: TrieDatabaseRef) =
  ## Replay this layer onto `db`: tombstones first, then the stored records.
  for removedKey in memDb.deleted:
    db.del(removedKey)
  for storedKey, rec in memDb.records:
    db.put(storedKey, rec.value)
proc init(db: var MemoryLayer) =
  ## (Re)initialize `db` as a fresh empty layer.
  db = newMemoryLayer()
proc newMemoryDB*: TrieDatabaseRef =
  ## Purely in-memory database: a permanently open bottom transaction holds
  ## all data (the vtable procs stay nil). Seeded with the empty-RLP entry.
  new result
  discard result.beginTransaction
  put(result, emptyRlpHash.data, emptyRlp)
template isMemoryDB(db: TrieDatabaseRef): bool =
  # Make sure this is really a MemoryDB
  db.obj == nil and
    db.mostInnerTransaction != nil and
    db.mostInnerTransaction.parentTransaction == nil
proc totalRecordsInMemoryDB*(db: TrieDatabaseRef): int =
  ## Number of records in the bottom layer; `db` must be a memory DB.
  assert isMemoryDB(db)
  db.mostInnerTransaction.modifications.records.len
iterator pairsInMemoryDB*(db: TrieDatabaseRef): (Bytes, Bytes) =
  ## Yield every (key, value) pair of a memory DB's bottom layer.
  assert isMemoryDB(db)
  for k, v in db.mostInnerTransaction.modifications.records:
    yield (k, v.value)
proc beginTransaction*(db: TrieDatabaseRef): DbTransaction =
  ## Open a new nested transaction and make it the innermost one.
  new result
  result.db = db
  init result.modifications
  result.parentTransaction = db.mostInnerTransaction
  db.mostInnerTransaction = result
proc rollback*(t: DbTransaction) =
  ## Discard the transaction's buffered changes.
  # Transactions should be handled in a strictly nested fashion.
  # Any child transaction must be committed or rolled-back before
  # its parent transactions:
  doAssert t.db.mostInnerTransaction == t and not t.committed
  t.db.mostInnerTransaction = t.parentTransaction

proc commit*(t: DbTransaction) =
  ## Apply the transaction's buffered changes to its parent (or the backend
  ## when it is the outermost transaction).
  # Transactions should be handled in a strictly nested fashion.
  # Any child transaction must be committed or rolled-back before
  # its parent transactions:
  doAssert t.db.mostInnerTransaction == t and not t.committed
  t.db.mostInnerTransaction = t.parentTransaction
  t.modifications.commit(t.db)
  t.committed = true
proc dispose*(t: DbTransaction) {.inline.} =
  ## Roll back unless already committed — intended for use with `defer`.
  if not t.committed:
    t.rollback()

proc safeDispose*(t: DbTransaction) {.inline.} =
  ## Like `dispose`, but also tolerates a nil transaction.
  if t != nil and not t.committed:
    t.rollback()
# Generic thunks instantiated per backend type by trieDB: downcast the
# type-erased RootRef and forward to the backend's own operations.
proc putImpl[T](db: RootRef, key, val: openarray[byte]) =
  mixin put
  put(T(db), key, val)

proc getImpl[T](db: RootRef, key: openarray[byte]): Bytes =
  mixin get
  return get(T(db), key)

proc delImpl[T](db: RootRef, key: openarray[byte]) =
  mixin del
  del(T(db), key)

proc containsImpl[T](db: RootRef, key: openarray[byte]): bool =
  mixin contains
  return contains(T(db), key)
proc trieDB*[T: RootRef](x: T): TrieDatabaseRef =
  ## Wrap any backend object in a type-erased TrieDatabaseRef by filling the
  ## manual vtable with thunks instantiated for T.
  mixin del, get, put
  new result
  result.obj = x
  result.putProc = putImpl[T]
  result.getProc = getImpl[T]
  result.delProc = delImpl[T]
  result.containsProc = containsImpl[T]
proc put*(db: TrieDatabaseRef, key, val: openarray[byte]) =
  ## Write into the innermost open transaction, or straight to the backend
  ## when no transaction is open.
  var t = db.mostInnerTransaction
  if t != nil:
    t.modifications.put(key, val)
  else:
    db.putProc(db.obj, key, val)
proc get*(db: TrieDatabaseRef, key: openarray[byte]): Bytes =
  ## Read through the transaction stack, innermost first; a tombstone stops
  ## the search. Falls back to the backend when no layer has the key.
  ## NOTE: a layer's record with an empty value is indistinguishable from
  ## "absent" here (result.len > 0 test), so the search continues past it.
  # TODO: This is quite inefficient and it won't be necessary once
  # https://github.com/nim-lang/Nim/issues/7457 is developed.
  let key = @key

  var t = db.mostInnerTransaction
  while t != nil:
    result = t.modifications.records.getOrDefault(key).value
    if result.len > 0 or key in t.modifications.deleted:
      return
    t = t.parentTransaction

  if db.getProc != nil:
    result = db.getProc(db.obj, key)
proc del*(db: TrieDatabaseRef, key: openarray[byte]) =
  ## Delete within the innermost open transaction, or directly on the
  ## backend when no transaction is open.
  var t = db.mostInnerTransaction
  if t != nil:
    t.modifications.del(key)
  else:
    db.delProc(db.obj, key)
proc contains*(db: TrieDatabaseRef, key: openarray[byte]): bool =
  ## Membership through the transaction stack, innermost first; a tombstone
  ## terminates the search with false. Falls back to the backend.
  # TODO: This is quite inefficient and it won't be necessary once
  # https://github.com/nim-lang/Nim/issues/7457 is developed.
  let key = @key

  var t = db.mostInnerTransaction
  while t != nil:
    result = key in t.modifications.records
    if result or key in t.modifications.deleted:
      return
    t = t.parentTransaction

  if db.containsProc != nil:
    result = db.containsProc(db.obj, key)

25
eth/trie/db_tracing.nim Normal file
View File

@ -0,0 +1,25 @@
const
  db_tracing* {.strdefine.} = "off"  # compile-time switch: -d:db_tracing=on

var
  dbTracingEnabled* = true  # runtime gate, only consulted when compiled in

when db_tracing in ["on", "1"]:
  import nimcrypto/utils

  # Hex-dump every DB access to stdout.
  template traceGet*(k, v) =
    if dbTracingEnabled:
      echo "GET ", toHex(k), " = ", toHex(v) # rlpFromBytes(@v.toRange).inspect

  template tracePut*(k, v) =
    if dbTracingEnabled:
      echo "PUT ", toHex(k), " = ", toHex(v) # rlpFromBytes(@v.toRange).inspect

  template traceDel*(k) =
    if dbTracingEnabled:
      echo "DEL ", toHex(k)
else:
  # Tracing compiled out: the templates expand to nothing.
  template traceGet*(k, v) = discard
  template tracePut*(k, v) = discard
  template traceDel*(k) = discard

649
eth/trie/hexary.nim Normal file
View File

@ -0,0 +1,649 @@
import
tables,
nimcrypto/[keccak, hash, utils], ranges/ptr_arith, eth/rlp,
trie_defs, nibbles, trie_utils as trieUtils, db
type
  TrieNodeKey = object
    hash: KeccakHash
    usedBytes: uint8     # 32 for a real hash key; < 32 for an inlined short node
  DB = TrieDatabaseRef
  HexaryTrie* = object
    ## Merkle Patricia (hexary) trie over a key-value database.
    db*: DB
    root: TrieNodeKey
    isPruning: bool      # when true, superseded nodes are deleted from `db`
  SecureHexaryTrie* = distinct HexaryTrie
  TrieNode = Rlp
  TrieError* = object of Exception
  CorruptedTrieError* = object of TrieError
  PersistenceFailure* = object of TrieError
template len(key: TrieNodeKey): int =
  key.usedBytes.int
proc keccak*(r: BytesRange): KeccakHash =
  ## Keccak-256 digest of the raw bytes in `r`.
  keccak256.digest r.toOpenArray
template asDbKey(k: TrieNodeKey): untyped =
  # Only full 32-byte hashes are ever used as database keys.
  assert k.usedBytes == 32
  k.hash.data
proc expectHash(r: Rlp): BytesRange =
  ## Reads an RLP blob and checks it has the exact length of a Keccak
  ## hash; raises RlpTypeMismatch otherwise.
  result = r.toBytes
  if result.len != 32:
    raise newException(RlpTypeMismatch,
      "RLP expected to be a Keccak hash value, but has an incorrect length")
proc dbPut(db: DB, data: BytesRange): TrieNodeKey {.gcsafe.}
template get(db: DB, key: Rlp): BytesRange =
  # Fetches the node whose hash is stored in RLP blob `key`.
  db.get(key.expectHash.toOpenArray).toRange
converter toTrieNodeKey(hash: KeccakHash): TrieNodeKey =
  # NOTE(review): implicit converter — private, so the surprise surface
  # is limited to this module.
  result.hash = hash
  result.usedBytes = 32
proc initHexaryTrie*(db: DB, rootHash: KeccakHash, isPruning = true): HexaryTrie =
  ## Opens an existing trie rooted at `rootHash`.
  result.db = db
  result.root = rootHash
  result.isPruning = isPruning
template initSecureHexaryTrie*(db: DB, rootHash: KeccakHash, isPruning = true): SecureHexaryTrie =
  SecureHexaryTrie initHexaryTrie(db, rootHash, isPruning)
proc initHexaryTrie*(db: DB, isPruning = true): HexaryTrie =
  ## Creates an empty trie; stores the empty-RLP root node in `db`.
  result.db = db
  result.root = result.db.dbPut(emptyRlp.toRange)
  result.isPruning = isPruning
template initSecureHexaryTrie*(db: DB, isPruning = true): SecureHexaryTrie =
  SecureHexaryTrie initHexaryTrie(db, isPruning)
proc rootHash*(t: HexaryTrie): KeccakHash =
  ## Hash of the current root node.
  t.root.hash
proc rootHashHex*(t: HexaryTrie): string =
  ## Hex string form of the current root hash.
  $t.root.hash
template prune(t: HexaryTrie, x: openArray[byte]) =
  # Deletes a superseded node, but only when pruning is enabled.
  if t.isPruning: t.db.del(x)
proc isPruning*(t: HexaryTrie): bool =
  t.isPruning
proc getLocalBytes(x: TrieNodeKey): BytesRange =
  ## This proc should be used on nodes using the optimization
  ## of short values within the key.
  assert x.usedBytes < 32
  when defined(rangesEnableUnsafeAPI):
    result = unsafeRangeConstruction(x.data, x.usedBytes)
  else:
    var dataCopy = newSeq[byte](x.usedBytes)
    copyMem(dataCopy.baseAddr, x.hash.data.baseAddr, x.usedBytes)
    return dataCopy.toRange
template keyToLocalBytes(db: DB, k: TrieNodeKey): BytesRange =
  # Short nodes are stored inline in the key; full ones live in the db.
  if k.len < 32: k.getLocalBytes
  else: db.get(k.asDbKey).toRange
template extensionNodeKey(r: Rlp): auto =
  # Decodes the hex-prefix path of a 2-item (leaf/extension) node.
  hexPrefixDecode r.listElem(0).toBytes
proc getAux(db: DB, nodeRlp: Rlp, path: NibblesRange): BytesRange {.gcsafe.}
proc getAuxByHash(db: DB, node: TrieNodeKey, path: NibblesRange): BytesRange =
  ## Resolves `node` to bytes (inline or from the db) and continues the
  ## lookup of `path` there.
  var nodeRlp = rlpFromBytes keyToLocalBytes(db, node)
  return getAux(db, nodeRlp, path)
template getLookup(elem: untyped): untyped =
  # Inline child nodes are already RLP lists; hashed children must be
  # fetched from the database first. Expects `db` in the caller's scope.
  if elem.isList: elem
  else: rlpFromBytes(get(db, toOpenArray(elem.expectHash)).toRange)
proc getAux(db: DB, nodeRlp: Rlp, path: NibblesRange): BytesRange =
  ## Recursive lookup: walks `nodeRlp` consuming `path` nibbles; returns
  ## the stored value, or an empty range when the path is absent.
  if not nodeRlp.hasData or nodeRlp.isEmpty:
    return zeroBytesRange
  case nodeRlp.listLen
  of 2:
    # Leaf or extension node.
    let (isLeaf, k) = nodeRlp.extensionNodeKey
    let sharedNibbles = sharedPrefixLen(path, k)
    if sharedNibbles == k.len:
      let value = nodeRlp.listElem(1)
      if sharedNibbles == path.len and isLeaf:
        return value.toBytes
      elif not isLeaf:
        # Extension fully consumed: continue into its child.
        let nextLookup = value.getLookup
        return getAux(db, nextLookup, path.slice(sharedNibbles))
    return zeroBytesRange
  of 17:
    # Branch node: slot 16 holds the value for an exhausted path.
    if path.len == 0:
      return nodeRlp.listElem(16).toBytes
    var branch = nodeRlp.listElem(path[0].int)
    if branch.isEmpty:
      return zeroBytesRange
    else:
      let nextLookup = branch.getLookup
      return getAux(db, nextLookup, path.slice(1))
  else:
    raise newException(CorruptedTrieError,
      "HexaryTrie node with an unexpected number of children")
proc get*(self: HexaryTrie; key: BytesRange): BytesRange =
  ## Looks up `key`; yields an empty range when the key is absent.
  getAuxByHash(self.db, self.root, initNibbleRange(key))
proc getKeysAux(db: DB, stack: var seq[tuple[nodeRlp: Rlp, path: NibblesRange]]): BytesRange =
  ## Pops nodes off `stack` until the next complete key is found,
  ## pushing unvisited children back for later calls. Driven by the
  ## `keys` iterator below.
  while stack.len > 0:
    let (nodeRlp, path) = stack.pop()
    if not nodeRlp.hasData or nodeRlp.isEmpty:
      continue
    case nodeRlp.listLen
    of 2:
      let
        (isLeaf, k) = nodeRlp.extensionNodeKey
        key = path & k
      if isLeaf:
        # A complete key always has an even number of nibbles.
        assert(key.len mod 2 == 0)
        return key.getBytes
      else:
        let
          value = nodeRlp.listElem(1)
          nextLookup = value.getLookup
        stack.add((nextLookup, key))
    of 17:
      for i in 0 ..< 16:
        var branch = nodeRlp.listElem(i)
        if not branch.isEmpty:
          let nextLookup = branch.getLookup
          # Extend the path with the branch index nibble.
          var key = path.cloneAndReserveNibble()
          key.replaceLastNibble(i.byte)
          stack.add((nextLookup, key))
      var lastElem = nodeRlp.listElem(16)
      if not lastElem.isEmpty:
        assert(path.len mod 2 == 0)
        return path.getBytes
    else:
      raise newException(CorruptedTrieError,
        "HexaryTrie node with an unexpected number of children")
iterator keys*(self: HexaryTrie): BytesRange =
  ## Iterates over all keys stored in the trie.
  var
    nodeRlp = rlpFromBytes keyToLocalBytes(self.db, self.root)
    path = newRange[byte](0)
    stack = @[(nodeRlp, initNibbleRange(path))]
  while stack.len > 0:
    yield getKeysAux(self.db, stack)
proc getValuesAux(db: DB, stack: var seq[Rlp]): BytesRange =
  ## Pops nodes off `stack` until the next stored value is found,
  ## pushing unvisited children back. Driven by the `values` iterator.
  while stack.len > 0:
    let nodeRlp = stack.pop()
    if not nodeRlp.hasData or nodeRlp.isEmpty:
      continue
    case nodeRlp.listLen
    of 2:
      let
        (isLeaf, _) = nodeRlp.extensionNodeKey
        value = nodeRlp.listElem(1)
      if isLeaf:
        return value.toBytes
      else:
        let nextLookup = value.getLookup
        stack.add(nextLookup)
    of 17:
      for i in 0 ..< 16:
        var branch = nodeRlp.listElem(i)
        if not branch.isEmpty:
          let nextLookup = branch.getLookup
          stack.add(nextLookup)
      # Slot 16 carries the value terminating at this branch.
      var lastElem = nodeRlp.listElem(16)
      if not lastElem.isEmpty:
        return lastElem.toBytes
    else:
      raise newException(CorruptedTrieError,
        "HexaryTrie node with an unexpected number of children")
iterator values*(self: HexaryTrie): BytesRange =
  ## Iterates over all values stored in the trie.
  var
    nodeRlp = rlpFromBytes keyToLocalBytes(self.db, self.root)
    stack = @[nodeRlp]
  while stack.len > 0:
    yield getValuesAux(self.db, stack)
proc getPairsAux(db: DB, stack: var seq[tuple[nodeRlp: Rlp, path: NibblesRange]]): (BytesRange, BytesRange) =
  ## Pops nodes off `stack` until the next (key, value) pair is found,
  ## pushing unvisited children back. Driven by the `pairs` iterator.
  while stack.len > 0:
    let (nodeRlp, path) = stack.pop()
    if not nodeRlp.hasData or nodeRlp.isEmpty:
      continue
    case nodeRlp.listLen
    of 2:
      let
        (isLeaf, k) = nodeRlp.extensionNodeKey
        key = path & k
        value = nodeRlp.listElem(1)
      if isLeaf:
        # A complete key always has an even number of nibbles.
        assert(key.len mod 2 == 0)
        return (key.getBytes, value.toBytes)
      else:
        let nextLookup = value.getLookup
        stack.add((nextLookup, key))
    of 17:
      for i in 0 ..< 16:
        var branch = nodeRlp.listElem(i)
        if not branch.isEmpty:
          let nextLookup = branch.getLookup
          var key = path.cloneAndReserveNibble()
          key.replaceLastNibble(i.byte)
          stack.add((nextLookup, key))
      var lastElem = nodeRlp.listElem(16)
      if not lastElem.isEmpty:
        assert(path.len mod 2 == 0)
        return (path.getBytes, lastElem.toBytes)
    else:
      raise newException(CorruptedTrieError,
        "HexaryTrie node with an unexpected number of children")
iterator pairs*(self: HexaryTrie): (BytesRange, BytesRange) =
  ## Iterates over all (key, value) pairs in the trie.
  var
    nodeRlp = rlpFromBytes keyToLocalBytes(self.db, self.root)
    path = newRange[byte](0)
    stack = @[(nodeRlp, initNibbleRange(path))]
  while stack.len > 0:
    # perhaps a Nim bug #9778
    # cannot yield the helper proc directly
    # it will cut the yield in half
    let res = getPairsAux(self.db, stack)
    yield res
proc getValues*(self: HexaryTrie): seq[BytesRange] =
  ## Materializes every value in the trie into a freshly built seq.
  result = newSeq[BytesRange]()
  for value in self.values:
    result.add(value)
proc getKeys*(self: HexaryTrie): seq[BytesRange] =
  ## Materializes every key in the trie into a freshly built seq.
  result = newSeq[BytesRange]()
  for storedKey in self.keys:
    result.add(storedKey)
template getNode(elem: untyped): untyped =
  # Raw bytes of a child: inline nodes directly, hashed ones from `db`
  # (expected in the caller's scope).
  if elem.isList: elem.rawData
  else: get(db, toOpenArray(elem.expectHash)).toRange
proc getBranchAux(db: DB, node: BytesRange, path: NibblesRange, output: var seq[BytesRange]) =
  ## Appends to `output` every node visited while walking `path`.
  var nodeRlp = rlpFromBytes node
  if not nodeRlp.hasData or nodeRlp.isEmpty: return
  case nodeRlp.listLen
  of 2:
    let (isLeaf, k) = nodeRlp.extensionNodeKey
    let sharedNibbles = sharedPrefixLen(path, k)
    if sharedNibbles == k.len:
      let value = nodeRlp.listElem(1)
      if not isLeaf:
        let nextLookup = value.getNode
        output.add nextLookup
        getBranchAux(db, nextLookup, path.slice(sharedNibbles), output)
  of 17:
    if path.len != 0:
      var branch = nodeRlp.listElem(path[0].int)
      if not branch.isEmpty:
        let nextLookup = branch.getNode
        output.add nextLookup
        getBranchAux(db, nextLookup, path.slice(1), output)
  else:
    raise newException(CorruptedTrieError,
      "HexaryTrie node with an unexpected number of children")
proc getBranch*(self: HexaryTrie; key: BytesRange): seq[BytesRange] =
  ## Returns the Merkle branch (root-to-leaf list of node bytes) for `key`.
  result = @[]
  var node = keyToLocalBytes(self.db, self.root)
  result.add node
  getBranchAux(self.db, node, initNibbleRange(key), result)
proc dbDel(t: var HexaryTrie, data: BytesRange) =
  # Only nodes stored by hash (>= 32 bytes) exist in the db to delete;
  # shorter nodes are inlined into their parent.
  if data.len >= 32: t.prune(data.keccak.data)
proc dbPut(db: DB, data: BytesRange): TrieNodeKey =
  ## Stores `data` keyed by its Keccak hash and returns that key.
  result.hash = data.keccak
  result.usedBytes = 32
  put(db, result.asDbKey, data.toOpenArray)
proc appendAndSave(rlpWriter: var RlpWriter, data: BytesRange, db: DB) =
  ## Appends a child node to `rlpWriter`: by hash (persisting it first)
  ## when it is >= 32 bytes, otherwise inlined as raw bytes.
  if data.len >= 32:
    var nodeKey = dbPut(db, data)
    rlpWriter.append(nodeKey.hash)
  else:
    rlpWriter.appendRawBytes(data)
proc isTrieBranch(rlp: Rlp): bool =
  # True for the two valid node shapes: 2-item (leaf/extension) or
  # 17-item (branch).
  rlp.isList and (var len = rlp.listLen; len == 2 or len == 17)
proc replaceValue(data: Rlp, key: NibblesRange, value: BytesRange): Bytes =
  ## Returns the RLP bytes of node `data` with its value replaced by
  ## `value`; creates a fresh leaf for `key` when `data` is empty.
  if data.isEmpty:
    let prefix = hexPrefixEncode(key, true)
    return encodeList(prefix, value)
  assert data.isTrieBranch
  if data.listLen == 2:
    return encodeList(data.listElem(0), value)
  # Branch node: copy the 16 child slots, then append the new value.
  var r = initRlpList(17)
  # XXX: This can be optmized to a direct bitwise copy of the source RLP
  var iter = data
  iter.enterList()
  for i in 0 ..< 16:
    r.append iter
    iter.skipElem
  r.append value
  return r.finish()
proc isTwoItemNode(self: HexaryTrie; r: Rlp): bool =
  ## True when `r` (inline, or resolved from the db when it is a hash
  ## blob) is a 2-item leaf/extension node.
  if r.isBlob:
    let resolved = self.db.get(r)
    let rlp = rlpFromBytes(resolved)
    return rlp.isList and rlp.listLen == 2
  else:
    return r.isList and r.listLen == 2
proc isLeaf(r: Rlp): bool =
  # Bit 0x20 of the first hex-prefix byte marks a leaf node.
  assert r.isList and r.listLen == 2
  let b = r.listElem(0).toBytes()
  return (b[0] and 0x20) != 0
proc findSingleChild(r: Rlp; childPos: var byte): Rlp =
  ## If branch node `r` has exactly one non-empty slot, returns that
  ## child and sets `childPos`; returns an empty Rlp otherwise.
  result = zeroBytesRlp
  var i: byte = 0
  var rlp = r
  for elem in rlp:
    if not elem.isEmpty:
      if not result.hasData:
        result = elem
        childPos = i
      else:
        # Second non-empty slot found: not a single-child branch.
        return zeroBytesRlp
    inc i
proc deleteAt(self: var HexaryTrie; origRlp: Rlp, key: NibblesRange): BytesRange {.gcsafe.}
proc deleteAux(self: var HexaryTrie; rlpWriter: var RlpWriter;
               origRlp: Rlp; path: NibblesRange): bool =
  ## Deletes `path` beneath child `origRlp` and appends the rewritten
  ## child to `rlpWriter`. Returns false when the key was not present.
  if origRlp.isEmpty:
    return false
  var toDelete = if origRlp.isList: origRlp
                 else: rlpFromBytes self.db.get(origRlp)
  let b = self.deleteAt(toDelete, path)
  if b.len == 0:
    return false
  rlpWriter.appendAndSave(b, self.db)
  return true
proc graft(self: var HexaryTrie; r: Rlp): Bytes =
  ## Merges extension node `r` with its (2-item) child into a single
  ## node by concatenating their hex-prefix paths.
  assert r.isList and r.listLen == 2
  var (origIsLeaf, origPath) = r.extensionNodeKey
  var value = r.listElem(1)
  # NOTE(review): `n` appears unused below — candidate for removal.
  var n: Rlp
  if not value.isList:
    # Child referenced by hash: resolve it and prune the stale copy.
    let nodeKey = value.expectHash
    var resolvedData = self.db.get(nodeKey.toOpenArray).toRange
    self.prune(nodeKey.toOpenArray)
    value = rlpFromBytes resolvedData
  assert value.listLen == 2
  let (valueIsLeaf, valueKey) = value.extensionNodeKey
  var rlpWriter = initRlpList(2)
  rlpWriter.append hexPrefixEncode(origPath, valueKey, valueIsLeaf)
  rlpWriter.append value.listElem(1)
  return rlpWriter.finish
proc mergeAndGraft(self: var HexaryTrie;
                   soleChild: Rlp, childPos: byte): Bytes =
  ## Rebuilds a branch that is down to one child as a 2-item node and
  ## grafts it onto that child when the child is itself 2-item.
  var output = initRlpList(2)
  if childPos == 16:
    # Remaining child is the branch's own value slot: becomes a leaf
    # with an empty path.
    output.append hexPrefixEncode(zeroNibblesRange, true)
  else:
    assert(not soleChild.isEmpty)
    output.append int(hexPrefixEncodeByte(childPos))
  output.append(soleChild)
  result = output.finish()
  if self.isTwoItemNode(soleChild):
    result = self.graft(rlpFromBytes(result.toRange))
proc deleteAt(self: var HexaryTrie;
              origRlp: Rlp, key: NibblesRange): BytesRange =
  ## Returns the replacement bytes for node `origRlp` after deleting
  ## `key`, or an empty range when the key was not found.
  if origRlp.isEmpty:
    return zeroBytesRange
  assert origRlp.isTrieBranch
  let origBytes = origRlp.rawData
  if origRlp.listLen == 2:
    let (isLeaf, k) = origRlp.extensionNodeKey
    if k == key and isLeaf:
      # Exact leaf match: the node disappears entirely.
      self.dbDel origBytes
      return emptyRlp.toRange
    if key.startsWith(k):
      # Delete somewhere below this extension, then rewrite it.
      var
        rlpWriter = initRlpList(2)
        path = origRlp.listElem(0)
        value = origRlp.listElem(1)
      rlpWriter.append(path)
      if not self.deleteAux(rlpWriter, value, key.slice(k.len)):
        return zeroBytesRange
      self.dbDel origBytes
      var finalBytes = rlpWriter.finish.toRange
      var rlp = rlpFromBytes(finalBytes)
      if self.isTwoItemNode(rlp.listElem(1)):
        # Extension now points at a 2-item node: collapse the pair.
        return self.graft(rlp).toRange
      return finalBytes
    else:
      return zeroBytesRange
  else:
    # Branch node.
    # NOTE(review): this branch fires when the path is exhausted and the
    # value slot (16) is *empty* — confirm against upstream whether the
    # condition was meant to be negated.
    if key.len == 0 and origRlp.listElem(16).isEmpty:
      self.dbDel origBytes
      var foundChildPos: byte
      let singleChild = origRlp.findSingleChild(foundChildPos)
      if singleChild.hasData and foundChildPos != 16:
        result = self.mergeAndGraft(singleChild, foundChildPos).toRange
      else:
        # Rebuild the branch with an empty value slot.
        var rlpRes = initRlpList(17)
        var iter = origRlp
        iter.enterList
        for i in 0 ..< 16:
          rlpRes.append iter
          iter.skipElem
        rlpRes.append ""
        return rlpRes.finish.toRange
    else:
      # Recurse into the child selected by the first nibble.
      var rlpWriter = initRlpList(17)
      let keyHead = int(key[0])
      var i = 0
      var origCopy = origRlp
      for elem in items(origCopy):
        if i == keyHead:
          if not self.deleteAux(rlpWriter, elem, key.slice(1)):
            return zeroBytesRange
        else:
          rlpWriter.append(elem)
        inc i
      self.dbDel origBytes
      result = rlpWriter.finish.toRange
      var resultRlp = rlpFromBytes(result)
      # If the deletion left a single child, collapse the branch.
      var foundChildPos: byte
      let singleChild = resultRlp.findSingleChild(foundChildPos)
      if singleChild.hasData:
        result = self.mergeAndGraft(singleChild, foundChildPos).toRange
proc del*(self: var HexaryTrie; key: BytesRange) =
  ## Removes `key` from the trie; no-op when the key is absent.
  var
    rootBytes = keyToLocalBytes(self.db, self.root)
    rootRlp = rlpFromBytes rootBytes
  var newRootBytes = self.deleteAt(rootRlp, initNibbleRange(key))
  if newRootBytes.len > 0:
    if rootBytes.len < 32:
      self.prune(self.root.asDbKey)
    self.root = self.db.dbPut(newRootBytes)
proc mergeAt(self: var HexaryTrie, orig: Rlp, origHash: KeccakHash,
             key: NibblesRange, value: BytesRange,
             isInline = false): BytesRange {.gcsafe.}
proc mergeAt(self: var HexaryTrie, rlp: Rlp,
             key: NibblesRange, value: BytesRange,
             isInline = false): BytesRange =
  ## Convenience overload computing the node's hash itself.
  self.mergeAt(rlp, rlp.rawData.keccak, key, value, isInline)
proc mergeAtAux(self: var HexaryTrie, output: var RlpWriter, orig: Rlp,
                key: NibblesRange, value: BytesRange) =
  ## Merges (key, value) into child `orig`, resolving it from the db
  ## when it is stored by hash, and appends the result to `output`.
  var resolved = orig
  var isRemovable = false
  if not (orig.isList or orig.isEmpty):
    # Hash reference: fetch the real node; it can be pruned on rewrite.
    resolved = rlpFromBytes self.db.get(orig)
    isRemovable = true
  let b = self.mergeAt(resolved, key, value, not isRemovable)
  output.appendAndSave(b, self.db)
proc mergeAt(self: var HexaryTrie, orig: Rlp, origHash: KeccakHash,
             key: NibblesRange, value: BytesRange,
             isInline = false): BytesRange =
  ## Core insertion: returns the replacement bytes for node `orig` after
  ## merging (key, value). `isInline` marks nodes embedded in their
  ## parent rather than stored separately under `origHash`.
  template origWithNewValue: auto =
    # Replace this node's value, pruning the superseded stored copy.
    self.prune(origHash.data)
    replaceValue(orig, key, value).toRange
  if orig.isEmpty:
    return origWithNewValue()
  assert orig.isTrieBranch
  if orig.listLen == 2:
    let (isLeaf, k) = orig.extensionNodeKey
    var origValue = orig.listElem(1)
    if k == key and isLeaf:
      # Exact match: overwrite the existing leaf value.
      return origWithNewValue()
    let sharedNibbles = sharedPrefixLen(key, k)
    if sharedNibbles == k.len and not isLeaf:
      # Extension fully consumed: descend into its child.
      var r = initRlpList(2)
      r.append orig.listElem(0)
      self.mergeAtAux(r, origValue, key.slice(k.len), value)
      return r.finish.toRange
    if orig.rawData.len >= 32:
      self.prune(origHash.data)
    if sharedNibbles > 0:
      # Split the extension node
      var bottom = initRlpList(2)
      bottom.append hexPrefixEncode(k.slice(sharedNibbles), isLeaf)
      bottom.append origValue
      var top = initRlpList(2)
      top.append hexPrefixEncode(k.slice(0, sharedNibbles), false)
      top.appendAndSave(bottom.finish.toRange, self.db)
      return self.mergeAt(rlpFromBytes(top.finish.toRange), key, value, true)
    else:
      # Create a branch node
      var branches = initRlpList(17)
      if k.len == 0:
        # The key is now exhausted. This must be a leaf node
        assert isLeaf
        for i in 0 ..< 16:
          branches.append ""
        branches.append origValue
      else:
        # Move the existing node one level down, under its first nibble.
        let n = k[0]
        for i in 0 ..< 16:
          if byte(i) == n:
            if isLeaf or k.len > 1:
              let childNode = encodeList(hexPrefixEncode(k.slice(1), isLeaf),
                                         origValue).toRange
              branches.appendAndSave(childNode, self.db)
            else:
              branches.append origValue
          else:
            branches.append ""
        branches.append ""
      return self.mergeAt(rlpFromBytes(branches.finish.toRange), key, value, true)
  else:
    # Branch node.
    if key.len == 0:
      return origWithNewValue()
    if isInline:
      self.prune(origHash.data)
    # Rewrite the branch, recursing into the selected child slot.
    let n = key[0]
    var i = 0
    var r = initRlpList(17)
    var origCopy = orig
    for elem in items(origCopy):
      if i == int(n):
        self.mergeAtAux(r, elem, key.slice(1), value)
      else:
        r.append(elem)
      inc i
    return r.finish.toRange
proc put*(self: var HexaryTrie; key, value: BytesRange) =
  ## Inserts or updates `key` with `value`, re-rooting the trie.
  let root = self.root.hash
  var rootBytes = self.db.get(root.data).toRange
  assert rootBytes.len > 0
  let newRootBytes = self.mergeAt(rlpFromBytes(rootBytes), root,
                                  initNibbleRange(key), value)
  if rootBytes.len < 32:
    self.prune(root.data)
  self.root = self.db.dbPut(newRootBytes)
proc put*(self: var SecureHexaryTrie; key, value: BytesRange) =
  ## Secure variant: keys are Keccak-256 hashed before insertion.
  let keyHash = @(key.keccak.data)
  put(HexaryTrie(self), keyHash.toRange, value)
proc get*(self: SecureHexaryTrie; key: BytesRange): BytesRange =
  ## Secure variant: looks up the Keccak-256 hash of `key`.
  let keyHash = @(key.keccak.data)
  return get(HexaryTrie(self), keyHash.toRange)
proc del*(self: var SecureHexaryTrie; key: BytesRange) =
  ## Secure variant: deletes by the Keccak-256 hash of `key`.
  let keyHash = @(key.keccak.data)
  del(HexaryTrie(self), keyHash.toRange)
proc rootHash*(self: SecureHexaryTrie): KeccakHash {.borrow.}
proc rootHashHex*(self: SecureHexaryTrie): string {.borrow.}
proc isPruning*(self: SecureHexaryTrie): bool {.borrow.}
template contains*(self: HexaryTrie | SecureHexaryTrie;
                   key: BytesRange): bool =
  ## Membership test: present keys have non-empty values.
  self.get(key).len > 0

164
eth/trie/nibbles.nim Normal file
View File

@ -0,0 +1,164 @@
import
trie_defs
type
  NibblesRange* = object
    ## A view over half-byte (nibble) positions within a ByteRange.
    bytes: ByteRange
    ibegin, iend: int   # absolute nibble positions: valid span is [ibegin, iend)
proc initNibbleRange*(bytes: ByteRange): NibblesRange =
  ## Wraps `bytes`, exposing all of its 2 * len nibbles.
  result.bytes = bytes
  result.ibegin = 0
  result.iend = bytes.len * 2
const
  zeroNibblesRange* = initNibbleRange(zeroBytesRange)
proc `{}`(r: NibblesRange, pos: int): byte {.inline.} =
  ## This is a helper for a more raw access to the nibbles.
  ## It works with absolute positions.
  # Fix: valid absolute positions lie in [0, r.iend); the original
  # `pos > r.iend` check accepted pos == r.iend, one nibble past the
  # end of the range.
  if pos >= r.iend: raise newException(RangeError, "index out of range")
  # Even absolute positions are the high nibble, odd ones the low nibble.
  return if (pos and 1) != 0: (r.bytes[pos div 2] and 0xf)
         else: (r.bytes[pos div 2] shr 4)
template `[]`*(r: NibblesRange, i: int): byte = r{r.ibegin + i}
proc len*(r: NibblesRange): int =
  ## Number of nibbles in the range.
  r.iend - r.ibegin
proc `==`*(lhs, rhs: NibblesRange): bool =
  ## Nibble-wise equality; ranges of differing length never compare equal.
  if lhs.len != rhs.len:
    return false
  for idx in 0 ..< lhs.len:
    if lhs[idx] != rhs[idx]:
      return false
  return true
proc `$`*(r: NibblesRange): string =
  ## Lowercase hex rendering, one character per nibble.
  result = newStringOfCap(100)
  for idx in 0 ..< r.len:
    let nibble = int r[idx]
    result.add(
      if nibble > 9: char(ord('a') + nibble - 10)
      else: char(ord('0') + nibble))
proc slice*(r: NibblesRange, ibegin: int, iend = -1): NibblesRange =
  ## Sub-range of `r`. A negative `iend` counts back from the end
  ## (default -1 means "through the last nibble").
  result.bytes = r.bytes
  result.ibegin = r.ibegin + ibegin
  let e = if iend < 0: r.iend + iend + 1
          else: r.ibegin + iend
  assert ibegin >= 0 and e <= result.bytes.len * 2
  result.iend = e
template writeFirstByte(nibbleCountExpr) {.dirty.} =
  # Emits the hex-prefix header byte; expects `isLeaf` in scope and
  # introduces `result`, `oddnessFlag` and `writeHead` for writeNibbles.
  let nibbleCount = nibbleCountExpr
  var oddnessFlag = (nibbleCount and 1) != 0
  newSeq(result, (nibbleCount div 2) + 1)
  result[0] = byte((int(isLeaf) * 2 + int(oddnessFlag)) shl 4)
  var writeHead = 0
template writeNibbles(r) {.dirty.} =
  # Packs the nibbles of `r` after the header, two per byte.
  for i in r.ibegin ..< r.iend:
    let nextNibble = r{i}
    if oddnessFlag:
      result[writeHead] = result[writeHead] or nextNibble
    else:
      inc writeHead
      result[writeHead] = nextNibble shl 4
    oddnessFlag = not oddnessFlag
proc hexPrefixEncode*(r: NibblesRange, isLeaf = false): Bytes =
  ## Hex-prefix (compact) encoding of `r` with the leaf flag.
  writeFirstByte(r.len)
  writeNibbles(r)
proc hexPrefixEncode*(r1, r2: NibblesRange, isLeaf = false): Bytes =
  ## Hex-prefix encoding of the concatenation of `r1` and `r2`.
  writeFirstByte(r1.len + r2.len)
  writeNibbles(r1)
  writeNibbles(r2)
proc hexPrefixEncodeByte*(val: byte, isLeaf = false): byte =
  ## Hex-prefix encoding of a single nibble (odd length, so the payload
  ## sits in the low nibble of the header byte).
  assert val < 16
  result = (((byte(isLeaf) * 2) + 1) shl 4) or val
proc sharedPrefixLen*(lhs, rhs: NibblesRange): int =
  ## Length of the longest common nibble prefix of `lhs` and `rhs`.
  let limit = min(lhs.len, rhs.len)
  while result < limit and lhs[result] == rhs[result]:
    inc result
proc startsWith*(lhs, rhs: NibblesRange): bool =
  ## True when `rhs` is a prefix of `lhs`.
  sharedPrefixLen(lhs, rhs) == rhs.len
proc hexPrefixDecode*(r: ByteRange): tuple[isLeaf: bool, nibbles: NibblesRange] =
  ## Inverse of hexPrefixEncode: strips the header, returning the leaf
  ## flag and the payload nibbles.
  result.nibbles = initNibbleRange(r)
  if r.len > 0:
    result.isLeaf = (r[0] and 0x20) != 0
    let hasOddLen = (r[0] and 0x10) != 0
    # Odd payloads begin in the low nibble of the header byte itself.
    result.nibbles.ibegin = 2 - int(hasOddLen)
  else:
    result.isLeaf = false
template putNibble(bytes, x: untyped) =
  # Writes nibble `x` at the write cursor (`pos`, `odd`), both taken
  # from the caller's scope; `pos` advances after completing a byte.
  if odd:
    bytes[pos] = (bytes[pos] and 0xF0) or x
    inc pos
  else:
    bytes[pos] = x shl 4
template putNibbles(bytes, src: untyped) =
  # Appends every nibble of `src`, toggling the half-byte flag.
  for i in 0 ..< src.len:
    bytes.putNibble(src[i])
    odd = not odd
template calcNeededBytes(len: int): int =
  # Bytes needed to hold `len` nibbles (rounded up).
  (len shr 1) + (len and 1)
proc `&`*(a, b: NibblesRange): NibblesRange =
  ## Concatenates two nibble ranges into a freshly allocated range.
  let
    len = a.len + b.len
    bytesNeeded = calcNeededBytes(len)
  var
    bytes = newSeq[byte](bytesNeeded)
    odd = false
    pos = 0
  bytes.putNibbles(a)
  bytes.putNibbles(b)
  result = initNibbleRange(bytes.toRange)
  result.iend = len
proc cloneAndReserveNibble*(a: NibblesRange): NibblesRange =
  ## Copies `a` with room for one extra trailing nibble, which is set
  ## later via replaceLastNibble.
  let
    len = a.len + 1
    bytesNeeded = calcNeededBytes(len)
  var
    bytes = newSeq[byte](bytesNeeded)
    odd = false
    pos = 0
  bytes.putNibbles(a)
  result = initNibbleRange(bytes.toRange)
  result.iend = len
proc replaceLastNibble*(a: var NibblesRange, b: byte) =
  ## Overwrites the final nibble of `a` with `b`.
  var
    # `odd` is true when the last nibble is the low half of its byte,
    # i.e. when the range length is even.
    odd = (a.len and 1) == 0
    pos = (a.len shr 1) - odd.int
  putNibble(MutRange[byte](a.bytes), b)
proc getBytes*(a: NibblesRange): ByteRange =
  ## Underlying byte storage (including any slack nibble).
  a.bytes
when false:
  proc keyOf(r: ByteRange): NibblesRange =
    ## Decodes a hex-prefix-encoded key into its payload nibbles.
    let firstIdx = if r.len == 0: 0
                   elif (r[0] and 0x10) != 0: 1
                   else: 2
    # Fix: the disabled original referenced an undefined `s`; the
    # parameter is named `r`. Corrected so the code compiles if this
    # `when false` block is ever re-enabled.
    return initNibbleRange(r).slice(firstIdx)

182
eth/trie/sparse_binary.nim Normal file
View File

@ -0,0 +1,182 @@
import
ranges/[ptr_arith, typedranges, bitranges], eth/rlp/types,
trie_defs, trie_utils, db, sparse_proofs
export
types, trie_utils, bitranges,
sparse_proofs.verifyProof
type
  DB = TrieDatabaseRef
  SparseBinaryTrie* = object
    ## Fixed-depth (treeHeight-level) binary Merkle trie backed by a
    ## key-value database.
    db: DB
    rootHash: ByteRange
proc `==`(a: ByteRange, b: KeccakHash): bool =
  # Byte-wise comparison between a range and a digest.
  if a.len != b.data.len: return false
  equalMem(a.baseAddr, b.data[0].unsafeAddr, a.len)
type
  # 256 * 2 div 8
  DoubleHash = array[64, byte]
proc initDoubleHash(a, b: openArray[byte]): DoubleHash =
  ## Concatenates two 32-byte hashes into a 64-byte interior-node body.
  assert(a.len == 32, $a.len)
  assert(b.len == 32, $b.len)
  copyMem(result[ 0].addr, a[0].unsafeAddr, 32)
  copyMem(result[32].addr, b[0].unsafeAddr, 32)
proc initDoubleHash(x: ByteRange): DoubleHash =
  # Node whose left and right children are both `x`.
  initDoubleHash(x.toOpenArray, x.toOpenArray)
proc init*(x: typedesc[SparseBinaryTrie], db: DB): SparseBinaryTrie =
  ## Creates an empty trie, seeding `db` with the default (all-empty)
  ## node for every level plus the empty leaf.
  result.db = db
  # Initialize an empty tree with one branch
  var value = initDoubleHash(emptyNodeHashes[0])
  result.rootHash = keccakHash(value)
  result.db.put(result.rootHash.toOpenArray, value)
  for i in 0..<treeHeight - 1:
    value = initDoubleHash(emptyNodeHashes[i+1])
    result.db.put(emptyNodeHashes[i].toOpenArray, value)
  result.db.put(emptyLeafNodeHash.data, zeroBytesRange.toOpenArray)
proc initSparseBinaryTrie*(db: DB): SparseBinaryTrie =
  init(SparseBinaryTrie, db)
proc init*(x: typedesc[SparseBinaryTrie], db: DB,
           rootHash: BytesContainer | KeccakHash): SparseBinaryTrie =
  ## Opens an existing trie at `rootHash`; only the hash length is
  ## validated, not the stored contents.
  checkValidHashZ(rootHash)
  result.db = db
  result.rootHash = rootHash
proc initSparseBinaryTrie*(db: DB, rootHash: BytesContainer | KeccakHash): SparseBinaryTrie =
  init(SparseBinaryTrie, db, rootHash)
proc getDB*(t: SparseBinaryTrie): auto = t.db
proc getRootHash*(self: SparseBinaryTrie): ByteRange {.inline.} =
  self.rootHash
proc getAux(self: SparseBinaryTrie, path: BitRange, rootHash: ByteRange): ByteRange =
  ## Walks `path` bit by bit from `rootHash` down to a leaf; returns the
  ## leaf value, or an empty range when the subtree is missing or empty.
  var nodeHash = rootHash
  for targetBit in path:
    let value = self.db.get(nodeHash.toOpenArray).toRange
    if value.len == 0: return zeroBytesRange
    # Node body is leftHash (bytes 0..31) followed by rightHash (32..63).
    if targetBit: nodeHash = value[32..^1]
    else: nodeHash = value[0..31]
  if nodeHash.toOpenArray == emptyLeafNodeHash.data:
    result = zeroBytesRange
  else:
    result = self.db.get(nodeHash.toOpenArray).toRange
proc get*(self: SparseBinaryTrie, key: BytesContainer): ByteRange =
  ## gets a key from the tree.
  assert(key.len == pathByteLen)
  let path = MutByteRange(key.toRange).bits
  self.getAux(path, self.rootHash)
proc get*(self: SparseBinaryTrie, key, rootHash: distinct BytesContainer): ByteRange =
  ## gets a key from the tree at a specific root.
  assert(key.len == pathByteLen)
  let path = MutByteRange(key.toRange).bits
  self.getAux(path, rootHash.toRange)
proc hashAndSave*(self: SparseBinaryTrie, node: ByteRange): ByteRange =
  ## Persists `node` keyed by its Keccak hash; returns the hash.
  result = keccakHash(node)
  self.db.put(result.toOpenArray, node.toOpenArray)
proc hashAndSave*(self: SparseBinaryTrie, a, b: ByteRange): ByteRange =
  ## Persists the 64-byte node (a ++ b); returns its Keccak hash.
  let value = initDoubleHash(a.toOpenArray, b.toOpenArray)
  result = keccakHash(value)
  self.db.put(result.toOpenArray, value)
proc setAux(self: var SparseBinaryTrie, value: ByteRange,
            path: BitRange, depth: int, nodeHash: ByteRange): ByteRange =
  ## Recursively rewrites the nodes along `path`, re-hashing each level
  ## on the way back up; returns the new subtree root at `depth`.
  if depth == treeHeight:
    result = self.hashAndSave(value)
  else:
    let
      node = self.db.get(nodeHash.toOpenArray).toRange
      leftNode = node[0..31]
      rightNode = node[32..^1]
    if path[depth]:
      result = self.hashAndSave(leftNode, self.setAux(value, path, depth+1, rightNode))
    else:
      result = self.hashAndSave(self.setAux(value, path, depth+1, leftNode), rightNode)
proc set*(self: var SparseBinaryTrie, key, value: distinct BytesContainer) =
  ## sets a new value for a key in the tree and updates the tree's
  ## current root accordingly.
  assert(key.len == pathByteLen)
  let path = MutByteRange(key.toRange).bits
  self.rootHash = self.setAux(value.toRange, path, 0, self.rootHash)
proc set*(self: var SparseBinaryTrie, key, value, rootHash: distinct BytesContainer): ByteRange =
  ## sets a new value for a key in the tree at a specific root,
  ## and returns the new root.
  assert(key.len == pathByteLen)
  let path = MutByteRange(key.toRange).bits
  self.setAux(value.toRange, path, 0, rootHash.toRange)
template exists*(self: SparseBinaryTrie, key: BytesContainer): bool =
  ## True when `key` holds a non-empty value.
  self.get(toRange(key)) != zeroBytesRange
proc del*(self: var SparseBinaryTrie, key: BytesContainer) =
  ## Equals to setting the value to zeroBytesRange
  assert(key.len == pathByteLen)
  self.set(key, zeroBytesRange)
# Dictionary API
template `[]`*(self: SparseBinaryTrie, key: BytesContainer): ByteRange =
  self.get(key)
template `[]=`*(self: var SparseBinaryTrie, key, value: distinct BytesContainer) =
  self.set(key, value)
template contains*(self: SparseBinaryTrie, key: BytesContainer): bool =
  self.exists(key)
proc proveAux(self: SparseBinaryTrie, key, rootHash: ByteRange, output: var seq[ByteRange]): bool =
  ## Fills `output` (pre-sized to treeHeight) with the sibling hash at
  ## each level of `key`'s path; false when a node on the path is missing.
  assert(key.len == pathByteLen)
  var currVal = self.db.get(rootHash.toOpenArray).toRange
  if currVal.len == 0: return false
  let path = MutByteRange(key).bits
  for i, bit in path:
    if bit:
      # right side
      output[i] = currVal[0..31]
      currVal = self.db.get(currVal[32..^1].toOpenArray).toRange
      if currVal.len == 0: return false
    else:
      # Left side: the sibling is the right child.
      output[i] = currVal[32..^1]
      currVal = self.db.get(currVal[0..31].toOpenArray).toRange
      if currVal.len == 0: return false
  result = true
# prove generates a Merkle proof for a key.
proc prove*(self: SparseBinaryTrie, key: BytesContainer): seq[ByteRange] =
  result = newSeq[ByteRange](treeHeight)
  if not self.proveAux(key.toRange, self.rootHash, result):
    result = @[]
# prove generates a Merkle proof for a key, at a specific root.
proc prove*(self: SparseBinaryTrie, key, rootHash: distinct BytesContainer): seq[ByteRange] =
  result = newSeq[ByteRange](treeHeight)
  if not self.proveAux(key.toRange, rootHash.toRange, result):
    result = @[]
# proveCompact generates a compacted Merkle proof for a key.
proc proveCompact*(self: SparseBinaryTrie, key: BytesContainer): seq[ByteRange] =
  var temp = self.prove(key)
  temp.compactProof
# proveCompact generates a compacted Merkle proof for a key, at a specific root.
proc proveCompact*(self: SparseBinaryTrie, key, rootHash: distinct BytesContainer): seq[ByteRange] =
  var temp = self.prove(key, rootHash)
  temp.compactProof

View File

@ -0,0 +1,86 @@
import
ranges/[typedranges, bitranges],
trie_defs, db, trie_utils
const
  treeHeight* = 160               # trie depth: one bit of the key per level
  pathByteLen* = treeHeight div 8 # key length in bytes
  emptyLeafNodeHash* = blankStringHash
proc makeInitialEmptyTreeHash(H: static[int]): array[H, ByteRange] =
  ## Default node hash per level of an all-empty tree: level i hashes
  ## two copies of level i+1; the deepest level is the empty leaf hash.
  result[^1] = @(emptyLeafNodeHash.data).toRange
  for i in countdown(H-1, 1):
    result[i - 1] = keccakHash(result[i], result[i])
# cannot yet turn this into compile time constant
let emptyNodeHashes* = makeInitialEmptyTreeHash(treeHeight)
# VerifyProof verifies a Merkle proof.
proc verifyProofAux*(proof: seq[ByteRange], root, key, value: ByteRange): bool =
  ## Recomputes the root from `value` and the sibling hashes in `proof`
  ## along `key`'s bit path, then compares it with `root`.
  assert(root.len == 32)
  assert(key.len == pathByteLen)
  var
    path = MutByteRange(key).bits
    curHash = keccakHash(value)
  if proof.len != treeHeight: return false
  for i in countdown(treeHeight - 1, 0):
    var node = proof[i]
    if node.len != 32: return false
    if path[i]: # right
      # reuse curHash without more alloc
      curHash.keccakHash(node, curHash)
    else:
      curHash.keccakHash(curHash, node)
  result = curHash == root
template verifyProof*(proof: seq[ByteRange], root, key, value: distinct BytesContainer): bool =
  verifyProofAux(proof, root.toRange, key.toRange, value.toRange)
proc count(b: BitRange, val: bool): int =
  ## Number of bits in `b` equal to `val`.
  for bit in b:
    if bit == val:
      result += 1
# CompactProof compacts a proof, to reduce its size.
proc compactProof*(proof: seq[ByteRange]): seq[ByteRange] =
  ## Drops proof entries that equal the default empty-subtree hash for
  ## their level, recording the dropped positions in a leading bitmap.
  if proof.len != treeHeight: return
  var
    data = newRange[byte](pathByteLen)
    bits = MutByteRange(data).bits
  result = @[]
  result.add data
  for i in 0 ..< treeHeight:
    var node = proof[i]
    if node == emptyNodeHashes[i]:
      bits[i] = true
    else:
      result.add node
# decompactProof decompacts a proof, so that it can be used for VerifyProof.
proc decompactProof*(proof: seq[ByteRange]): seq[ByteRange] =
  ## Inverse of compactProof; returns an empty seq on malformed input.
  if proof.len == 0: return
  if proof[0].len != pathByteLen: return
  var bits = MutByteRange(proof[0]).bits
  # Sanity check: stored entry count must match the bitmap.
  if proof.len != bits.count(false) + 1: return
  result = newSeq[ByteRange](treeHeight)
  var pos = 1 # skip bits
  for i in 0 ..< treeHeight:
    if bits[i]:
      result[i] = emptyNodeHashes[i]
    else:
      result[i] = proof[pos]
      inc pos
# verifyCompactProof verifies a compacted Merkle proof.
proc verifyCompactProofAux*(proof: seq[ByteRange], root, key, value: ByteRange): bool =
  var decompactedProof = decompactProof(proof)
  if decompactedProof.len == 0: return false
  verifyProofAux(decompactedProof, root, key, value)
template verifyCompactProof*(proof: seq[ByteRange], root, key, value: distinct BytesContainer): bool =
  verifyCompactProofAux(proof, root.toRange, key.toRange, value.toRange)

28
eth/trie/trie_defs.nim Normal file
View File

@ -0,0 +1,28 @@
import
eth/rlp, ranges/typedranges, nimcrypto/hash
export
typedranges, Bytes
type
  KeccakHash* = MDigest[256]
  BytesContainer* = ByteRange | Bytes | string
const
  zeroBytesRange* = ByteRange()
  # Keccak-256 of the empty string.
  blankStringHash* = "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470".toDigest
  # RLP encoding of the empty string: the single byte 0x80.
  emptyRlp* = @[128.byte]
  # Keccak-256 of emptyRlp (root hash of an empty trie).
  emptyRlpHash* = "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421".toDigest
proc read*(rlp: var Rlp, T: typedesc[MDigest]): T {.inline.} =
  ## RLP deserialization for fixed-size digests.
  result.data = rlp.read(type(result.data))
proc append*(rlpWriter: var RlpWriter, a: MDigest) {.inline.} =
  ## RLP serialization for fixed-size digests (writes the raw bytes).
  rlpWriter.append(a.data)
proc unnecessary_OpenArrayToRange*(key: openarray[byte]): ByteRange =
  ## Converts an openarray to a ByteRange via an explicit copy. The
  ## deliberately clunky name documents that this allocates and copies
  ## (work that may become avoidable later) and keeps all such call
  ## sites easy to find — do not rename it to look like `toRange`.
  let copied = @key
  result = toRange(copied)

75
eth/trie/trie_utils.nim Normal file
View File

@ -0,0 +1,75 @@
import
strutils, parseutils,
ranges/[typedranges, ptr_arith], nimcrypto/[hash, keccak],
trie_defs, binaries
#proc baseAddr*(x: Bytes): ptr byte = x[0].unsafeAddr
proc toTrieNodeKey*(hash: KeccakHash): TrieNodeKey =
result = newRange[byte](32)
copyMem(result.baseAddr, hash.data.baseAddr, 32)
template checkValidHashZ*(x: untyped) =
when x.type isnot KeccakHash:
assert(x.len == 32 or x.len == 0)
template isZeroHash*(x: ByteRange): bool =
x.len == 0
template toRange*(hash: KeccakHash): ByteRange =
toTrieNodeKey(hash)
proc toRange*(str: string): ByteRange =
var s = newSeq[byte](str.len)
if str.len > 0:
copyMem(s[0].addr, str[0].unsafeAddr, str.len)
result = toRange(s)
proc hashFromHex*(bits: static[int], input: string): MDigest[bits] =
  ## Parses a hex string (without 0x prefix) into an MDigest of `bits` bits.
  ## Raises ValueError when the length or any character is invalid.
  if input.len != bits div 4:
    raise newException(ValueError,
                       "The input string has incorrect size")

  for i in 0 ..< bits div 8:
    var nextByte: int
    # two hex characters per output byte
    if parseHex(input, nextByte, i*2, 2) == 2:
      result.data[i] = uint8(nextByte)
    else:
      raise newException(ValueError,
                         "The input string contains invalid characters")
# One hex character encodes 4 bits, so a string of length N yields an N*4-bit digest.
template hashFromHex*(s: static[string]): untyped = hashFromHex(s.len * 4, s)
proc keccakHash*(input: openArray[byte]): ByteRange =
  ## Keccak-256 digest of `input`, returned as a fresh 32-byte range.
  var s = newSeq[byte](32)
  var ctx: keccak256
  ctx.init()
  if input.len > 0:
    ctx.update(input[0].unsafeAddr, uint(input.len))
  ctx.finish s
  ctx.clear()  # wipe the hashing state
  result = toRange(s)
proc keccakHash*(dest: var openArray[byte], a, b: openArray[byte]) =
  ## Keccak-256 digest of the concatenation `a & b`, written into `dest`
  ## (callers pass a 32-byte buffer; see the overload below).
  var ctx: keccak256
  ctx.init()
  if a.len != 0:
    ctx.update(a[0].unsafeAddr, uint(a.len))
  if b.len != 0:
    ctx.update(b[0].unsafeAddr, uint(b.len))
  ctx.finish dest
  ctx.clear()  # wipe the hashing state
proc keccakHash*(a, b: openArray[byte]): ByteRange =
  ## Keccak-256 digest of the concatenation `a & b` as a fresh 32-byte range.
  var s = newSeq[byte](32)
  keccakHash(s, a, b)
  result = toRange(s)
template keccakHash*(input: ByteRange): ByteRange =
  ## ByteRange convenience wrapper over the openArray overload.
  keccakHash(input.toOpenArray)

template keccakHash*(a, b: ByteRange): ByteRange =
  ## ByteRange convenience wrapper over the two-input overload.
  keccakHash(a.toOpenArray, b.toOpenArray)

template keccakHash*(dest: var ByteRange, a, b: ByteRange) =
  ## ByteRange convenience wrapper over the destination-taking overload.
  keccakHash(dest.toOpenArray, a.toOpenArray, b.toOpenArray)

3
tests/rlp/all.nim Normal file
View File

@ -0,0 +1,3 @@
import
test_api_usage, test_object_serialization, test_json_suite

View File

@ -0,0 +1,6 @@
{
"listsoflists2": {
"in": "VALID",
"out": "c7c0c1c0c3c0c1c0"
}
}

View File

@ -0,0 +1,46 @@
{
"int32Overflow": {
"in": "INVALID",
"out": "bf0f000000000000021111"
},
"int32Overflow2": {
"in": "INVALID",
"out": "ff0f000000000000021111"
},
"wrongSizeList": {
"in": "INVALID",
"out": "f80180"
},
"wrongSizeList2": {
"in": "INVALID",
"out": "f80100"
},
"incorrectLengthInArray": {
"in": "INVALID",
"out": "b9002100dc2b275d0f74e8a53e6f4ec61b27f24278820be3f82ea2110e582081b0565df0"
},
"randomRLP": {
"in": "INVALID",
"out": "f861f83eb9002100dc2b275d0f74e8a53e6f4ec61b27f24278820be3f82ea2110e582081b0565df027b90015002d5ef8325ae4d034df55d4b58d0dfba64d61ddd17be00000b9001a00dae30907045a2f66fa36f2bb8aa9029cbb0b8a7b3b5c435ab331"
},
"bytesShouldBeSingleByte00": {
"in": "INVALID",
"out": "8100"
},
"bytesShouldBeSingleByte01": {
"in": "INVALID",
"out": "8100"
},
"bytesShouldBeSingleByte7F": {
"in": "INVALID",
"out": "817F"
}
}

View File

@ -0,0 +1,67 @@
{
"T1": {
"in": "INVALID",
"out": ""
},
"T2": {
"in": "INVALID",
"out": "00ab"
},
"T3": {
"in": "INVALID",
"out": "0000ff"
},
"T4": {
"in": "VALID",
"out": "83646F67636174"
},
"T5": {
"in": "INVALID",
"out": "83646F"
},
"T6": {
"in": "INVALID",
"out": "c7c0c1c0c3c0c1c0ff"
},
"T7": {
"in": "INVALID",
"out": "c7c0c1c0c3c0c1"
},
"T8": {
"in": "INVALID",
"out": "8102"
},
"T9": {
"in": "INVALID",
"out": "b800"
},
"T10": {
"in": "INVALID",
"out": "b800"
},
"T11": {
"in": "INVALID",
"out": "b90000"
},
"T12": {
"in": "INVALID",
"out": "ba0002ffff"
},
"T13": {
"in": "INVALID",
"out": "8154"
}
}

View File

@ -0,0 +1,158 @@
{
"emptystring": {
"in": "",
"out": "80"
},
"bytestring00": {
"in": "\u0000",
"out": "00"
},
"bytestring01": {
"in": "\u0001",
"out": "01"
},
"bytestring7F": {
"in": "\u007F",
"out": "7f"
},
"shortstring": {
"in": "dog",
"out": "83646f67"
},
"shortstring2": {
"in": "Lorem ipsum dolor sit amet, consectetur adipisicing eli",
"out": "b74c6f72656d20697073756d20646f6c6f722073697420616d65742c20636f6e7365637465747572206164697069736963696e6720656c69"
},
"longstring": {
"in": "Lorem ipsum dolor sit amet, consectetur adipisicing elit",
"out": "b8384c6f72656d20697073756d20646f6c6f722073697420616d65742c20636f6e7365637465747572206164697069736963696e6720656c6974"
},
"longstring2": {
"in": "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Curabitur mauris magna, suscipit sed vehicula non, iaculis faucibus tortor. Proin suscipit ultricies malesuada. Duis tortor elit, dictum quis tristique eu, ultrices at risus. Morbi a est imperdiet mi ullamcorper aliquet suscipit nec lorem. Aenean quis leo mollis, vulputate elit varius, consequat enim. Nulla ultrices turpis justo, et posuere urna consectetur nec. Proin non convallis metus. Donec tempor ipsum in mauris congue sollicitudin. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Suspendisse convallis sem vel massa faucibus, eget lacinia lacus tempor. Nulla quis ultricies purus. Proin auctor rhoncus nibh condimentum mollis. Aliquam consequat enim at metus luctus, a eleifend purus egestas. Curabitur at nibh metus. Nam bibendum, neque at auctor tristique, lorem libero aliquet arcu, non interdum tellus lectus sit amet eros. Cras rhoncus, metus ac ornare cursus, dolor justo ultrices metus, at ullamcorper volutpat",
"out": "b904004c6f72656d20697073756d20646f6c6f722073697420616d65742c20636f6e73656374657475722061646970697363696e6720656c69742e20437572616269747572206d6175726973206d61676e612c20737573636970697420736564207665686963756c61206e6f6e2c20696163756c697320666175636962757320746f72746f722e2050726f696e20737573636970697420756c74726963696573206d616c6573756164612e204475697320746f72746f7220656c69742c2064696374756d2071756973207472697374697175652065752c20756c7472696365732061742072697375732e204d6f72626920612065737420696d70657264696574206d6920756c6c616d636f7270657220616c6971756574207375736369706974206e6563206c6f72656d2e2041656e65616e2071756973206c656f206d6f6c6c69732c2076756c70757461746520656c6974207661726975732c20636f6e73657175617420656e696d2e204e756c6c6120756c74726963657320747572706973206a7573746f2c20657420706f73756572652075726e6120636f6e7365637465747572206e65632e2050726f696e206e6f6e20636f6e76616c6c6973206d657475732e20446f6e65632074656d706f7220697073756d20696e206d617572697320636f6e67756520736f6c6c696369747564696e2e20566573746962756c756d20616e746520697073756d207072696d697320696e206661756369627573206f726369206c756374757320657420756c74726963657320706f737565726520637562696c69612043757261653b2053757370656e646973736520636f6e76616c6c69732073656d2076656c206d617373612066617563696275732c2065676574206c6163696e6961206c616375732074656d706f722e204e756c6c61207175697320756c747269636965732070757275732e2050726f696e20617563746f722072686f6e637573206e69626820636f6e64696d656e74756d206d6f6c6c69732e20416c697175616d20636f6e73657175617420656e696d206174206d65747573206c75637475732c206120656c656966656e6420707572757320656765737461732e20437572616269747572206174206e696268206d657475732e204e616d20626962656e64756d2c206e6571756520617420617563746f72207472697374697175652c206c6f72656d206c696265726f20616c697175657420617263752c206e6f6e20696e74657264756d2074656c6c7573206c65637475732073697420616d65742065726f732e20437261732072686f6e6375732c206d65747573206163206f726e617265206375727375732c20646f6c6f72206a7573746f20756c747269636573
206d657475732c20617420756c6c616d636f7270657220766f6c7574706174"
},
"zero": {
"in": 0,
"out": "80"
},
"smallint": {
"in": 1,
"out": "01"
},
"smallint2": {
"in": 16,
"out": "10"
},
"smallint3": {
"in": 79,
"out": "4f"
},
"smallint4": {
"in": 127,
"out": "7f"
},
"mediumint1": {
"in": 128,
"out": "8180"
},
"mediumint2": {
"in": 1000,
"out": "8203e8"
},
"mediumint3": {
"in": 100000,
"out": "830186a0"
},
"mediumint4": {
"in": "#83729609699884896815286331701780722",
"out": "8f102030405060708090a0b0c0d0e0f2"
},
"mediumint5": {
"in": "#105315505618206987246253880190783558935785933862974822347068935681",
"out": "9c0100020003000400050006000700080009000a000b000c000d000e01"
},
"emptylist": {
"in": [],
"out": "c0"
},
"stringlist": {
"in": [ "dog", "god", "cat" ],
"out": "cc83646f6783676f6483636174"
},
"multilist": {
"in": [ "zw", [ 4 ], 1 ],
"out": "c6827a77c10401"
},
"shortListMax1": {
"in": [ "asdf", "qwer", "zxcv", "asdf","qwer", "zxcv", "asdf", "qwer", "zxcv", "asdf", "qwer"],
"out": "f784617364668471776572847a78637684617364668471776572847a78637684617364668471776572847a78637684617364668471776572"
},
"longList1" : {
"in" : [
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"]
],
"out": "f840cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376"
},
"longList2" : {
"in" : [
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"],
["asdf","qwer","zxcv"]
],
"out": "f90200cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376"
},
"listsoflists": {
"in": [ [ [], [] ], [] ],
"out": "c4c2c0c0c0"
},
"listsoflists2": {
"in": [ [], [[]], [ [], [[]] ] ],
"out": "c7c0c1c0c3c0c1c0"
},
"dictTest1" : {
"in" : [
["key1", "val1"],
["key2", "val2"],
["key3", "val3"],
["key4", "val4"]
],
"out" : "ecca846b6579318476616c31ca846b6579328476616c32ca846b6579338476616c33ca846b6579348476616c34"
},
"bigint": {
"in": "#115792089237316195423570985008687907853269984665640564039457584007913129639936",
"out": "a1010000000000000000000000000000000000000000000000000000000000000000"
}
}

3
tests/rlp/config.nims Normal file
View File

@ -0,0 +1,3 @@
--threads:on
--path:"$projectDir/../.."

View File

@ -0,0 +1,196 @@
import
math, unittest, strutils,
eth/rlp, util/json_testing
proc q(s: string): string = "\"" & s & "\""
proc i(s: string): string =
  ## Strips all spaces and newlines so inspect output can be compared
  ## without caring about layout.
  s.multiReplace((" ", ""), ("\n", ""))
proc inspectMatch(r: Rlp, s: string): bool =
  ## True when `r.inspect` equals `s`, ignoring all whitespace differences.
  r.inspect.i == s.i
test "empty bytes are not a proper RLP":
var rlp = rlpFromBytes Bytes(@[]).toRange
check:
not rlp.hasData
not rlp.isBlob
not rlp.isList
not rlp.isEmpty
expect Exception:
rlp.skipElem
expect Exception:
discard rlp.getType
expect Exception:
for e in rlp:
discard e.getType
test "you cannot finish a list without appending enough elements":
var writer = initRlpList(3)
writer.append "foo"
writer.append "bar"
expect PrematureFinalizationError:
let result = writer.finish
proc withNewLines(x: string): string =
  ## Returns `x` with a single trailing newline appended.
  result = x
  result.add '\n'
test "encode/decode object":
type
MyEnum = enum
foo,
bar
MyObj = object
a: array[3, char]
b: int
c: MyEnum
var input: MyObj
input.a = ['e', 't', 'h']
input.b = 63
input.c = bar
var writer = initRlpWriter()
writer.append(input)
let bytes = writer.finish()
var rlp = rlpFromBytes(bytes.toRange)
var output = rlp.read(MyObj)
check:
input == output
test "encode and decode lists":
var writer = initRlpList(3)
writer.append "foo"
writer.append ["bar", "baz"]
writer.append [30, 40, 50]
var
bytes = writer.finish
rlp = rlpFromBytes bytes.toRange
check:
bytes.hexRepr == "d183666f6fc8836261728362617ac31e2832"
rlp.inspectMatch """
{
"foo"
{
"bar"
"baz"
}
{
byte 30
byte 40
byte 50
}
}
"""
bytes = encodeList(6000,
"Lorem ipsum dolor sit amet",
"Donec ligula tortor, egestas eu est vitae")
rlp = rlpFromBytes bytes.toRange
check:
rlp.listLen == 3
rlp.listElem(0).toInt(int) == 6000
rlp.listElem(1).toString == "Lorem ipsum dolor sit amet"
rlp.listElem(2).toString == "Donec ligula tortor, egestas eu est vitae"
# test creating RLPs from other RLPs
var list = rlpFromBytes encodeList(rlp.listELem(1), rlp.listELem(0)).toRange
# test that iteration with enterList/skipElem works as expected
list.enterList
check list.toString == "Lorem ipsum dolor sit amet"
list.skipElem
check list.toInt(int32) == 6000.int32
var intVar: int
list >> intVar
check intVar == 6000
check(not list.hasData)
expect Exception: list.skipElem
test "toBytes":
let rlp = rlpFromHex("f2cb847f000001827666827666a040ef02798f211da2e8173d37f255be908871ae65060dbb2f77fb29c0421447f4845ab90b50")
let tok = rlp.listElem(1).toBytes()
check:
tok.len == 32
tok.hexRepr == "40ef02798f211da2e8173d37f255be908871ae65060dbb2f77fb29c0421447f4"
test "nested lists":
let listBytes = encode([[1, 2, 3], [5, 6, 7]])
let listRlp = rlpFromBytes listBytes.toRange
let sublistRlp0 = listRlp.listElem(0)
let sublistRlp1 = listRlp.listElem(1)
check sublistRlp0.listElem(0).toInt(int) == 1
check sublistRlp0.listElem(1).toInt(int) == 2
check sublistRlp0.listElem(2).toInt(int) == 3
check sublistRlp1.listElem(0).toInt(int) == 5
check sublistRlp1.listElem(1).toInt(int) == 6
check sublistRlp1.listElem(2).toInt(int) == 7
test "encoding length":
let listBytes = encode([1,2,3,4,5])
let listRlp = rlpFromBytes listBytes.toRange
check listRlp.listLen == 5
let emptyListBytes = encode ""
check emptyListBytes.len == 1
let emptyListRlp = rlpFromBytes emptyListBytes.toRange
check emptyListRlp.blobLen == 0
test "basic decoding":
var rlp1 = rlpFromHex("856d6f6f7365")
var rlp2 = rlpFromHex("0x856d6f6f7365")
check:
rlp1.inspect == q"moose"
rlp2.inspect == q"moose"
test "malformed/truncated RLP":
var rlp = rlpFromHex("b8056d6f6f7365")
expect MalformedRlpError:
discard rlp.inspect
test "encode byte arrays":
var b1 = [byte(1), 2, 5, 7, 8]
var b2 = [byte(6), 8, 12, 123]
var b3 = @[byte(122), 56, 65, 12]
let rlp = rlpFromBytes(encode((b1, b2, b3)).toRange)
check:
rlp.listLen == 3
rlp.listElem(0).toBytes().toSeq() == @b1
rlp.listElem(1).toBytes().toSeq() == @b2
rlp.listElem(2).toBytes().toSeq() == @b3
# The first byte here is the length of the datum (132 - 128 => 4)
$(rlp.listElem(1).rawData) == "R[132, 6, 8, 12, 123]"
test "empty byte arrays":
var
rlp = rlpFromBytes rlp.encode("").toRange
b = rlp.toBytes
check $b == "R[]"
test "encode/decode floats":
for f in [high(float64), low(float64), 0.1, 122.23,
103487315.128934,
1943935743563457201.391754032785692,
0, -0,
Inf, NegInf, NaN]:
template isNaN(n): bool =
classify(n) == fcNaN
template chk(input) =
let restored = decode(encode(input), float64)
check restored == input or (input.isNaN and restored.isNaN)
chk f
chk -f

View File

@ -0,0 +1,8 @@
import
os, strutils,
util/json_testing
# Runs every JSON test-vector file found (recursively) under tests/cases.
for file in walkDirRec("tests/cases"):
  if file.endsWith("json"):
    runTests(file)

View File

@ -0,0 +1,82 @@
import
unittest, times, eth/rlp, util/json_testing
type
  Transaction = object
    amount: int
    time: DateTime    # deliberately excluded from rlpFields below
    sender: string
    receiver: string

  Foo = object
    x: uint64
    y: string
    z: seq[int]

  Bar = object
    b: string
    f: Foo

  CompressedFoo = object

  CustomSerialized = object
    customFoo {.rlpCustomSerialization.}: Foo  # uses the custom append/read overloads
    ignored {.rlpIgnore.}: int                 # not serialized as a regular field

# Restrict and order the fields that take part in RLP serialization.
rlpFields Foo,
  x, y, z

rlpFields Transaction,
  sender, receiver, amount
# Zero-initialized default value of `T` (result starts zeroed, so an empty
# body suffices).
proc default(T: typedesc): T = discard

proc append*(rlpWriter: var RlpWriter, holder: CustomSerialized, f: Foo) =
  ## Custom serializer for CustomSerialized.customFoo: writes f.x, the
  ## *length* of f.y (not its contents), and the holder's `ignored` field.
  rlpWriter.append(f.x)
  rlpWriter.append(f.y.len)
  rlpWriter.append(holder.ignored)

proc read*(rlp: var Rlp, holder: var CustomSerialized, T: type Foo): Foo =
  ## Custom deserializer matching the serializer above: restores x,
  ## allocates y with the stored length, and sets holder.ignored to twice
  ## the stored value.
  result.x = rlp.read(uint64)
  result.y = newString(rlp.read(int))
  holder.ignored = rlp.read(int) * 2
test "encoding and decoding an object":
var originalBar = Bar(b: "abracadabra",
f: Foo(x: 5'u64, y: "hocus pocus", z: @[100, 200, 300]))
var bytes = encode(originalBar)
var r = rlpFromBytes(bytes.toRange)
var restoredBar = r.read(Bar)
check:
originalBar == restoredBar
var t1 = Transaction(time: now(), amount: 1000, sender: "Alice", receiver: "Bob")
bytes = encode(t1)
var t2 = bytes.decode(Transaction)
check:
bytes.hexRepr == "cd85416c69636583426f628203e8" # verifies that Alice comes first
t2.time == default(DateTime)
t2.sender == "Alice"
t2.receiver == "Bob"
t2.amount == 1000
test "custom field serialization":
var origVal = CustomSerialized(customFoo: Foo(x: 10'u64, y: "y", z: @[]), ignored: 5)
var bytes = encode(origVal)
var r = rlpFromBytes(bytes.toRange)
var restored = r.read(CustomSerialized)
check:
origVal.customFoo.x == restored.customFoo.x
origVal.customFoo.y.len == restored.customFoo.y.len
restored.ignored == 10
test "RLP fields count":
check:
Bar.rlpFieldsCount == 2
Foo.rlpFieldsCount == 3
Transaction.rlpFieldsCount == 3

View File

@ -0,0 +1,74 @@
import
json, strutils, eth/rlp
proc append(output: var RlpWriter, js: JsonNode) =
  ## RLP-encodes a JSON test input. Only bools, ints, strings and arrays
  ## appear in the test suites; every other JSON kind is rejected.
  case js.kind
  of JNull, JFloat, JObject:
    raise newException(ValueError, "Unsupported JSON value type " & $js.kind)
  of JBool:
    output.append js.bval.int  # booleans are encoded as the integers 0/1
  of JInt:
    output.append int(js.num)
  of JString:
    output.append js.str
  of JArray:
    output.append js.elems
proc hexRepr*(bytes: BytesRange|Bytes): string =
  ## Lowercase hex dump of `bytes`, e.g. [0x01, 0xab] => "01ab".
  result = newStringOfCap(bytes.len * 2)
  # `b` rather than `byte` as the loop variable: the original shadowed the
  # built-in `byte` type, which is confusing and blocks using the type name
  # inside the loop.
  for b in bytes:
    result.add(toHex(int(b), 2).toLowerAscii)
proc `==`(lhs: JsonNode, rhs: string): bool =
  ## A JSON node counts as equal to a string only when it is a JString
  ## carrying exactly that text.
  if lhs.kind != JString:
    return false
  lhs.str == rhs
proc runTests*(filename: string) =
  ## Executes the RLP test vectors stored in the JSON file `filename`.
  ##
  ## Each entry has an "in" and an "out" field:
  ##   * in == "VALID":   decoding the hex in `out` must succeed
  ##   * in == "INVALID": decoding the hex in `out` must raise
  ##   * otherwise:       `in` is a value to encode and `out` the expected hex
  ##     (string inputs starting with '#' denote big ints and are skipped)
  let js = json.parseFile(filename)

  for testname, testdata in js:
    template testStatus(status: string) =
      echo status, " ", filename, " :: ", testname

    let
      input = testdata{"in"}
      output = testdata{"out"}

    # entries without both fields (or with a non-string `out`) are skipped
    if input.isNil or output.isNil or output.kind != JString:
      testStatus "IGNORED"
      continue

    if input == "VALID":
      var rlp = rlpFromHex(output.str)
      discard rlp.inspect
    elif input == "INVALID":
      # the bytes must be rejected somewhere along full traversal
      var success = false
      var inspectOutput = ""
      try:
        var rlp = rlpFromHex(output.str)
        inspectOutput = rlp.inspect(1)
        discard rlp.getType
        while rlp.hasData: discard rlp.toNodes
      except MalformedRlpError, ValueError:
        success = true
      if not success:
        testStatus "FAILED"
        echo " ACCEPTED MALFORMED BYTES: ", output.str
        echo " INTERPRETATION:\n", inspectOutput
        continue
    else:
      if input.kind == JString and input.str.len != 0 and input.str[0] == '#':
        # big-int vectors are not supported by this runner
        continue

      var outRlp = initRlpWriter()
      outRlp.append input
      let
        actual = outRlp.finish.hexRepr
        expected = output.str
      if actual != expected:
        testStatus "FAILED"
        echo " EXPECTED BYTES: ", expected
        echo " ACTUAL BYTES: ", actual
        continue

    testStatus "OK"

View File

@ -1,4 +1,4 @@
import unittest, eth/common, rlp
import unittest, eth/common, eth/rlp
proc `==`(a, b: HashOrStatus): bool =
result = a.isHash == b.isHash

8
tests/trie/all.nim Normal file
View File

@ -0,0 +1,8 @@
import
test_binaries_utils, test_bin_trie,
test_branches_utils, examples,
test_hexary_trie, test_json_suite,
test_sparse_binary_trie,
test_storage_backends,
test_caching_db_backend

View File

@ -0,0 +1,44 @@
{
"test1": {
"in": {
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b":
"0xf848018405f446a7a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
"0x095e7baea6a6c7c4c2dfeb977efac326af552d87":
"0xf8440101a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a004bccc5d94f4d1f99aab44369a910179931772f2a5c001c3229f57831c102769",
"0xd2571607e241ecf590ed94b12d87c94babe36db6":
"0xf8440180a0ba4b47865c55a341a4a78759bb913cd15c3ee8eaf30a62fa8d1c8863113d84e8a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
"0x62c01474f089b07dae603491675dc5b5748f7049":
"0xf8448080a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
"0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba":
"0xf8478083019a59a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
"root": "0x730a444e08ab4b8dee147c9b232fc52d34a223d600031c1e9d25bfc985cbd797",
"hexEncoded": true
},
"test2": {
"in": {
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b":
"0xf84c01880de0b6b3a7622746a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
"0x095e7baea6a6c7c4c2dfeb977efac326af552d87":
"0xf84780830186b7a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0501653f02840675b1aab0328c6634762af5d51764e78f9641cccd9b27b90db4f",
"0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba":
"0xf8468082521aa056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
"root": "0xa7c787bf470808896308c215e22c7a580a0087bb6db6e8695fb4759537283a83",
"hexEncoded": true
},
"test3": {
"in": {
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b":
"0xf84c01880de0b6b3a7614bc3a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
"0x095e7baea6a6c7c4c2dfeb977efac326af552d87":
"0xf84880840132b3a0a065fee2fffd7a68488cf7ef79f35f7979133172ac5727b5e0cf322953d13de492a06e5d8fec8b6b9bf41c3fb9b61696d5c87b66f6daa98d5f02ba9361b0c6916467",
"0x0000000000000000000000000000000000000001":
"0xf8448080a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
"0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba":
"0xf8478083012d9da056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
"root": "0x40b37be88a49e2c08b8d33fcb03a0676ffd0481df54dfebd3512b8ec54f40cad",
"hexEncoded": true
}
}

View File

@ -0,0 +1,54 @@
{
"singleItem": {
"in": {
"A": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
},
"root": "0xd23786fb4a010da3ce639d66d5e904a11dbc02746d1ce25029e53290cabf28ab"
},
"dogs": {
"in": {
"doe": "reindeer",
"dog": "puppy",
"dogglesworth": "cat"
},
"root": "0x8aad789dff2f538bca5d8ea56e8abe10f4c7ba3a5dea95fea4cd6e7c3a1168d3"
},
"puppy": {
"in": {
"do": "verb",
"horse": "stallion",
"doge": "coin",
"dog": "puppy"
},
"root": "0x5991bb8c6514148a29db676a14ac506cd2cd5775ace63c30a4fe457715e9ac84"
},
"foo": {
"in": {
"foo": "bar",
"food": "bass"
},
"root": "0x17beaa1648bafa633cda809c90c04af50fc8aed3cb40d16efbddee6fdf63c4c3"
},
"smallValues": {
"in": {
"be": "e",
"dog": "puppy",
"bed": "d"
},
"root": "0x3f67c7a47520f79faa29255d2d3c084a7a6df0453116ed7232ff10277a8be68b"
},
"testy": {
"in": {
"test": "test",
"te": "testy"
},
"root": "0x8452568af70d8d140f58d941338542f645fcca50094b20f3c3d8c3df49337928"
},
"hex": {
"in": {
"0x0045": "0x0123456789",
"0x4500": "0x9876543210"
},
"root": "0x285505fcabe84badc8aa310e2aae17eddc7d120aabec8a476902c8184b3a3503"
}
}

View File

@ -0,0 +1,54 @@
{
"singleItem": {
"in": {
"A": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
},
"root": "0xe9e2935138352776cad724d31c9fa5266a5c593bb97726dd2a908fe6d53284df"
},
"dogs": {
"in": {
"doe": "reindeer",
"dog": "puppy",
"dogglesworth": "cat"
},
"root": "0xd4cd937e4a4368d7931a9cf51686b7e10abb3dce38a39000fd7902a092b64585"
},
"puppy": {
"in": {
"do": "verb",
"horse": "stallion",
"doge": "coin",
"dog": "puppy"
},
"root": "0x29b235a58c3c25ab83010c327d5932bcf05324b7d6b1185e650798034783ca9d"
},
"foo": {
"in": {
"foo": "bar",
"food": "bass"
},
"root": "0x1385f23a33021025d9e87cca5c66c00de06178807b96a9acc92b7d651ccde842"
},
"smallValues": {
"in": {
"be": "e",
"dog": "puppy",
"bed": "d"
},
"root": "0x826a4f9f9054a3e980e54b20da992c24fa20467f1ca635115ef4917be66e746f"
},
"testy": {
"in": {
"test": "test",
"te": "testy"
},
"root": "0xaea54fb6c80499674248a462864c420c9d9f3b3d38c879c12425bade1ad76552"
},
"hex": {
"in": {
"0x0045": "0x0123456789",
"0x4500": "0x9876543210"
},
"root": "0xbc11c02c8ab456db0c4d2728b6a2a6210d06f26a2ace4f7d8bdfc72ddf2630ab"
}
}

View File

@ -0,0 +1,105 @@
{
"emptyValues": {
"in": [
["do", "verb"],
["ether", "wookiedoo"],
["horse", "stallion"],
["shaman", "horse"],
["doge", "coin"],
["ether", null],
["dog", "puppy"],
["shaman", null]
],
"root": "0x5991bb8c6514148a29db676a14ac506cd2cd5775ace63c30a4fe457715e9ac84"
},
"branchingTests": {
"in":[
["0x04110d816c380812a427968ece99b1c963dfbce6", "something"],
["0x095e7baea6a6c7c4c2dfeb977efac326af552d87", "something"],
["0x0a517d755cebbf66312b30fff713666a9cb917e0", "something"],
["0x24dd378f51adc67a50e339e8031fe9bd4aafab36", "something"],
["0x293f982d000532a7861ab122bdc4bbfd26bf9030", "something"],
["0x2cf5732f017b0cf1b1f13a1478e10239716bf6b5", "something"],
["0x31c640b92c21a1f1465c91070b4b3b4d6854195f", "something"],
["0x37f998764813b136ddf5a754f34063fd03065e36", "something"],
["0x37fa399a749c121f8a15ce77e3d9f9bec8020d7a", "something"],
["0x4f36659fa632310b6ec438dea4085b522a2dd077", "something"],
["0x62c01474f089b07dae603491675dc5b5748f7049", "something"],
["0x729af7294be595a0efd7d891c9e51f89c07950c7", "something"],
["0x83e3e5a16d3b696a0314b30b2534804dd5e11197", "something"],
["0x8703df2417e0d7c59d063caa9583cb10a4d20532", "something"],
["0x8dffcd74e5b5923512916c6a64b502689cfa65e1", "something"],
["0x95a4d7cccb5204733874fa87285a176fe1e9e240", "something"],
["0x99b2fcba8120bedd048fe79f5262a6690ed38c39", "something"],
["0xa4202b8b8afd5354e3e40a219bdc17f6001bf2cf", "something"],
["0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b", "something"],
["0xa9647f4a0a14042d91dc33c0328030a7157c93ae", "something"],
["0xaa6cffe5185732689c18f37a7f86170cb7304c2a", "something"],
["0xaae4a2e3c51c04606dcb3723456e58f3ed214f45", "something"],
["0xc37a43e940dfb5baf581a0b82b351d48305fc885", "something"],
["0xd2571607e241ecf590ed94b12d87c94babe36db6", "something"],
["0xf735071cbee190d76b704ce68384fc21e389fbe7", "something"],
["0x04110d816c380812a427968ece99b1c963dfbce6", null],
["0x095e7baea6a6c7c4c2dfeb977efac326af552d87", null],
["0x0a517d755cebbf66312b30fff713666a9cb917e0", null],
["0x24dd378f51adc67a50e339e8031fe9bd4aafab36", null],
["0x293f982d000532a7861ab122bdc4bbfd26bf9030", null],
["0x2cf5732f017b0cf1b1f13a1478e10239716bf6b5", null],
["0x31c640b92c21a1f1465c91070b4b3b4d6854195f", null],
["0x37f998764813b136ddf5a754f34063fd03065e36", null],
["0x37fa399a749c121f8a15ce77e3d9f9bec8020d7a", null],
["0x4f36659fa632310b6ec438dea4085b522a2dd077", null],
["0x62c01474f089b07dae603491675dc5b5748f7049", null],
["0x729af7294be595a0efd7d891c9e51f89c07950c7", null],
["0x83e3e5a16d3b696a0314b30b2534804dd5e11197", null],
["0x8703df2417e0d7c59d063caa9583cb10a4d20532", null],
["0x8dffcd74e5b5923512916c6a64b502689cfa65e1", null],
["0x95a4d7cccb5204733874fa87285a176fe1e9e240", null],
["0x99b2fcba8120bedd048fe79f5262a6690ed38c39", null],
["0xa4202b8b8afd5354e3e40a219bdc17f6001bf2cf", null],
["0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b", null],
["0xa9647f4a0a14042d91dc33c0328030a7157c93ae", null],
["0xaa6cffe5185732689c18f37a7f86170cb7304c2a", null],
["0xaae4a2e3c51c04606dcb3723456e58f3ed214f45", null],
["0xc37a43e940dfb5baf581a0b82b351d48305fc885", null],
["0xd2571607e241ecf590ed94b12d87c94babe36db6", null],
["0xf735071cbee190d76b704ce68384fc21e389fbe7", null]
],
"root": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
},
"jeff": {
"in": [
["0x0000000000000000000000000000000000000000000000000000000000000045", "0x22b224a1420a802ab51d326e29fa98e34c4f24ea"],
["0x0000000000000000000000000000000000000000000000000000000000000046", "0x67706c2076330000000000000000000000000000000000000000000000000000"],
["0x0000000000000000000000000000000000000000000000000000001234567890", "0x697c7b8c961b56f675d570498424ac8de1a918f6"],
["0x000000000000000000000000697c7b8c961b56f675d570498424ac8de1a918f6", "0x1234567890"],
["0x0000000000000000000000007ef9e639e2733cb34e4dfc576d4b23f72db776b2", "0x4655474156000000000000000000000000000000000000000000000000000000"],
["0x000000000000000000000000ec4f34c97e43fbb2816cfd95e388353c7181dab1", "0x4e616d6552656700000000000000000000000000000000000000000000000000"],
["0x4655474156000000000000000000000000000000000000000000000000000000", "0x7ef9e639e2733cb34e4dfc576d4b23f72db776b2"],
["0x4e616d6552656700000000000000000000000000000000000000000000000000", "0xec4f34c97e43fbb2816cfd95e388353c7181dab1"],
["0x0000000000000000000000000000000000000000000000000000001234567890", null],
["0x000000000000000000000000697c7b8c961b56f675d570498424ac8de1a918f6", "0x6f6f6f6820736f2067726561742c207265616c6c6c793f000000000000000000"],
["0x6f6f6f6820736f2067726561742c207265616c6c6c793f000000000000000000", "0x697c7b8c961b56f675d570498424ac8de1a918f6"]
],
"root": "0x9f6221ebb8efe7cff60a716ecb886e67dd042014be444669f0159d8e68b42100"
},
"insert-middle-leaf": {
"in": [
[ "key1aa", "0123456789012345678901234567890123456789xxx"],
[ "key1", "0123456789012345678901234567890123456789Very_Long"],
[ "key2bb", "aval3"],
[ "key2", "short"],
[ "key3cc", "aval3"],
[ "key3","1234567890123456789012345678901"]
],
"root": "0xcb65032e2f76c48b82b5c24b3db8f670ce73982869d38cd39a624f23d62a9e89"
},
"branch-value-update": {
"in": [
[ "abc", "123" ],
[ "abcd", "abcd" ],
[ "abc", "abc" ]
],
"root": "0x7a320748f780ad9ad5b0837302075ce0eeba6c26e3d8562c67ccc0f1b273298a"
}
}

View File

@ -0,0 +1,86 @@
{
"emptyValues": {
"in": [
["do", "verb"],
["ether", "wookiedoo"],
["horse", "stallion"],
["shaman", "horse"],
["doge", "coin"],
["ether", null],
["dog", "puppy"],
["shaman", null]
],
"root": "0x29b235a58c3c25ab83010c327d5932bcf05324b7d6b1185e650798034783ca9d"
},
"branchingTests": {
"in":[
["0x04110d816c380812a427968ece99b1c963dfbce6", "something"],
["0x095e7baea6a6c7c4c2dfeb977efac326af552d87", "something"],
["0x0a517d755cebbf66312b30fff713666a9cb917e0", "something"],
["0x24dd378f51adc67a50e339e8031fe9bd4aafab36", "something"],
["0x293f982d000532a7861ab122bdc4bbfd26bf9030", "something"],
["0x2cf5732f017b0cf1b1f13a1478e10239716bf6b5", "something"],
["0x31c640b92c21a1f1465c91070b4b3b4d6854195f", "something"],
["0x37f998764813b136ddf5a754f34063fd03065e36", "something"],
["0x37fa399a749c121f8a15ce77e3d9f9bec8020d7a", "something"],
["0x4f36659fa632310b6ec438dea4085b522a2dd077", "something"],
["0x62c01474f089b07dae603491675dc5b5748f7049", "something"],
["0x729af7294be595a0efd7d891c9e51f89c07950c7", "something"],
["0x83e3e5a16d3b696a0314b30b2534804dd5e11197", "something"],
["0x8703df2417e0d7c59d063caa9583cb10a4d20532", "something"],
["0x8dffcd74e5b5923512916c6a64b502689cfa65e1", "something"],
["0x95a4d7cccb5204733874fa87285a176fe1e9e240", "something"],
["0x99b2fcba8120bedd048fe79f5262a6690ed38c39", "something"],
["0xa4202b8b8afd5354e3e40a219bdc17f6001bf2cf", "something"],
["0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b", "something"],
["0xa9647f4a0a14042d91dc33c0328030a7157c93ae", "something"],
["0xaa6cffe5185732689c18f37a7f86170cb7304c2a", "something"],
["0xaae4a2e3c51c04606dcb3723456e58f3ed214f45", "something"],
["0xc37a43e940dfb5baf581a0b82b351d48305fc885", "something"],
["0xd2571607e241ecf590ed94b12d87c94babe36db6", "something"],
["0xf735071cbee190d76b704ce68384fc21e389fbe7", "something"],
["0x04110d816c380812a427968ece99b1c963dfbce6", null],
["0x095e7baea6a6c7c4c2dfeb977efac326af552d87", null],
["0x0a517d755cebbf66312b30fff713666a9cb917e0", null],
["0x24dd378f51adc67a50e339e8031fe9bd4aafab36", null],
["0x293f982d000532a7861ab122bdc4bbfd26bf9030", null],
["0x2cf5732f017b0cf1b1f13a1478e10239716bf6b5", null],
["0x31c640b92c21a1f1465c91070b4b3b4d6854195f", null],
["0x37f998764813b136ddf5a754f34063fd03065e36", null],
["0x37fa399a749c121f8a15ce77e3d9f9bec8020d7a", null],
["0x4f36659fa632310b6ec438dea4085b522a2dd077", null],
["0x62c01474f089b07dae603491675dc5b5748f7049", null],
["0x729af7294be595a0efd7d891c9e51f89c07950c7", null],
["0x83e3e5a16d3b696a0314b30b2534804dd5e11197", null],
["0x8703df2417e0d7c59d063caa9583cb10a4d20532", null],
["0x8dffcd74e5b5923512916c6a64b502689cfa65e1", null],
["0x95a4d7cccb5204733874fa87285a176fe1e9e240", null],
["0x99b2fcba8120bedd048fe79f5262a6690ed38c39", null],
["0xa4202b8b8afd5354e3e40a219bdc17f6001bf2cf", null],
["0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b", null],
["0xa9647f4a0a14042d91dc33c0328030a7157c93ae", null],
["0xaa6cffe5185732689c18f37a7f86170cb7304c2a", null],
["0xaae4a2e3c51c04606dcb3723456e58f3ed214f45", null],
["0xc37a43e940dfb5baf581a0b82b351d48305fc885", null],
["0xd2571607e241ecf590ed94b12d87c94babe36db6", null],
["0xf735071cbee190d76b704ce68384fc21e389fbe7", null]
],
"root": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
},
"jeff": {
"in": [
["0x0000000000000000000000000000000000000000000000000000000000000045", "0x22b224a1420a802ab51d326e29fa98e34c4f24ea"],
["0x0000000000000000000000000000000000000000000000000000000000000046", "0x67706c2076330000000000000000000000000000000000000000000000000000"],
["0x0000000000000000000000000000000000000000000000000000001234567890", "0x697c7b8c961b56f675d570498424ac8de1a918f6"],
["0x000000000000000000000000697c7b8c961b56f675d570498424ac8de1a918f6", "0x1234567890"],
["0x0000000000000000000000007ef9e639e2733cb34e4dfc576d4b23f72db776b2", "0x4655474156000000000000000000000000000000000000000000000000000000"],
["0x000000000000000000000000ec4f34c97e43fbb2816cfd95e388353c7181dab1", "0x4e616d6552656700000000000000000000000000000000000000000000000000"],
["0x4655474156000000000000000000000000000000000000000000000000000000", "0x7ef9e639e2733cb34e4dfc576d4b23f72db776b2"],
["0x4e616d6552656700000000000000000000000000000000000000000000000000", "0xec4f34c97e43fbb2816cfd95e388353c7181dab1"],
["0x0000000000000000000000000000000000000000000000000000001234567890", null],
["0x000000000000000000000000697c7b8c961b56f675d570498424ac8de1a918f6", "0x6f6f6f6820736f2067726561742c207265616c6c6c793f000000000000000000"],
["0x6f6f6f6820736f2067726561742c207265616c6c6c793f000000000000000000", "0x697c7b8c961b56f675d570498424ac8de1a918f6"]
],
"root": "0x72adb52e9d9428f808e3e8045be18d3baa77881d0cfab89a17a2bcbacee2f320"
}
}

View File

@ -0,0 +1,19 @@
{
"basic": {
"in": [ "cat", "doge", "wallace" ],
"tests": [
[ "", "", "cat" ],
[ "bobo", "", "cat" ],
[ "c", "", "cat" ],
[ "car", "", "cat" ],
[ "cat", "", "doge" ],
[ "catering", "cat", "doge" ],
[ "d", "cat", "doge" ],
[ "doge", "cat", "wallace" ],
[ "dogerton", "doge", "wallace" ],
[ "w", "doge", "wallace" ],
[ "wallace", "doge", "" ],
[ "wallace123", "wallace", ""]
]
}
}

3
tests/trie/config.nims Normal file
View File

@ -0,0 +1,3 @@
# Build settings applied to every test module in this directory.
--threads:on                # the trie tests are built with threads enabled
--path:"$projectDir/../.."  # resolve `eth/...` imports from the repository root

92
tests/trie/examples.nim Normal file
View File

@ -0,0 +1,92 @@
import
unittest,
nimcrypto/[keccak, hash],
eth/trie/[defs, db, binary, binaries, utils, branches]
suite "examples":
  # Shared fixture: one in-memory DB and one binary trie are reused and
  # mutated across the tests below, so the tests are order-dependent.
  var db = newMemoryDB()
  var trie = initBinaryTrie(db)

  test "basic set/get":
    trie.set("key1", "value1")
    trie.set("key2", "value2")
    check trie.get("key1") == "value1".toRange
    check trie.get("key2") == "value2".toRange

  test "check branch exists":
    # A branch exists for any prefix of a stored key, but not for keys
    # that diverge from, or extend past, the stored ones.
    check checkIfBranchExist(db, trie.getRootHash(), "key") == true
    check checkIfBranchExist(db, trie.getRootHash(), "key1") == true
    check checkIfBranchExist(db, trie.getRootHash(), "ken") == false
    check checkIfBranchExist(db, trie.getRootHash(), "key123") == false

  test "branches utils":
    var branchA = getBranch(db, trie.getRootHash(), "key1")
    # ==> [A, B, C1, D1]
    check branchA.len == 4

    var branchB = getBranch(db, trie.getRootHash(), "key2")
    # ==> [A, B, C2, D2]
    check branchB.len == 4

    check isValidBranch(branchA, trie.getRootHash(), "key1", "value1") == true
    # a branch also proves absence: "key5" resolves to the empty value
    check isValidBranch(branchA, trie.getRootHash(), "key5", "") == true

    # branchB does not cover "key1", so validation must reject it
    expect InvalidNode:
      check isValidBranch(branchB, trie.getRootHash(), "key1", "value1")

    var x = getBranch(db, trie.getRootHash(), "key")
    # ==> [A]
    check x.len == 1

    expect InvalidKeyError:
      x = getBranch(db, trie.getRootHash(), "key123") # InvalidKeyError

    x = getBranch(db, trie.getRootHash(), "key5") # there is still a branch for a non-existent key
    # ==> [A]
    check x.len == 1

  test "getWitness":
    var branch = getWitness(db, trie.getRootHash(), "key1")
    # equivalent to `getBranch(db, trie.getRootHash(), "key1")`
    # ==> [A, B, C1, D1]
    check branch.len == 4

    branch = getWitness(db, trie.getRootHash(), "key")
    # this will include additional nodes of "key2"
    # ==> [A, B, C1, D1, C2, D2]
    check branch.len == 6

    branch = getWitness(db, trie.getRootHash(), "")
    # this will return the whole trie
    # ==> [A, B, C1, D1, C2, D2]
    check branch.len == 6

  # Snapshot of the DB size before the delete-related tests below.
  let beforeDeleteLen = db.totalRecordsInMemoryDB

  test "verify intermediate entries existence":
    var branchs = getWitness(db, trie.getRootHash, zeroBytesRange)
    # set operations created extra intermediate entries, so the witness
    # of the current root is smaller than the whole DB
    check branchs.len < beforeDeleteLen

    var node = branchs[1]
    let nodeHash = keccak256.digest(node.baseAddr, uint(node.len))
    var nodes = getTrieNodes(db, nodeHash)
    check nodes.len == branchs.len - 1

  test "delete sub trie":
    # delete the whole subtrie whose keys share the prefix "key"
    trie.deleteSubtrie("key")
    check trie.get("key1") == zeroBytesRange
    check trie.get("key2") == zeroBytesRange

  test "prove the lie":
    # `delete` and `deleteSubtrie` do not actually remove nodes from the
    # underlying DB; they only detach them from the current root
    check db.totalRecordsInMemoryDB == beforeDeleteLen
    var branchs = getWitness(db, trie.getRootHash, zeroBytesRange)
    check branchs.len == 0

  test "dictionary syntax API":
    # dictionary syntax API
    trie["moon"] = "sun"
    check "moon" in trie
    check trie["moon"] == "sun".toRange

View File

@ -0,0 +1,128 @@
import
unittest, random,
eth/trie/[trie_defs, db, binary],
test_utils
suite "binary trie":
  test "different order insert":
    ## Inserting the same key/value pairs in different (shuffled) orders
    ## must always converge to the same root hash.
    randomize()
    var kv_pairs = randKVPair()
    var result = zeroHash
    # Run several rounds: with a single round the cross-round root
    # comparison below is vacuous, because `result` starts as zeroHash
    # and the `or` always succeeds.  (The old code looped `0..<1` while
    # its comment claimed "repeat 3 times".)
    for _ in 0..<3: # repeat 3 times
      var db = newMemoryDB()
      var trie = initBinaryTrie(db)
      random.shuffle(kv_pairs)

      for c in kv_pairs:
        trie.set(c.key, c.value)
        let x = trie.get(c.key)
        let y = toRange(c.value)
        check y == x

      # Every insertion order must produce the same root.
      check result == zeroHash or trie.getRootHash() == result
      result = trie.getRootHash()

      # Re-inserting an already existing key/value pair is a no-op.
      trie.set(kv_pairs[0].key, kv_pairs[0].value)
      check trie.getRootHash() == result

      # Deleting every key/value pair must restore the empty root.
      random.shuffle(kv_pairs)
      for c in kv_pairs:
        trie.delete(c.key)
      check trie.getRootHash() == zeroHash

  # (kv1, kv2, key-to-delete, expect-deletion, expect-NodeOverrideError)
  const delSubtrieData = [
    (("\x12\x34\x56\x78", "78"), ("\x12\x34\x56\x79", "79"), "\x12\x34\x56", true, false),
    (("\x12\x34\x56\x78", "78"), ("\x12\x34\x56\xff", "ff"), "\x12\x34\x56", true, false),
    (("\x12\x34\x56\x78", "78"), ("\x12\x34\x56\x79", "79"), "\x12\x34\x57", false, false),
    (("\x12\x34\x56\x78", "78"), ("\x12\x34\x56\x79", "79"), "\x12\x34\x56\x78\x9a", false, true)
  ]

  test "delete subtrie":
    for data in delSubtrieData:
      var db = newMemoryDB()
      var trie = initBinaryTrie(db)
      let kv1 = data[0]
      let kv2 = data[1]
      let key_to_be_deleted = data[2]
      let will_delete = data[3]
      let will_raise_error = data[4]

      # Populate the trie with two keys sharing a common prefix.
      trie.set(kv1[0], kv1[1])
      trie.set(kv2[0], kv2[1])
      check trie.get(kv1[0]) == toRange(kv1[1])
      check trie.get(kv2[0]) == toRange(kv2[1])

      if will_delete:
        # Deleting the shared prefix removes both keys.
        trie.deleteSubtrie(key_to_be_deleted)
        check trie.get(kv1[0]) == zeroBytesRange
        check trie.get(kv2[0]) == zeroBytesRange
        check trie.getRootHash() == zeroHash
      else:
        if will_raise_error:
          # Deleting below a leaf must raise; `expect` also fails when
          # no exception is raised (the old try/except silently passed
          # in that case).
          expect NodeOverrideError:
            trie.deleteSubtrie(key_to_be_deleted)
        else:
          # Deleting a non-existent subtrie is a harmless no-op.
          let root_hash_before_delete = trie.getRootHash()
          trie.deleteSubtrie(key_to_be_deleted)
          check trie.get(kv1[0]) == toRange(kv1[1])
          check trie.get(kv2[0]) == toRange(kv2[1])
          check trie.getRootHash() == root_hash_before_delete

  # (key, expect-NodeOverrideError-on-delete)
  const invalidKeyData = [
    ("\x12\x34\x56", false),
    ("\x12\x34\x56\x77", false),
    ("\x12\x34\x56\x78\x9a", true),
    ("\x12\x34\x56\x79\xab", true),
    ("\xab\xcd\xef", false)
  ]

  test "invalid key":
    for data in invalidKeyData:
      var db = newMemoryDB()
      var trie = initBinaryTrie(db)
      trie.set("\x12\x34\x56\x78", "78")
      trie.set("\x12\x34\x56\x79", "79")

      let invalidKey = data[0]
      let if_error = data[1]
      # None of the probe keys is stored in the trie.
      check trie.get(invalidKey) == zeroBytesRange

      if if_error:
        # Deleting past a leaf must raise NodeOverrideError.
        expect NodeOverrideError:
          trie.delete(invalidKey)
      else:
        # Deleting a missing key leaves the trie untouched.
        let previous_root_hash = trie.getRootHash()
        trie.delete(invalidKey)
        check previous_root_hash == trie.getRootHash()

  test "update value":
    let keys = randList(string, randGen(32, 32), randGen(100, 100))
    # 50 random indices (0..99) into `keys` selecting entries to update.
    let vals = randList(int, randGen(0, 99), randGen(50, 50))
    var db = newMemoryDB()
    var trie = initBinaryTrie(db)
    for key in keys:
      trie.set(key, "old")

    var current_root = trie.getRootHash()
    for i in vals:
      # Re-setting the same value must not change the root ...
      trie.set(keys[i], "old")
      check current_root == trie.getRootHash()
      # ... while setting a new value must.
      trie.set(keys[i], "new")
      check current_root != trie.getRootHash()
      check trie.get(keys[i]) == toRange("new")
      current_root = trie.getRootHash()

View File

@ -0,0 +1,176 @@
import
unittest, strutils,
ranges/bitranges, eth/rlp/types, nimcrypto/[keccak, hash],
eth/trie/[binaries, trie_utils],
test_utils
proc parseBitVector(x: string): BitRange =
  ## Translate a textual bit pattern such as "0011" into a `BitRange`,
  ## mapping '1' to true and every other character to false.
  result = genBitVec(x.len)
  var pos = 0
  for ch in x:
    result[pos] = ch == '1'
    inc pos
const
  # (bits A, bits B, expected common-prefix length in bits)
  commonPrefixData = [
    (@[0b0000_0000.byte], @[0b0000_0000.byte], 8),
    (@[0b0000_0000.byte], @[0b1000_0000.byte], 0),
    (@[0b1000_0000.byte], @[0b1100_0000.byte], 1),
    (@[0b0000_0000.byte], @[0b0100_0000.byte], 1),
    (@[0b1110_0000.byte], @[0b1100_0000.byte], 2),
    (@[0b0000_1111.byte], @[0b1111_1111.byte], 0)
  ]

suite "binaries utils":
  test "get common prefix length":
    # getCommonPrefixLength must be symmetric in its arguments.
    for c in commonPrefixData:
      var
        c0 = c[0]
        c1 = c[1]
      let actual_a = getCommonPrefixLength(c0.bits, c1.bits)
      let actual_b = getCommonPrefixLength(c1.bits, c0.bits)
      let expected = c[2]
      check actual_a == actual_b
      check actual_a == expected

  const
    None = ""
    # serialized node -> (kind ordinal, keypath/left child, child/right
    # child/value, expect-InvalidNode).  The 32-byte runs are keccak
    # hashes embedded as raw string literals.
    parseNodeData = {
      "\x00\x03\x04\x05\xc5\xd2F\x01\x86\xf7#<\x92~}\xb2\xdc\xc7\x03\xc0\xe5\x00\xb6S\xca\x82';{\xfa\xd8\x04]\x85\xa4p":
        (0, "00110000010000000101", "\xc5\xd2F\x01\x86\xf7#<\x92~}\xb2\xdc\xc7\x03\xc0\xe5\x00\xb6S\xca\x82';{\xfa\xd8\x04]\x85\xa4p", false),
      "\x01\xc5\xd2F\x01\x86\xf7#<\x92~}\xb2\xdc\xc7\x03\xc0\xe5\x00\xb6S\xca\x82';{\xfa\xd8\x04]\x85\xa4p\xc5\xd2F\x01\x86\xf7#<\x92~}\xb2\xdc\xc7\x03\xc0\xe5\x00\xb6S\xca\x82';{\xfa\xd8\x04]\x85\xa4p":
        (1, "\xc5\xd2F\x01\x86\xf7#<\x92~}\xb2\xdc\xc7\x03\xc0\xe5\x00\xb6S\xca\x82';{\xfa\xd8\x04]\x85\xa4p", "\xc5\xd2F\x01\x86\xf7#<\x92~}\xb2\xdc\xc7\x03\xc0\xe5\x00\xb6S\xca\x82';{\xfa\xd8\x04]\x85\xa4p", false),
      "\x02value": (2, None, "value", false),
      "": (0, None, None, true),
      "\x00\xc5\xd2F\x01\x86\xf7#<\x92~}\xb2\xdc\xc7\x03\xc0\xe5\x00\xb6S\xca\x82';{\xfa\xd8\x04]\x85\xa4p": (0, None, None, true),
      "\x01\xc5\xd2F\x01\x86\xf7#<\x92~}\xb2\xdc\xc7\x03\xc0\xe5\x00\xb6S\xca\x82';{\xfa\xd8\x04]\x85\xa4p": (0, None, None, true),
      "\x01\x02\xc5\xd2F\x01\x86\xf7#<\x92~}\xb2\xdc\xc7\x03\xc0\xe5\x00\xb6S\xca\x82';{\xfa\xd8\x04]\x85\xa4p\xc5\xd2F\x01\x86\xf7#<\x92~}\xb2\xdc\xc7\x03\xc0\xe5\x00\xb6S\xca\x82';{\xfa\xd8\x04]\x85\xa4p":
        (0, None, None, true),
      "\x02": (0, None, None, true),
      "\x03": (0, None, None, true)
    }

  test "node parsing":
    for c in parseNodeData:
      let input = toRange(c[0])
      let node = c[1]
      let kind = TrieNodeKind(node[0])
      let raiseError = node[3]
      var res: TrieNode

      if raiseError:
        # malformed serializations must be rejected
        expect(InvalidNode):
          res = parseNode(input)
      else:
        res = parseNode(input)
        check(kind == res.kind)
        case res.kind
        of KV_TYPE:
          check(res.keyPath == parseBitVector(node[1]))
          check(res.child == toRange(node[2]))
        of BRANCH_TYPE:
          # both fixtures use identical left/right children
          check(res.leftChild == toRange(node[2]))
          check(res.rightChild == toRange(node[2]))
        of LEAF_TYPE:
          check(res.value == toRange(node[2]))

  const
    # (keypath bits, child ref, expected encoding, expect-ValidationError)
    kvData = [
      ("0", "\xc5\xd2F\x01\x86\xf7#<\x92~}\xb2\xdc\xc7\x03\xc0\xe5\x00\xb6S\xca\x82';{\xfa\xd8\x04]\x85\xa4p", "\x00\x10\xc5\xd2F\x01\x86\xf7#<\x92~}\xb2\xdc\xc7\x03\xc0\xe5\x00\xb6S\xca\x82';{\xfa\xd8\x04]\x85\xa4p", false),
      ("" , "\xc5\xd2F\x01\x86\xf7#<\x92~}\xb2\xdc\xc7\x03\xc0\xe5\x00\xb6S\xca\x82';{\xfa\xd8\x04]\x85\xa4p", None, true),
      ("0", "\xd2F\x01\x86\xf7#<\x92~}\xb2\xdc\xc7\x03\xc0\xe5\x00\xb6S\xca\x82';{\xfa\xd8\x04]\x85\xa4p", None, true),
      ("1", "\x00\xc5\xd2F\x01\x86\xf7#<\x92~}\xb2\xdc\xc7\x03\xc0\xe5\x00\xb6S\xca\x82';{\xfa\xd8\x04]\x85\xa4p", None, true),
      ("2", "", None, true)
    ]

  test "kv node encoding":
    for c in kvData:
      let keyPath = parseBitVector(c[0])
      let node = toRange(c[1])
      let output = toBytes(c[2])
      let raiseError = c[3]

      if raiseError:
        expect(ValidationError):
          check output == encodeKVNode(keyPath, node)
      else:
        check output == encodeKVNode(keyPath, node)

  const
    # (left child hash, right child hash, expected encoding,
    #  expect-ValidationError) - children must be exactly 32 bytes
    branchData = [
      ("\xc8\x9e\xfd\xaaT\xc0\xf2\x0cz\xdfa(\x82\xdf\tP\xf5\xa9Qc~\x03\x07\xcd\xcbLg/)\x8b\x8b\xc6", "\xc5\xd2F\x01\x86\xf7#<\x92~}\xb2\xdc\xc7\x03\xc0\xe5\x00\xb6S\xca\x82';{\xfa\xd8\x04]\x85\xa4p",
        "\x01\xc8\x9e\xfd\xaaT\xc0\xf2\x0cz\xdfa(\x82\xdf\tP\xf5\xa9Qc~\x03\x07\xcd\xcbLg/)\x8b\x8b\xc6\xc5\xd2F\x01\x86\xf7#<\x92~}\xb2\xdc\xc7\x03\xc0\xe5\x00\xb6S\xca\x82';{\xfa\xd8\x04]\x85\xa4p", false),
      ("", "\xc5\xd2F\x01\x86\xf7#<\x92~}\xb2\xdc\xc7\x03\xc0\xe5\x00\xb6S\xca\x82';{\xfa\xd8\x04]\x85\xa4p", None, true),
      ("\xc5\xd2F\x01\x86\xf7#<\x92~}\xb2\xdc\xc7\x03\xc0\xe5\x00\xb6S\xca\x82';{\xfa\xd8\x04]\x85\xa4p", "\x01", None, true),
      ("\xc5\xd2F\x01\x86\xf7#<\x92~}\xb2\xdc\xc7\x03\xc0\xe5\x00\xb6S\xca\x82';{\xfa\xd8\x04]\x85\xa4p", "12345", None, true),
      (repeat('\x01', 33), repeat('\x01', 32), None, true),
    ]

  test "branch node encode":
    for c in branchData:
      let left = toRange(c[0])
      let right = toRange(c[1])
      let output = toBytes(c[2])
      let raiseError = c[3]

      if raiseError:
        expect(ValidationError):
          check output == encodeBranchNode(left, right)
      else:
        check output == encodeBranchNode(left, right)

  const
    # (value, expected encoding, expect-ValidationError)
    leafData = [
      ("\x03\x04\x05", "\x02\x03\x04\x05", false),
      ("", None, true)
    ]

  test "leaf node encode":
    for c in leafData:
      let raiseError = c[2]
      if raiseError:
        expect(ValidationError):
          check toBytes(c[1]) == encodeLeafNode(toRange(c[0]))
      else:
        check toBytes(c[1]) == encodeLeafNode(toRange(c[0]))

  test "random kv encoding":
    let lengths = randList(int, randGen(1, 999), randGen(100, 100), unique = false)
    for len in lengths:
      var k = len
      var bitvec = genBitVec(len)
      var nodeHash = keccak256.digest(cast[ptr byte](k.addr), uint(sizeof(int))).toRange
      var kvnode = encodeKVNode(bitvec, nodeHash).toRange
      # the first byte is KV_TYPE
      # in the middle are 1..n bits of binary-encoded-keypath
      # last 32 bytes are hash
      var keyPath = decodeToBinKeypath(kvnode[1..^33])
      check kvnode[0].ord == KV_TYPE.ord
      check keyPath == bitvec
      check kvnode[^32..^1] == nodeHash

  test "optimized single bit keypath kvnode encoding":
    # encodeKVNode accepts a bool for a single-bit keypath; it must
    # produce exactly the same encoding as the general BitRange form.
    var k = 1
    var nodeHash = keccak256.digest(cast[ptr byte](k.addr), uint(sizeof(int))).toRange
    var bitvec = genBitVec(1)
    bitvec[0] = false
    var kvnode = encodeKVNode(bitvec, nodeHash).toRange
    var kp = decodeToBinKeypath(kvnode[1..^33])

    var okv = encodeKVNode(false, nodeHash).toRange
    check okv == kvnode
    var okp = decodeToBinKeypath(kvnode[1..^33])
    check okp == kp
    check okp.len == 1
    check okp == bitvec

    bitvec[0] = true
    kvnode = encodeKVNode(bitvec, nodeHash).toRange
    kp = decodeToBinKeypath(kvnode[1..^33])

    okv = encodeKVNode(true, nodeHash).toRange
    check okv == kvnode
    okp = decodeToBinKeypath(kvnode[1..^33])
    check okp == kp
    check okp.len == 1
    check okp == bitvec

View File

@ -0,0 +1,142 @@
import
random, sets, unittest, strutils, sets,
eth/trie/[db, binary, branches]
suite "branches utils":
  # Reference trie used by every test: three keys sharing the prefix
  # "\x12\x34\x56".
  proc testTrie(): BinaryTrie =
    var db = newMemoryDB()
    var trie = initBinaryTrie(db)
    trie.set("\x12\x34\x56\x78\x9a", "9a")
    trie.set("\x12\x34\x56\x78\x9b", "9b")
    trie.set("\x12\x34\x56\xff", "ff")
    trie

  # (key prefix, whether a branch exists for it)
  const branchExistData = [
    ("\x12\x34", true),
    ("\x12\x34\x56\x78\x9b", true),
    ("\x12\x56", false),
    ("\x12\x34\x56\xff\xff", false),
    ("\x12\x34\x56", true),
    ("\x12\x34\x56\x78", true)
  ]

  test "branch exists":
    var trie = testTrie()
    var db = trie.getDB()
    for c in branchExistData:
      let keyPrefix = c[0].toRange
      let if_exist = c[1]
      check checkIfBranchExist(db, trie.getRootHash(), keyPrefix) == if_exist

  # (key, whether getBranch accepts it; rejected keys raise InvalidKeyError)
  const branchData = [
    ("\x12\x34", true),
    ("\x12\x34\x56\xff", true),
    ("\x12\x34\x56\x78\x9b", true),
    ("\x12\x56", true),
    ("\x12\x34\x56\xff\xff", false),
    ("", false)
  ]

  test "branch":
    var trie = testTrie()
    var db = trie.getDB()
    for c in branchData:
      let key = c[0].toRange
      let keyValid = c[1]
      if keyValid:
        let branch = getBranch(db, trie.getRootHash(), key)
        check isValidBranch(branch, trie.getRootHash(), key, trie.get(key))
      else:
        # Invalid keys must raise InvalidKeyError and nothing else.
        try:
          discard getBranch(db, trie.getRootHash(), key)
        except InvalidKeyError:
          check(true)
        except:
          check(false)

  # (node hash, expected serialized nodes reachable from that hash)
  const trieNodesData = [
    ("#\xf037,w\xb9()\x0e4\x92\xdf\x11\xca\xea\xa5\x13/\x10\x1bJ\xa7\x16\x07\x07G\xb1\x01_\x16\xca", @["\x029a"]),
    ("\x84\x97\xc1\xf7S\xf5\xa2\xbb>\xbd\xe9\xc3t\x0f\xac/\xad\xa8\x01\xff\x9aE\t\xc1\xab\x9e\xa3|\xc7Z\xb0v",
      @["\x01#\xf037,w\xb9()\x0e4\x92\xdf\x11\xca\xea\xa5\x13/\x10\x1bJ\xa7\x16\x07\x07G\xb1\x01_\x16\xcaG\xe9\xb6\xa1\xfa\xd5\x82\xf4k\x04\x9c\x8e\xc8\x17\xb4G\xe1c*n\xf4o\x02\x85\xf1\x19\xa8\x83`\xfb\xf8\xa2",
      "\x029a",
      "\x029b"]),
    ("\x13\x07<\xa0w6\xd5O\x91\x93\xb1\xde,0}\xe7\xee\x82\xd7\xf6\xce\x1b^\xb7}\"\n\xe4&\xe2\xd7v",
      @["\x00\x82<M\x84\x97\xc1\xf7S\xf5\xa2\xbb>\xbd\xe9\xc3t\x0f\xac/\xad\xa8\x01\xff\x9aE\t\xc1\xab\x9e\xa3|\xc7Z\xb0v",
      "\x01#\xf037,w\xb9()\x0e4\x92\xdf\x11\xca\xea\xa5\x13/\x10\x1bJ\xa7\x16\x07\x07G\xb1\x01_\x16\xcaG\xe9\xb6\xa1\xfa\xd5\x82\xf4k\x04\x9c\x8e\xc8\x17\xb4G\xe1c*n\xf4o\x02\x85\xf1\x19\xa8\x83`\xfb\xf8\xa2",
      "\x029a",
      "\x029b"]),
    ("X\x99\x8f\x13\xeb\x9bF\x08\xec|\x8b\xd8}\xca\xed\xda\xbb4\tl\xc8\x9bJ;J\xed\x11\x86\xc2\xd7+\xca",
      @["\x00\x80\x124V\xde\xb5\x8f\xdb\x98\xc0\xe8\xed\x10\xde\x84\x89\xe1\xc3\x90\xbeoi7y$sJ\x07\xa1h\xf5t\x1c\xac\r+",
      "\x01\x13\x07<\xa0w6\xd5O\x91\x93\xb1\xde,0}\xe7\xee\x82\xd7\xf6\xce\x1b^\xb7}\"\n\xe4&\xe2\xd7v7\x94\x07\x18\xc9\x96E\xf1\x9bS1sv\xa2\x8b\x9a\x88\xfd/>5\xcb3\x9e\x03\x08\r\xe2\xe1\xd5\xaaq",
      "\x00\x82<M\x84\x97\xc1\xf7S\xf5\xa2\xbb>\xbd\xe9\xc3t\x0f\xac/\xad\xa8\x01\xff\x9aE\t\xc1\xab\x9e\xa3|\xc7Z\xb0v",
      "\x00\x83\x7fR\xce\xe1\xe1 +\x96\xde\xae\xcdV\x13\x9a \x90.7H\xb6\x80\t\x10\xe1(\x03\x15\xde\x94\x17X\xee\xe1",
      "\x01#\xf037,w\xb9()\x0e4\x92\xdf\x11\xca\xea\xa5\x13/\x10\x1bJ\xa7\x16\x07\x07G\xb1\x01_\x16\xcaG\xe9\xb6\xa1\xfa\xd5\x82\xf4k\x04\x9c\x8e\xc8\x17\xb4G\xe1c*n\xf4o\x02\x85\xf1\x19\xa8\x83`\xfb\xf8\xa2",
      "\x02ff",
      "\x029a",
      "\x029b"]),
    # unknown roots yield no nodes
    ("\xc5\xd2F\x01\x86\xf7#<\x92~}\xb2\xdc\xc7\x03\xc0\xe5\x00\xb6S\xca\x82';{\xfa\xd8\x04]\x85\xa4p", @[]),
    (repeat('0', 32), @[])
  ]

  # Convert literal node strings to BytesRange for set comparison.
  proc toRanges(x: seq[string]): seq[BytesRange] =
    result = newSeq[BytesRange](x.len)
    for i, c in x: result[i] = toRange(c)

  test "get trie nodes":
    var trie = testTrie()
    var db = trie.getDB()
    for c in trieNodesData:
      let root = c[0].toRange()
      let nodes = toRanges(c[1])
      # order is irrelevant, hence set comparison
      check toSet(nodes) == toSet(getTrieNodes(db, root))

  # (key prefix, expected witness node set; empty means "skip check")
  const witnessData = [
    ("\x12\x34\x56\x78\x9b",
      @["\x00\x80\x124V\xde\xb5\x8f\xdb\x98\xc0\xe8\xed\x10\xde\x84\x89\xe1\xc3\x90\xbeoi7y$sJ\x07\xa1h\xf5t\x1c\xac\r+",
      "\x01\x13\x07<\xa0w6\xd5O\x91\x93\xb1\xde,0}\xe7\xee\x82\xd7\xf6\xce\x1b^\xb7}\"\n\xe4&\xe2\xd7v7\x94\x07\x18\xc9\x96E\xf1\x9bS1sv\xa2\x8b\x9a\x88\xfd/>5\xcb3\x9e\x03\x08\r\xe2\xe1\xd5\xaaq",
      "\x00\x82<M\x84\x97\xc1\xf7S\xf5\xa2\xbb>\xbd\xe9\xc3t\x0f\xac/\xad\xa8\x01\xff\x9aE\t\xc1\xab\x9e\xa3|\xc7Z\xb0v",
      "\x01#\xf037,w\xb9()\x0e4\x92\xdf\x11\xca\xea\xa5\x13/\x10\x1bJ\xa7\x16\x07\x07G\xb1\x01_\x16\xcaG\xe9\xb6\xa1\xfa\xd5\x82\xf4k\x04\x9c\x8e\xc8\x17\xb4G\xe1c*n\xf4o\x02\x85\xf1\x19\xa8\x83`\xfb\xf8\xa2",
      "\x029b"]),

    ("\x12\x34\x56\x78",
      @["\x00\x80\x124V\xde\xb5\x8f\xdb\x98\xc0\xe8\xed\x10\xde\x84\x89\xe1\xc3\x90\xbeoi7y$sJ\x07\xa1h\xf5t\x1c\xac\r+",
      "\x01\x13\x07<\xa0w6\xd5O\x91\x93\xb1\xde,0}\xe7\xee\x82\xd7\xf6\xce\x1b^\xb7}\"\n\xe4&\xe2\xd7v7\x94\x07\x18\xc9\x96E\xf1\x9bS1sv\xa2\x8b\x9a\x88\xfd/>5\xcb3\x9e\x03\x08\r\xe2\xe1\xd5\xaaq",
      "\x00\x82<M\x84\x97\xc1\xf7S\xf5\xa2\xbb>\xbd\xe9\xc3t\x0f\xac/\xad\xa8\x01\xff\x9aE\t\xc1\xab\x9e\xa3|\xc7Z\xb0v",
      "\x01#\xf037,w\xb9()\x0e4\x92\xdf\x11\xca\xea\xa5\x13/\x10\x1bJ\xa7\x16\x07\x07G\xb1\x01_\x16\xcaG\xe9\xb6\xa1\xfa\xd5\x82\xf4k\x04\x9c\x8e\xc8\x17\xb4G\xe1c*n\xf4o\x02\x85\xf1\x19\xa8\x83`\xfb\xf8\xa2",
      "\x029a",
      "\x029b"]),

    ("\x12\x34\x56",
      @["\x00\x80\x124V\xde\xb5\x8f\xdb\x98\xc0\xe8\xed\x10\xde\x84\x89\xe1\xc3\x90\xbeoi7y$sJ\x07\xa1h\xf5t\x1c\xac\r+",
      "\x01\x13\x07<\xa0w6\xd5O\x91\x93\xb1\xde,0}\xe7\xee\x82\xd7\xf6\xce\x1b^\xb7}\"\n\xe4&\xe2\xd7v7\x94\x07\x18\xc9\x96E\xf1\x9bS1sv\xa2\x8b\x9a\x88\xfd/>5\xcb3\x9e\x03\x08\r\xe2\xe1\xd5\xaaq",
      "\x00\x82<M\x84\x97\xc1\xf7S\xf5\xa2\xbb>\xbd\xe9\xc3t\x0f\xac/\xad\xa8\x01\xff\x9aE\t\xc1\xab\x9e\xa3|\xc7Z\xb0v",
      "\x00\x83\x7fR\xce\xe1\xe1 +\x96\xde\xae\xcdV\x13\x9a \x90.7H\xb6\x80\t\x10\xe1(\x03\x15\xde\x94\x17X\xee\xe1",
      "\x01#\xf037,w\xb9()\x0e4\x92\xdf\x11\xca\xea\xa5\x13/\x10\x1bJ\xa7\x16\x07\x07G\xb1\x01_\x16\xcaG\xe9\xb6\xa1\xfa\xd5\x82\xf4k\x04\x9c\x8e\xc8\x17\xb4G\xe1c*n\xf4o\x02\x85\xf1\x19\xa8\x83`\xfb\xf8\xa2",
      "\x02ff",
      "\x029a",
      "\x029b"]),

    ("\x12",
      @["\x00\x80\x124V\xde\xb5\x8f\xdb\x98\xc0\xe8\xed\x10\xde\x84\x89\xe1\xc3\x90\xbeoi7y$sJ\x07\xa1h\xf5t\x1c\xac\r+",
      "\x01\x13\x07<\xa0w6\xd5O\x91\x93\xb1\xde,0}\xe7\xee\x82\xd7\xf6\xce\x1b^\xb7}\"\n\xe4&\xe2\xd7v7\x94\x07\x18\xc9\x96E\xf1\x9bS1sv\xa2\x8b\x9a\x88\xfd/>5\xcb3\x9e\x03\x08\r\xe2\xe1\xd5\xaaq",
      "\x00\x82<M\x84\x97\xc1\xf7S\xf5\xa2\xbb>\xbd\xe9\xc3t\x0f\xac/\xad\xa8\x01\xff\x9aE\t\xc1\xab\x9e\xa3|\xc7Z\xb0v",
      "\x00\x83\x7fR\xce\xe1\xe1 +\x96\xde\xae\xcdV\x13\x9a \x90.7H\xb6\x80\t\x10\xe1(\x03\x15\xde\x94\x17X\xee\xe1",
      "\x01#\xf037,w\xb9()\x0e4\x92\xdf\x11\xca\xea\xa5\x13/\x10\x1bJ\xa7\x16\x07\x07G\xb1\x01_\x16\xcaG\xe9\xb6\xa1\xfa\xd5\x82\xf4k\x04\x9c\x8e\xc8\x17\xb4G\xe1c*n\xf4o\x02\x85\xf1\x19\xa8\x83`\xfb\xf8\xa2",
      "\x02ff",
      "\x029a",
      "\x029b"]),

    (repeat('0', 32),
      @["\x00\x80\x124V\xde\xb5\x8f\xdb\x98\xc0\xe8\xed\x10\xde\x84\x89\xe1\xc3\x90\xbeoi7y$sJ\x07\xa1h\xf5t\x1c\xac\r+"]),
  ]

  test "get witness for key prefix":
    var trie = testTrie()
    var db = trie.getDB()
    for c in witnessData:
      let key = c[0].toRange
      let nodes = toRanges(c[1])
      if nodes.len != 0:
        let x = toSet(nodes)
        let y = toSet(getWitness(db, trie.getRootHash(), key))
        check x == y

View File

@ -0,0 +1,46 @@
import
unittest,
eth/trie/db,
eth/trie/backends/caching_backend
let
  # Fixed three-byte fixtures; value `i` is associated with key `i`.
  key1 = [0.byte, 0, 1]
  key2 = [0.byte, 0, 2]
  key3 = [0.byte, 0, 3]
  key4 = [0.byte, 0, 4]
  value1 = [1.byte, 0, 1]
  value2 = [1.byte, 0, 2]
  value3 = [1.byte, 0, 3]
  value4 = [1.byte, 0, 4]

suite "Caching DB backend":
  test "Basic test":
    # The caching layer buffers puts/deletes and only applies them to
    # the underlying DB on commit().
    let mdb = newMemoryDB()
    mdb.put(key1, value1)
    mdb.put(key2, value2)
    let cdb = newCachingDB(mdb)
    check:
      # reads fall through to the underlying DB
      cdb.get(key1) == @value1
      cdb.get(key2) == @value2

    cdb.del(key1)
    check:
      # deleted in the cache, but still present underneath
      key1 notin cdb
      mdb.get(key1) == @value1

    cdb.put(key3, value3)
    check:
      # written to the cache, not yet to the underlying DB
      cdb.get(key3) == @value3
      key3 notin mdb

    # a put followed by a del inside the cache cancels out
    cdb.put(key4, value4)
    cdb.del(key4)
    check(key4 notin cdb)

    cdb.commit()

    check:
      # after commit the underlying DB reflects the cached state
      key1 notin mdb
      mdb.get(key2) == @value2
      mdb.get(key3) == @value3
      key4 notin mdb

View File

@ -0,0 +1,376 @@
import
unittest, strutils, sequtils, os,
ranges/typedranges, eth/trie/[hexary, db, trie_defs], nimcrypto/utils,
test_utils, algorithm, eth/rlp/types as rlpTypes, random
# Convenience shims so the tests below can use plain string keys and
# values with both the plain and the secure hexary trie.
template put(t: HexaryTrie|SecureHexaryTrie, key, val: string) =
  t.put(key.toBytesRange, val.toBytesRange)

template del(t: HexaryTrie|SecureHexaryTrie, key) =
  t.del(key.toBytesRange)

template get(t: HexaryTrie|SecureHexaryTrie, key): auto =
  t.get(key.toBytesRange)
suite "hexary trie":
  setup:
    # Fresh in-memory DB and (pruning) trie for every test.
    var
      db = newMemoryDB()
      tr = initHexaryTrie(db)

  test "ref-counted keys crash":
    # Regression test: inserting many 20-byte keys used to crash
    # (ref-counting of key ranges).
    proc addKey(intKey: int) =
      var key = newSeqWith(20, 0.byte)
      key[19] = byte(intKey)
      var data = newSeqWith(29, 1.byte)

      var k = key.toRange

      let v = tr.get(k)
      doAssert(v.len == 0)

      tr.put(k, toRange(data))

    addKey(166)
    addKey(193)
    addKey(7)
    addKey(101)
    addKey(159)
    addKey(187)
    addKey(206)
    addKey(242)
    addKey(94)
    addKey(171)
    addKey(14)
    addKey(143)
    addKey(237)
    addKey(148)
    addKey(181)
    addKey(147)
    addKey(45)
    addKey(81)
    addKey(77)
    addKey(123)
    addKey(35)
    addKey(24)
    addKey(188)
    addKey(136)

  const genesisAccounts = "tests/cases/mainnet-genesis-accounts.txt"
  if fileExists(genesisAccounts):
    # This test is optional because it takes a while to run and the
    # verification is already being part of Nimbus (see genesis.nim).
    #
    # On the other hand, it's useful to be able to debug just the trie
    # code if problems arise. You can download the genesis-accounts file
    # using the following command at the root of the repo:
    #
    # wget https://gist.github.com/zah/f3a7d325a71d35df3c2606af05d30910/raw/d8bf8fed3d2760f0054cebf4de254a0564a87322/mainnet-genesis-accounts.txt -P tests/cases
    test "genesis hash":
      for line in lines(genesisAccounts):
        var parts = line.split(" ")
        var
          key = fromHex(parts[0])
          val = fromHex(parts[1])

        SecureHexaryTrie(tr).put(key.toRange, val.toRange)

      check tr.rootHashHex == "D7F8974FB5AC78D9AC099B9AD5018BEDC2CE0A72DAD1827A1709DA30580F0544"

  # lexicographic comparison: true when `a` sorts strictly before `b`
  proc lexComp(a, b: BytesRange): bool =
    var
      x = 0
      y = 0
      xlen = a.len
      ylen = b.len

    while x != xlen:
      if y == ylen or b[y] < a[x]: return false
      elif a[x] < b[y]: return true
      inc x
      inc y

    result = y != ylen

  # NOTE(review): this comparator returns 1 (not -1) when `a` sorts
  # before `b`, i.e. `sort(..., cmp)` yields *descending* order.  All
  # callers below sort both sides of each comparison with the same
  # comparator, so the checks are unaffected.
  proc cmp(a, b: BytesRange): int =
    if a == b: return 0
    if a.lexComp(b): return 1
    return -1

  test "get values and keys":
    randomize()
    var
      memdb = newMemoryDB()
      trie = initHexaryTrie(memdb)
      keys = [
        "key".toBytesRange,
        "abc".toBytesRange,
        "hola".toBytesRange,
        "bubble".toBytesRange
      ]
      vals = [
        "hello".toBytesRange,
        "world".toBytesRange,
        "block".toBytesRange,
        "chain".toBytesRange
      ]

    for i in 0 ..< keys.len:
      trie.put(keys[i], vals[i])

    # getValues/getKeys must return exactly what was inserted
    # (sorted on both sides because iteration order is unspecified).
    var values = trie.getValues()
    values.sort(cmp)
    vals.sort(cmp)
    check values == vals

    var paths = trie.getKeys()
    paths.sort(cmp)
    keys.sort(cmp)
    check paths == keys

    # the `keys` iterator must agree with getKeys()
    paths.setLen(0)
    for k in trie.keys:
      paths.add(k)
    paths.sort(cmp)
    keys.sort(cmp)
    check paths == keys

    # the (key, value) pairs iterator must agree with both
    values.setLen(0)
    paths.setLen(0)
    for k, v in trie:
      paths.add k
      values.add v
    paths.sort(cmp)
    values.sort(cmp)
    check paths == keys
    check values == vals

  test "get values and keys with random data":
    var
      memdb = newMemoryDB()
      trie = initHexaryTrie(memdb)
      keys = randList(BytesRange, randGen(5, 32), randGen(10))
      vals = randList(BytesRange, randGen(5, 7), randGen(10))
      keys2 = randList(BytesRange, randGen(5, 30), randGen(15))
      vals2 = randList(BytesRange, randGen(5, 7), randGen(15))

    for i in 0 ..< keys.len:
      trie.put(keys[i], vals[i])

    for i in 0 ..< keys.len:
      check trie.get(keys[i]) == vals[i]

    var values = trie.getValues()
    values.sort(cmp)
    vals.sort(cmp)
    check values == vals

    let rootHash = trie.rootHash
    for i in 0 ..< keys2.len:
      trie.put(keys2[i], vals2[i])

    var trie2 = initHexaryTrie(memdb, rootHash)

    # because of pruning, equality become uncertain
    # NOTE(review): this check is a tautology (B or not-B); it only
    # documents that the old root may or may not still be readable.
    values = trie2.getValues()
    values.sort(cmp)
    let
      cmpResultA = values != vals
      cmpResultB = values == vals
    check cmpResultB or cmpResultA

    # the live trie must contain the union of both insertions
    var values2 = trie.getValues()
    vals2.add vals
    values2.sort(cmp)
    vals2.sort(cmp)
    check values2 == vals2

    values2.setLen(0)
    for k in trie.values:
      values2.add(k)
    values2.sort(cmp)
    check values2 == vals2

    var paths = trie.getKeys()
    paths.sort(cmp)
    keys2.add keys
    keys2.sort(cmp)
    check paths == keys2

    paths.setLen(0)
    for k in trie.keys:
      paths.add(k)
    paths.sort(cmp)
    check paths == keys2

    values.setLen(0)
    paths.setLen(0)
    for k, v in trie:
      paths.add k
      values.add v
    paths.sort(cmp)
    values.sort(cmp)
    check paths == keys2
    check values == vals2

  test "non-pruning mode":
    # With pruning disabled, an old root must stay fully readable even
    # after further insertions.
    var
      memdb = newMemoryDB()
      nonPruningTrie = initHexaryTrie(memdb, false)
      keys = randList(BytesRange, randGen(5, 77), randGen(30))
      vals = randList(BytesRange, randGen(1, 57), randGen(30))
      moreKeys = randList(BytesRange, randGen(5, 33), randGen(45))
      moreVals = randList(BytesRange, randGen(1, 47), randGen(45))

    for i in 0 ..< keys.len:
      nonPruningTrie.put(keys[i], vals[i])

    let rootHash = nonPruningTrie.rootHash
    for i in 0 ..< moreKeys.len:
      nonPruningTrie.put(moreKeys[i], moreVals[i])

    var
      readOnlyTrie = initHexaryTrie(memdb, rootHash)
      secondaryTrie = initHexaryTrie(memdb, rootHash, false)

    keys.sort(cmp)
    vals.sort(cmp)

    var
      roKeys = readOnlyTrie.getKeys()
      roValues = readOnlyTrie.getValues()
      scKeys = secondaryTrie.getKeys()
      scValues = secondaryTrie.getValues()

    roKeys.sort(cmp)
    roValues.sort(cmp)
    scKeys.sort(cmp)
    scValues.sort(cmp)

    check keys == roKeys
    check vals == roValues
    check keys == scKeys
    check vals == scValues

  test "elaborate non-pruning test":
    # After each insertion, snapshot the root and the expected contents;
    # every historical root must later reproduce exactly its snapshot.
    type
      History = object
        keys: seq[BytesRange]
        values: seq[BytesRange]
        rootHash: KeccakHash

    const
      listLength = 30
      numLoop = 100

    for iteration in 0 ..< numLoop:
      var
        memdb = newMemoryDB()
        nonPruningTrie = initHexaryTrie(memdb, false)
        keys = randList(BytesRange, randGen(3, 33), randGen(listLength))
        values = randList(BytesRange, randGen(5, 77), randGen(listLength))
        historyList = newSeq[History](listLength)
        ok = true

      for i, k in keys:
        historyList[i].keys = newSeq[BytesRange](i + 1)
        historyList[i].values = newSeq[BytesRange](i + 1)
        for x in 0 ..< i + 1:
          historyList[i].keys[x] = keys[x]
          historyList[i].values[x] = values[x]
        nonPruningTrie.put(keys[i], values[i])
        historyList[i].rootHash = nonPruningTrie.rootHash
        historyList[i].keys.sort(cmp)
        historyList[i].values.sort(cmp)

      for h in historyList:
        var
          trie = initHexaryTrie(memdb, h.rootHash)
          pKeys: seq[BytesRange] = @[]
          pValues = trie.getValues()

        for k in trie.keys:
          pKeys.add k
        pKeys.sort(cmp)
        pValues.sort(cmp)
        check pKeys.len == h.keys.len
        check pValues.len == h.values.len
        check pKeys == h.keys
        check pValues == h.values

        # track failures so the failing iteration can be reported
        ok = ok and pKeys.len == h.keys.len
        ok = ok and pValues.len == h.values.len
        ok = ok and pKeys == h.keys
        ok = ok and pValues == h.values
        if not ok: break

      if not ok:
        echo "ITERATION: ", iteration
        break

  # Rebuild a trie from just the branch nodes and verify the key
  # resolves to the expected value under the given root.
  proc isValidBranch(branch: seq[BytesRange], rootHash: KeccakHash, key, value: BytesRange): bool =
    # branch must not be empty
    assert(branch.len != 0)

    var db = newMemoryDB()
    for node in branch:
      assert(node.len != 0)
      let nodeHash = hexary.keccak(node)
      db.put(nodeHash.data, node.toOpenArray)

    var trie = initHexaryTrie(db, rootHash)
    result = trie.get(key) == toRange(value)

  test "get branch with pruning trie":
    var
      memdb = newMemoryDB()
      trie = initHexaryTrie(memdb)
      keys = randList(BytesRange, randGen(5, 77), randGen(30))
      vals = randList(BytesRange, randGen(1, 57), randGen(30))

    for i in 0 ..< keys.len:
      trie.put(keys[i], vals[i])

    for i in 0 ..< keys.len:
      var branch = trie.getBranch(keys[i])
      check isValidBranch(branch, trie.rootHash, keys[i], vals[i])

  test "get branch with non pruning trie":
    # Branch proofs must also work against every historical root.
    const
      numKeyVal = 30

    var
      memdb = newMemoryDB()
      nonPruningTrie = initHexaryTrie(memdb, false)
      keys = randList(BytesRange, randGen(5, 77), randGen(numKeyVal))
      vals = randList(BytesRange, randGen(1, 57), randGen(numKeyVal))
      roots = newSeq[KeccakHash](numKeyVal)

    for i in 0 ..< keys.len:
      nonPruningTrie.put(keys[i], vals[i])
      roots[i] = nonPruningTrie.rootHash

    for i in 0 ..< keys.len:
      var trie = initHexaryTrie(memdb, roots[i], false)
      for x in 0 ..< i+1:
        var branch = trie.getBranch(keys[x])
        check isValidBranch(branch, trie.rootHash, keys[x], vals[x])

  test "isPruning getter":
    var
      memdb = newMemoryDB()
      nonPruningTrie = initHexaryTrie(memdb, false)
      pruningTrie = initHexaryTrie(memdb, true)
      nonPruningSecureTrie = initSecureHexaryTrie(memdb, false)
      pruningSecureTrie = initSecureHexaryTrie(memdb, true)

    check nonPruningTrie.isPruning == false
    check pruningTrie.isPruning == true
    # Nim identifiers are style-insensitive, so the original spelling
    # `...SecureTRie` resolved to the same symbols; normalized here in
    # comments only — usage sites below keep the declared names.
    check nonPruningSecureTrie.isPruning == false
    check pruningSecureTrie.isPruning == true

View File

@ -0,0 +1,127 @@
import
os, json, tables, sequtils, strutils, algorithm,
eth/rlp/types, nimcrypto/utils,
eth/trie/[trie_defs, db, hexary],
test_utils
proc `==`(lhs: JsonNode, rhs: string): bool =
  ## A JSON node equals a string exactly when it is a JSON string
  ## holding the same characters.
  case lhs.kind
  of JString: lhs.str == rhs
  else: false
type
  # One trie operation decoded from a JSON fixture.
  TestOp = object
    idx: int          # original position; lets permuted runs be re-sorted
    key: BytesRange
    value: BytesRange # zero-length value encodes a deletion

# Ordering on the original fixture position; used both for sorting and
# for permutation enumeration over sequences of TestOp.
proc cmp(lhs, rhs: TestOp): int = cmp(lhs.idx, rhs.idx)
proc `<=`(lhs, rhs: TestOp): bool = lhs.idx <= rhs.idx
proc runSingleTest(testSequence: openarray[TestOp],
                   secureMode: bool,
                   expectedRootHash: string): bool =
  ## Apply `testSequence` in order to a fresh in-memory trie and report
  ## whether the resulting root hash matches `expectedRootHash` (upper-
  ## case hex, no 0x prefix).  In `secureMode` keys are hashed by using
  ## the SecureHexaryTrie view of the same trie.
  var
    db = newMemoryDB()
    t = initHexaryTrie(db)

  for op in testSequence:
    let
      k = op.key
      v = op.value
    if v.len > 0:
      if secureMode:
        t.SecureHexaryTrie.put k, v
      else:
        t.put k, v
    else:
      # zero-length value means "delete this key"
      if secureMode:
        t.SecureHexaryTrie.del k
      else:
        t.del k

  return t.rootHashHex == expectedRootHash
proc runTests*(filename: string) =
  ## Execute every trie fixture found in the JSON file `filename`.
  ##
  ## File-name conventions select the mode:
  ## * "secure"   in the name -> keys are hashed (SecureHexaryTrie)
  ## * "anyorder" in the name -> every permutation of the input is tried
  ##
  ## Malformed test cases are reported as IGNORED and skipped entirely.
  let js = json.parseFile(filename)

  for testname, testdata in js:
    template testStatus(status: string) =
      echo status, " ", filename, " :: ", testname

    # Each test case runs inside a named block so that `invalidTest`
    # can abandon it from anywhere.  The previous version expanded to
    # `continue`, which - inside the nested `for pair` / `for k, v`
    # loops below - only skipped a single input pair and then still ran
    # the test on partial input.
    block singleTest:
      template invalidTest =
        testStatus "IGNORED"
        break singleTest

      let
        input = testdata{"in"}
        root = testdata{"root"}
        secureMode = "secure" in filename
        permuteOrder = "anyorder" in filename

      if input.isNil or root.isNil or root.kind != JString:
        invalidTest()

      var inputs = newSeq[TestOp](0)
      case input.kind
      of JArray:
        for pair in input.elems:
          if pair.kind != JArray or pair.elems.len != 2:
            invalidTest()
          let
            k = pair.elems[0]
            v = pair.elems[1]
          if k.kind == JString:
            case v.kind
            of JString:
              inputs.add(TestOp(idx: inputs.len,
                                key: k.str.toBytesRange,
                                value: v.str.toBytesRange))
            of JNull:
              # null value means "delete this key"
              inputs.add(TestOp(idx: inputs.len,
                                key: k.str.toBytesRange,
                                value: zeroBytesRange))
            else: invalidTest()
          else: invalidTest()
      of JObject:
        for k, v in input.fields:
          case v.kind
          of JString:
            inputs.add(TestOp(idx: inputs.len,
                              key: k.toBytesRange,
                              value: v.str.toBytesRange))
          of JNull:
            inputs.add(TestOp(idx: inputs.len,
                              key: k.toBytesRange,
                              value: zeroBytesRange))
          else: invalidTest()
      else: invalidTest()

      # The fixture root is "0x"-prefixed lowercase hex.
      let expectedRootHash = root.str.substr(2).toUpperAscii

      if permuteOrder:
        # Start from the lexicographically first permutation so that
        # nextPermutation enumerates all of them exactly once.
        sort(inputs, cmp)
        while true:
          if not runSingleTest(inputs, secureMode, expectedRootHash):
            testStatus "FAILED"
            break
          if not nextPermutation(inputs):
            testStatus "OK"
            break
      else:
        if runSingleTest(inputs, secureMode, expectedRootHash):
          testStatus "OK"
        else:
          testStatus "FAILED"
# Run every JSON fixture found under tests/cases.
# NOTE(review): `endsWith("json")` also matches names like "foojson";
# presumably all fixtures end in ".json", so this is harmless here.
for file in walkDirRec("tests/cases"):
  if file.endsWith("json"):
    runTests(file)

View File

@ -0,0 +1,226 @@
import
unittest, random,
eth/trie/[trie_defs, db, sparse_binary, sparse_proofs],
test_utils
suite "sparse binary trie":
randomize()
var kv_pairs = randKVPair(20)
var numbers = randList(int, randGen(1, 99), randGen(50, 100))
var db = newMemoryDB()
var trie = initSparseBinaryTrie(db)
test "basic set":
for c in kv_pairs:
check trie.exists(c.key) == false
trie.set(c.key, c.value)
let prevRoot = trie.getRootHash()
test "basic get":
for c in kv_pairs:
let x = trie.get(c.key)
let y = toRange(c.value)
check x == y
trie.del(c.key)
for c in kv_pairs:
check trie.exists(c.key) == false
check trie.getRootHash() == keccakHash(emptyNodeHashes[0].toOpenArray, emptyNodeHashes[0].toOpenArray).toRange
# Re-inserts the same pairs in a shuffled order; the root must be
# order-independent (a core Merkle-trie property).
test "single update set":
random.shuffle(kv_pairs)
for c in kv_pairs:
trie.set(c.key, c.value)
# Check trie root remains the same even in different insert order
check trie.getRootHash() == prevRoot
# Snapshot consumed by the update/batch tests below.
let prior_to_update_root = trie.getRootHash()
# For each sampled index: overwrite a value, verify the root changes,
# restore the value, verify the root returns to the snapshot.
test "single update get":
for i in numbers:
# If new value is the same as current value, skip the update
if toRange($i) == trie.get(kv_pairs[i].key):
continue
# Update
trie.set(kv_pairs[i].key, $i)
check trie.get(kv_pairs[i].key) == toRange($i)
check trie.getRootHash() != prior_to_update_root
# Un-update
trie.set(kv_pairs[i].key, kv_pairs[i].value)
check trie.getRootHash == prior_to_update_root
# Batch-overwrite, revert, then re-apply in a different order; the root
# must only depend on final contents, never on update order.
test "batch update with different update order":
# First batch update
for i in numbers:
trie.set(kv_pairs[i].key, $i)
let batch_updated_root = trie.getRootHash()
# Un-update
random.shuffle(numbers)
for i in numbers:
trie.set(kv_pairs[i].key, kv_pairs[i].value)
check trie.getRootHash() == prior_to_update_root
# Second batch update
random.shuffle(numbers)
for i in numbers:
trie.set(kv_pairs[i].key, $i)
check trie.getRootHash() == batch_updated_root
# `[]`, `[]=` and `in` sugar must behave exactly like set/get/exists.
test "dictionary API":
trie[kv_pairs[0].key] = kv_pairs[0].value
let x = trie[kv_pairs[0].key]
let y = toRange(kv_pairs[0].value)
check x == y
check kv_pairs[0].key in trie
# The trie is persistent: reads and writes can be addressed at an explicit
# historical root instead of the current one.
test "get/set for specific root":
# Fresh DB/trie so earlier tests' state cannot interfere.
db = newMemoryDB()
trie = initSparseBinaryTrie(db)
let
testKey = toRange(kv_pairs[0].key)
testValue = toRange(kv_pairs[0].value)
testKey2 = toRange(kv_pairs[1].key)
testValue2 = toRange(kv_pairs[1].value)
trie.set(testKey, testValue)
var root = trie.getRootHash()
var value = trie.get(testKey, root)
check value == testValue
# set() with an explicit root returns the NEW root without moving the
# trie's own head.
root = trie.set(testKey2, testValue2, root)
value = trie.get(testKey2, root)
check value == testValue2
# The older key remains reachable from the new root.
value = trie.get(testKey, root)
check value == testValue
proc makeBadProof(size: int, width = 32): seq[BytesRange] =
  ## Build a structurally plausible but cryptographically bogus proof:
  ## `size` random strings of exactly `width` bytes each, as ranges.
  let garbage = randList(string, randGen(width, width), randGen(size, size))
  result = newSeqOfCap[BytesRange](size)
  for chunk in garbage:
    result.add toRange(chunk)
# Merkle proof generation/verification, including compact proofs and a
# battery of malformed-proof rejections.
test "proofs":
const
# A well-formed proof has one 32-byte sibling hash per tree level (256).
MaxBadProof = 32 * 8
let
testKey = kv_pairs[0].key
badKey = kv_pairs[1].key
testValue = "testValue"
testValue2 = "testValue2"
badValue = "badValue"
badProof = makeBadProof(MaxBadProof)
trie[testKey] = testValue
var proof = trie.prove(testKey)
check proof.len == treeHeight
# Valid proof verifies only with the exact (root, key, value) triple.
check verifyProof(proof, trie.getRootHash(), testKey, testValue) == true
check verifyProof(proof, trie.getRootHash(), testKey, badValue) == false
check verifyProof(proof, trie.getRootHash(), badKey, testValue) == false
check verifyProof(badProof, trie.getRootHash(), testKey, testValue) == false
let
testKey2 = kv_pairs[2].key
testKey3 = kv_pairs[3].key
defaultValue = zeroBytesRange
# A second insertion invalidates the old proof's root binding.
trie.set(testKey2, testValue)
proof = trie.prove(testKey)
check verifyProof(proof, trie.getRootHash(), testKey, testValue) == true
check verifyProof(proof, trie.getRootHash(), testKey, badValue) == false
check verifyProof(proof, trie.getRootHash(), testKey2, testValue) == false
check verifyProof(badProof, trie.getRootHash(), testKey, testValue) == false
proof = trie.prove(testKey2)
check verifyProof(proof, trie.getRootHash(), testKey2, testValue) == true
check verifyProof(proof, trie.getRootHash(), testKey2, badValue) == false
check verifyProof(proof, trie.getRootHash(), testKey3, testValue) == false
check verifyProof(badProof, trie.getRootHash(), testKey, testValue) == false
# Compaction must round-trip losslessly.
var compactProof = compactProof(proof)
var decompactedProof = decompactProof(compactProof)
check decompactedProof.len == proof.len
for i, c in proof:
check decompactedProof[i] == c
# Malformed proofs: wrong level count, wrong hash width.
let
badProof2 = makeBadProof(MaxBadProof + 1)
badProof3 = makeBadProof(MaxBadProof - 1)
badProof4 = makeBadProof(MaxBadProof, 31)
badProof5 = makeBadProof(MaxBadProof, 33)
badProof6 = makeBadProof(MaxBadProof, 1)
check verifyProof(badProof2, trie.getRootHash(), testKey3, defaultValue) == false
check verifyProof(badProof3, trie.getRootHash(), testKey3, defaultValue) == false
check verifyProof(badProof4, trie.getRootHash(), testKey3, defaultValue) == false
check verifyProof(badProof5, trie.getRootHash(), testKey3, defaultValue) == false
check verifyProof(badProof6, trie.getRootHash(), testKey3, defaultValue) == false
# Compaction helpers reject malformed input by returning an empty seq.
check compactProof(badProof2).len == 0
check compactProof(badProof3).len == 0
check decompactProof(badProof3).len == 0
var zeroProof: seq[BytesRange]
check decompactProof(zeroProof).len == 0
# Compact proofs verify directly via verifyCompactProof.
proof = trie.proveCompact(testKey2)
check verifyCompactProof(proof, trie.getRootHash(), testKey2, testValue) == true
check verifyCompactProof(proof, trie.getRootHash(), testKey2, badValue) == false
check verifyCompactProof(proof, trie.getRootHash(), testKey3, testValue) == false
check verifyCompactProof(badProof, trie.getRootHash(), testKey, testValue) == false
# Proofs can also be produced against an older (pre-update) root.
var root = trie.getRootHash()
trie.set(testKey2, testValue2)
proof = trie.proveCompact(testKey2, root)
check verifyCompactProof(proof, root, testKey2, testValue) == true
check verifyCompactProof(proof, root, testKey2, badValue) == false
check verifyCompactProof(proof, root, testKey3, testValue) == false
check verifyCompactProof(badProof, root, testKey, testValue) == false
proof = trie.prove(testKey2, root)
check verifyProof(proof, root, testKey2, testValue) == true
check verifyProof(proof, root, testKey2, badValue) == false
check verifyProof(proof, root, testKey3, testValue) == false
check verifyProof(badProof, root, testKey, testValue) == false
# Proving an absent key yields an empty proof, which never verifies.
proof = trie.prove(testKey3)
check proof.len == 0
check verifyProof(proof, trie.getRootHash(), testKey3, defaultValue) == false
check verifyProof(proof, trie.getRootHash(), testKey3, badValue) == false
check verifyProof(proof, trie.getRootHash(), testKey2, defaultValue) == false
check verifyProof(badProof, trie.getRootHash(), testKey, testValue) == false
# End-to-end walkthrough mirroring the README usage examples.
test "examples":
let
key1 = "01234567890123456789"
key2 = "abcdefghijklmnopqrst"
trie.set(key1, "value1")
trie.set(key2, "value2")
check trie.get(key1) == "value1".toRange
check trie.get(key2) == "value2".toRange
trie.del(key1)
# Deleted keys read back as the all-zero default, not an error.
check trie.get(key1) == zeroBytesRange
trie.del(key2)
check trie[key2] == zeroBytesRange
let
value1 = "hello world"
badValue = "bad value"
trie[key1] = value1
var proof = trie.prove(key1)
check verifyProof(proof, trie.getRootHash(), key1, value1) == true
check verifyProof(proof, trie.getRootHash(), key1, badValue) == false
check verifyProof(proof, trie.getRootHash(), key2, value1) == false

View File

@ -0,0 +1,63 @@
import
unittest, macros, os,
eth/trie/backends/[rocksdb_backend, sqlite_backend, lmdb_backend]
# Throwaway SQLite-backed chain DB; inMemory avoids touching the filesystem.
template dummyInstance(T: type SqliteChainDB): auto =
sqlite_backend.newChainDB(getTempDir(), inMemory = true)
# Throwaway RocksDB chain DB in a dedicated temp subdirectory; the directory
# is wiped first so each suite run starts from an empty store.
template dummyInstance(T: type RocksChainDB): auto =
let tmp = getTempDir() / "nimbus-test-db"
removeDir(tmp)
rocksdb_backend.newChainDB(tmp)
# Throwaway LMDB chain DB rooted directly at the temp dir.
# NOTE(review): "nimbus.db" removed here is presumably the SQLite suite's
# leftover file colliding with LMDB's naming — confirm against the backends.
template dummyInstance(T: type LmdbChainDB): auto =
# remove sqlite created database
let tmp = getTempDir() / "nimbus.db"
removeFile(tmp)
lmdb_backend.newChainDB(getTempDir())
# Instantiates one identical storage test suite per backend type; each test
# gets a fresh DB via setup and closes it in teardown.
template backendTests(DB) =
suite("storage tests: " & astToStr(DB)):
setup:
var db = dummyInstance(DB)
teardown:
close(db)
test "basic insertions and deletions":
var keyA = [1.byte, 2, 3]
var keyB = [1.byte, 2, 4]
var value1 = @[1.byte, 2, 3, 4, 5]
var value2 = @[7.byte, 8, 9, 10]
db.put(keyA, value1)
check:
keyA in db
keyB notin db
db.put(keyB, value2)
check:
keyA in db
keyB in db
check:
db.get(keyA) == value1
db.get(keyB) == value2
db.del(keyA)
# Overwrite keyB while keyA is gone.
db.put(keyB, value1)
check:
keyA notin db
keyB in db
# Missing keys read back as an empty seq rather than raising.
check db.get(keyA) == @[]
check db.get(keyB) == value1
# Deleting an already-absent key must be a harmless no-op.
db.del(keyA)
backendTests(RocksChainDB)
backendTests(SqliteChainDB)
backendTests(LmdbChainDB)

85
tests/trie/test_utils.nim Normal file
View File

@ -0,0 +1,85 @@
import
random, sets, eth/trie/trie_utils as ethUtils,
eth/rlp/types as rlpTypes, ranges/bitranges, nimcrypto/utils
type
# Descriptor for a closed random interval [minVal, maxVal].
RandGen*[T] = object
minVal, maxVal: T
# A random key/value pair of raw byte strings used as trie fixtures.
KVPair* = ref object
key*: string
value*: string
proc randGen*[T](minVal, maxVal: T): RandGen[T] =
  ## Build a generator descriptor for the closed interval [minVal, maxVal].
  ## Raises a Defect when the bounds are inverted.
  # doAssert instead of assert: plain assert is compiled out in
  # release/danger builds, silently allowing an inverted range through.
  doAssert(minVal <= maxVal)
  result.minVal = minVal
  result.maxVal = maxVal
# Degenerate range: a generator that always yields `minMax`.
proc randGen*[T](minMax: T): RandGen[T] =
randGen(minMax, minMax)
proc getVal*[T](x: RandGen[T]): T =
  ## Draw one value uniformly from the generator's closed interval.
  if x.minVal == x.maxVal:
    # Degenerate interval: skip the RNG entirely.
    x.minVal
  else:
    rand(x.minVal..x.maxVal)
proc randString*(len: int): string =
  ## Random string of exactly `len` characters, each an arbitrary
  ## byte value in 0..255 (i.e. raw bytes, not printable text).
  result = newStringOfCap(len)
  for _ in 0 ..< len:
    result.add char(rand(255))
proc toBytesRange*(str: string): BytesRange =
  ## Convert a string to a BytesRange. A "0x"-prefixed string is decoded
  ## as hex; anything else is copied byte-for-byte.
  ## Fix: guard the prefix probe with a length check — the original indexed
  ## str[0]/str[1] unconditionally, raising an IndexDefect for inputs
  ## shorter than two characters (e.g. the empty string).
  var s: seq[byte]
  if str.len >= 2 and str[0] == '0' and str[1] == 'x':
    s = fromHex(str.substr(2))
  else:
    s = newSeq[byte](str.len)
    for i in 0 ..< str.len:
      s[i] = byte(str[i])
  result = s.toRange
proc randPrimitives*[T](val: int): T =
  ## Produce a random primitive driven by `val`:
  ## - string: a random string of length `val`
  ## - int: `val` itself (pass-through, not random)
  ## - BytesRange: a random string of length `val` as a byte range
  # Consistency fix: the original mixed an implicit expression result in the
  # first branch with explicit `result =` in the others; all branches now
  # assign `result` uniformly.
  when T is string:
    result = randString(val)
  elif T is int:
    result = val
  elif T is BytesRange:
    result = randString(val).toRange
proc randList*(T: typedesc, strGen, listGen: RandGen, unique: bool = true): seq[T] =
  ## Generate a random list of T. Element size/value is drawn from `strGen`
  ## and the list length from `listGen`. With `unique` (the default),
  ## duplicates are rejected and redrawn.
  ## NOTE(review): in unique mode this spins until a fresh value appears —
  ## callers must keep the value domain comfortably larger than the list
  ## length or generation never terminates.
  let listLen = listGen.getVal()
  result = newSeqOfCap[T](listLen)
  if unique:
    # Renamed from `set`, which shadowed the built-in set type; the loop
    # variable `len` likewise shadowed system.len.
    var seen = initSet[T]()
    for _ in 0 ..< listLen:
      while true:
        let candidate = randPrimitives[T](strGen.getVal())
        if candidate notin seen:
          result.add candidate
          seen.incl candidate
          break
  else:
    for _ in 0 ..< listLen:
      result.add randPrimitives[T](strGen.getVal())
proc randKVPair*(keySize = 32): seq[KVPair] =
  ## Fixed batch of 100 fixture pairs: unique keys of exactly `keySize`
  ## bytes, values of 1..100 random bytes.
  const listLen = 100
  let
    keys = randList(string, randGen(keySize, keySize), randGen(listLen, listLen))
    vals = randList(string, randGen(1, 100), randGen(listLen, listLen))
  result = newSeqOfCap[KVPair](listLen)
  for i in 0 ..< listLen:
    result.add KVPair(key: keys[i], value: vals[i])
proc toBytes*(str: string): Bytes =
  ## Byte-for-byte copy of `str` into a byte sequence.
  result = newSeqOfCap[byte](str.len)
  for ch in str:
    result.add byte(ch)
# Random bit vector of `len` bits, backed by a freshly allocated byte seq.
proc genBitVec*(len: int): BitRange =
# Round len up to a whole number of bytes for the backing storage
# (ceil(len/8) via bit masking).
let k = ((len + 7) and (not 7)) shr 3
var s = newSeq[byte](k)
result = bits(s, len)
for i in 0..<len:
# NOTE(review): rand(2) yields 0..2 inclusive, so each bit is set with
# probability 1/3 rather than 1/2 — confirm whether a fair coin
# (rand(1) == 1) was intended before changing test-vector statistics.
result[i] = rand(2) == 1