These files were previously removed but put back by mistake during a rebase from master. (#2338)
This commit is contained in:
parent c48b527eea
commit 1b784695d5
@@ -1,526 +0,0 @@
# Nimbus
# Copyright (c) 2020-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.

import
  typetraits,
  faststreams/inputs, eth/[common, rlp], stint,
  eth/trie/trie_defs,
  results,
  ./witness_types, stew/byteutils, ../nimbus/[constants, db/core_db]

type
  DB = CoreDbRef

  NodeKey = object
    usedBytes: int
    data: array[32, byte]

  AccountAndSlots* = object
    address*: EthAddress
    codeLen*: int
    slots*: seq[StorageSlot]

  TreeBuilder = object
    when defined(useInputStream):
      input: InputStream
    else:
      input: seq[byte]
      pos: int
    db: DB
    root: KeccakHash
    flags: WitnessFlags
    keys*: seq[AccountAndSlots]
# This TreeBuilder supports short node parsing,
# but a block witness should not contain short nodes
# in the account trie. Short RLP nodes only appear in
# storage tries with depth >= 9.

# The InputStream is still unstable
# when using a large dataset for testing
# or running for longer.
when defined(useInputStream):
  proc initTreeBuilder*(input: InputStream, db: DB, flags: WitnessFlags): TreeBuilder =
    result.input = input
    result.db = db
    result.root = emptyRlpHash
    result.flags = flags

  proc initTreeBuilder*(input: openArray[byte], db: DB, flags: WitnessFlags): TreeBuilder =
    result.input = memoryInput(input)
    result.db = db
    result.root = emptyRlpHash
    result.flags = flags
else:
  proc initTreeBuilder*(input: openArray[byte], db: DB, flags: WitnessFlags): TreeBuilder =
    result.input = @input
    result.db = db
    result.root = emptyRlpHash
    result.flags = flags

func rootHash*(t: TreeBuilder): KeccakHash {.inline.} =
  t.root

func getDB*(t: TreeBuilder): DB {.inline.} =
  t.db

when defined(useInputStream):
  template readByte(t: var TreeBuilder): byte =
    t.input.read

  template len(t: TreeBuilder): int =
    t.input.len

  template read(t: var TreeBuilder, len: int): auto =
    t.input.read(len)

  template readable(t: var TreeBuilder): bool =
    t.input.readable

  template readable(t: var TreeBuilder, len: int): bool =
    t.input.readable(len)
else:
  template readByte(t: var TreeBuilder): byte =
    let pos = t.pos
    inc t.pos
    t.input[pos]

  template len(t: TreeBuilder): int =
    t.input.len

  template readable(t: var TreeBuilder): bool =
    t.pos < t.input.len

  template readable(t: var TreeBuilder, length: int): bool =
    t.pos + length <= t.input.len

  template read(t: var TreeBuilder, len: int): auto =
    let pos = t.pos
    inc(t.pos, len)
    toOpenArray(t.input, pos, pos+len-1)

proc safeReadByte(t: var TreeBuilder): byte =
  if t.readable:
    result = t.readByte()
  else:
    raise newException(ParsingError, "Cannot read byte from input stream")
when defined(debugHash):
  proc safeReadU32(t: var TreeBuilder): uint32 =
    if t.readable(4):
      result = fromBytesBE(uint32, t.read(4))
    else:
      raise newException(ParsingError, "Cannot read U32 from input stream")

template safeReadEnum(t: var TreeBuilder, T: type): untyped =
  let typ = t.safeReadByte.int
  if typ < low(T).int or typ > high(T).int:
    raise newException(ParsingError, "Wrong " & T.name & " value " & $typ)
  T(typ)

template safeReadBytes(t: var TreeBuilder, length: int, body: untyped) =
  if t.readable(length):
    body
  else:
    raise newException(ParsingError, "Failed to read " & $length & " bytes")
proc readUVarint32(t: var TreeBuilder): uint32 =
  # LEB128 varint encoding
  var shift = 0
  while true:
    let b = t.safeReadByte()
    result = result or ((b and 0x7F).uint32 shl shift)
    if (0x80 and b) == 0:
      break
    inc(shift, 7)
    if shift > 28:
      raise newException(ParsingError, "Failed to parse uvarint32")

proc readUVarint256(t: var TreeBuilder): UInt256 =
  # LEB128 varint encoding
  var shift = 0
  while true:
    let b = t.safeReadByte()
    result = result or ((b and 0x7F).u256 shl shift)
    if (0x80 and b) == 0:
      break
    inc(shift, 7)
    if shift > 252:
      raise newException(ParsingError, "Failed to parse uvarint256")
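# Aside (not part of the original file): a standalone sketch of the LEB128
# decoding loop above, reading from a plain byte sequence instead of the
# TreeBuilder input; decodeUVarint32 is an illustrative name used only here.
proc decodeUVarint32(data: openArray[byte]): uint32 =
  var shift = 0
  for b in data:
    result = result or ((b and 0x7F).uint32 shl shift)
    if (b and 0x80) == 0:
      return
    inc(shift, 7)
    if shift > 28:
      raise newException(ValueError, "uvarint32 too long")

# [0xE5, 0x8E, 0x26] carries 7 bits per byte, least significant group first:
# 0x65 + (0x0E shl 7) + (0x26 shl 14) = 101 + 1792 + 622592 = 624485
doAssert decodeUVarint32([0xE5'u8, 0x8E, 0x26]) == 624485'u32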
proc toKeccak(r: var NodeKey, x: openArray[byte]) {.inline.} =
  r.data[0..31] = x[0..31]
  r.usedBytes = 32

proc toKeccak(r: var NodeKey, z: byte, x: openArray[byte]) {.inline.} =
  r.data[0] = z
  r.data[1..31] = x[0..30]
  r.usedBytes = 32

proc append(r: var RlpWriter, n: NodeKey) =
  if n.usedBytes < 32:
    r.append rlpFromBytes(n.data.toOpenArray(0, n.usedBytes-1))
  else:
    r.append n.data.toOpenArray(0, n.usedBytes-1)

proc toNodeKey(t: var TreeBuilder, z: openArray[byte]): NodeKey =
  if z.len < 32:
    result.usedBytes = z.len
    result.data[0..z.len-1] = z[0..z.len-1]
  else:
    result.data = keccakHash(z).data
    result.usedBytes = 32
    t.db.newKvt().put(result.data, z).isOkOr:
      raiseAssert "toNodeKey(): put() failed: " & $$error

proc toNodeKey(z: openArray[byte]): NodeKey =
  if z.len >= 32:
    raise newException(ParsingError, "Failed to convert short rlp to NodeKey")
  result.usedBytes = z.len
  result.data[0..z.len-1] = z[0..z.len-1]

proc forceSmallNodeKeyToHash(t: var TreeBuilder, r: NodeKey): NodeKey =
  let hash = keccakHash(r.data.toOpenArray(0, r.usedBytes-1))
  t.db.newKvt().put(hash.data, r.data.toOpenArray(0, r.usedBytes-1)).isOkOr:
    raiseAssert "forceSmallNodeKeyToHash(): put() failed: " & $$error
  result.data = hash.data
  result.usedBytes = 32

proc writeCode(t: var TreeBuilder, code: openArray[byte]): Hash256 =
  result = keccakHash(code)
  t.db.newKvt().put(result.data, code).isOkOr:
    raiseAssert "writeCode(): put() failed: " & $$error
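# Aside (not part of the original file): the rule toNodeKey implements is the
# standard MPT referencing scheme, sketched below with a Table standing in
# for the Kvt store; assumes nimcrypto for Keccak-256, names are illustrative.
import std/tables, nimcrypto

var fakeDb: Table[array[32, byte], seq[byte]]

proc nodeRef(node: seq[byte]): seq[byte] =
  if node.len < 32:
    node                              # short node: embedded in the parent as-is
  else:
    let h = keccak256.digest(node).data
    fakeDb[h] = node                  # long node: persisted under its hash
    @h                                # parent references the 32-byte hash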
proc branchNode(t: var TreeBuilder, depth: int, storageMode: bool): NodeKey {.gcsafe.}
proc extensionNode(t: var TreeBuilder, depth: int, storageMode: bool): NodeKey {.gcsafe.}
proc accountNode(t: var TreeBuilder, depth: int): NodeKey {.gcsafe.}
proc accountStorageLeafNode(t: var TreeBuilder, depth: int): NodeKey {.gcsafe.}
proc hashNode(t: var TreeBuilder, depth: int, storageMode: bool): NodeKey {.gcsafe.}
proc treeNode(t: var TreeBuilder, depth: int = 0, storageMode = false): NodeKey {.gcsafe.}

proc buildTree*(t: var TreeBuilder): KeccakHash
    {.raises: [ParsingError, Exception].} =
  let version = t.safeReadByte().int
  if version != BlockWitnessVersion.int:
    raise newException(ParsingError, "Wrong block witness version")

  # one or more trees

  # we only parse one tree here
  let metadataType = t.safeReadByte().int
  if metadataType != MetadataNothing.int:
    raise newException(ParsingError, "This tree builder supports no metadata")

  var res = treeNode(t)
  if res.usedBytes != 32:
    raise newException(ParsingError, "buildTree should produce a hash")

  result.data = res.data
# once the block witness spec describes how to split the big tree into
# chunks, turn this buildForest into a chunked witness tree builder
proc buildForest*(
    t: var TreeBuilder): seq[KeccakHash]
    {.raises: [ParsingError, Exception].} =
  let version = t.safeReadByte().int
  if version != BlockWitnessVersion.int:
    raise newException(ParsingError, "Wrong block witness version")

  while t.readable:
    let metadataType = t.safeReadByte().int
    if metadataType != MetadataNothing.int:
      raise newException(ParsingError, "This tree builder supports no metadata")

    var res = treeNode(t)
    if res.usedBytes != 32:
      raise newException(ParsingError, "buildTree should produce a hash")

    result.add KeccakHash(data: res.data)
proc treeNode(t: var TreeBuilder, depth: int, storageMode = false): NodeKey =
  if depth > 64:
    raise newException(ParsingError, "invalid trie structure")

  let nodeType = safeReadEnum(t, TrieNodeType)
  case nodeType
  of BranchNodeType: result = t.branchNode(depth, storageMode)
  of ExtensionNodeType: result = t.extensionNode(depth, storageMode)
  of AccountNodeType:
    if storageMode:
      # parse account storage leaf node
      result = t.accountStorageLeafNode(depth)
    else:
      result = t.accountNode(depth)
  of HashNodeType: result = t.hashNode(depth, storageMode)

  if depth == 0 and result.usedBytes < 32:
    result = t.forceSmallNodeKeyToHash(result)
proc branchNode(t: var TreeBuilder, depth: int, storageMode: bool): NodeKey =
  if depth >= 64:
    raise newException(ParsingError, "invalid trie structure")

  let mask = constructBranchMask(t.safeReadByte, t.safeReadByte)

  when defined(debugDepth):
    let readDepth = t.safeReadByte().int
    doAssert(readDepth == depth, "branchNode " & $readDepth & " vs. " & $depth)

  when defined(debugHash):
    var hash: NodeKey
    toKeccak(hash, t.read(32))

  var r = initRlpList(17)

  for i in 0 ..< 16:
    if mask.branchMaskBitIsSet(i):
      r.append t.treeNode(depth+1, storageMode)
    else:
      r.append ""

  if branchMaskBitIsSet(mask, 16):
    raise newException(ParsingError, "The 17th element of a branch node should be empty")

  # the 17th element should always be empty
  r.append ""

  result = t.toNodeKey(r.finish)

  when defined(debugHash):
    if result != hash:
      debugEcho "DEPTH: ", depth
      debugEcho "result: ", result.data.toHex, " vs. ", hash.data.toHex
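# Aside (not part of the original file): the shape assembled above is the
# standard 17-element branch node, sketched here with all slots empty;
# this reuses the eth/rlp API already imported by this module.
when isMainModule:
  var example = initRlpList(17)
  for i in 0 ..< 16:
    example.append ""   # a set slot would carry the child NodeKey instead
  example.append ""     # the 17th element stays empty in a witness
  doAssert example.finish.len > 0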
func hexPrefixExtension(r: var RlpWriter, x: openArray[byte], nibblesLen: int) =
  # extension hexPrefix
  doAssert(nibblesLen >= 1 and nibblesLen <= 64)
  var bytes: array[33, byte]
  let last = nibblesLen div 2
  if (nibblesLen mod 2) == 0: # even
    bytes[0] = 0.byte
    var i = 1
    for y in x:
      bytes[i] = y
      inc i
  else: # odd
    bytes[0] = 0b0001_0000.byte or (x[0] shr 4)
    for i in 1..last:
      bytes[i] = (x[i-1] shl 4) or (x[i] shr 4)

  r.append toOpenArray(bytes, 0, last)

func hexPrefixLeaf(r: var RlpWriter, x: openArray[byte], depth: int) =
  # leaf hexPrefix
  doAssert(depth >= 0 and depth <= 64)
  let nibblesLen = 64 - depth
  var bytes: array[33, byte]
  var start = depth div 2
  if (nibblesLen mod 2) == 0: # even
    bytes[0] = 0b0010_0000.byte
  else: # odd
    bytes[0] = 0b0011_0000.byte or (x[start] and 0x0F)
    inc start

  var i = 1
  for z in start..31:
    bytes[i] = x[z]
    inc i

  r.append toOpenArray(bytes, 0, nibblesLen div 2)
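# Aside (not part of the original file): hexPrefixExtension/hexPrefixLeaf
# implement the yellow paper's hex-prefix ("compact") encoding. Below is a
# standalone sketch over an unpacked nibble array; hexPrefixEncode is an
# illustrative name used only here.
proc hexPrefixEncode(nibbles: openArray[byte], isLeaf: bool): seq[byte] =
  let flag = (if isLeaf: 0x20'u8 else: 0x00'u8)
  if nibbles.len mod 2 == 0:
    result.add(flag)                          # even: flag byte stands alone
    var i = 0
    while i < nibbles.len:
      result.add((nibbles[i] shl 4) or nibbles[i+1])
      inc(i, 2)
  else:
    result.add(flag or 0x10'u8 or nibbles[0]) # odd: first nibble joins the flag
    var i = 1
    while i < nibbles.len:
      result.add((nibbles[i] shl 4) or nibbles[i+1])
      inc(i, 2)

# leaf path [1, 2, 3, 4, 5] -> 0x31 0x23 0x45
doAssert hexPrefixEncode([1'u8, 2, 3, 4, 5], isLeaf = true) == @[0x31'u8, 0x23, 0x45]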
proc extensionNode(t: var TreeBuilder, depth: int, storageMode: bool): NodeKey =
  if depth >= 63:
    raise newException(ParsingError, "invalid trie structure")

  let nibblesLen = t.safeReadByte().int
  if nibblesLen > 64 or nibblesLen < 1:
    raise newException(ParsingError, "nibblesLen should be between 1 and 64")

  var r = initRlpList(2)
  let pathLen = nibblesLen div 2 + nibblesLen mod 2
  safeReadBytes(t, pathLen):
    r.hexPrefixExtension(t.read(pathLen), nibblesLen)

  when defined(debugDepth):
    let readDepth = t.safeReadByte().int
    doAssert(readDepth == depth, "extensionNode " & $readDepth & " vs. " & $depth)

  when defined(debugHash):
    var hash: NodeKey
    toKeccak(hash, t.read(32))

  if nibblesLen + depth > 64 or nibblesLen + depth < 1:
    raise newException(ParsingError, "nibblesLen + depth should be between 1 and 64")

  let nodeType = safeReadEnum(t, TrieNodeType)
  case nodeType
  of BranchNodeType: r.append t.branchNode(depth + nibblesLen, storageMode)
  of HashNodeType: r.append t.hashNode(depth, storageMode)
  else: raise newException(ParsingError, "wrong node type while parsing the child of an extension node")

  result = t.toNodeKey(r.finish)

  when defined(debugHash):
    if result != hash:
      debugEcho "DEPTH: ", depth
    doAssert(result == hash, "EXT HASH DIFF " & result.data.toHex & " vs. " & hash.data.toHex)
func toAddress(x: openArray[byte]): EthAddress =
  result[0..19] = x[0..19]

proc readAddress(t: var TreeBuilder): Hash256 =
  safeReadBytes(t, 20):
    let address = toAddress(t.read(20))
    result = keccakHash(address)
    t.keys.add AccountAndSlots(address: address)

proc readCodeLen(t: var TreeBuilder): int =
  let codeLen = t.readUVarint32()
  if wfEIP170 in t.flags and codeLen > EIP170_MAX_CODE_SIZE:
    raise newException(ContractCodeError, "code length exceeds the EIP-170 code size limit: " & $codeLen)
  t.keys[^1].codeLen = codeLen.int
  result = codeLen.int
proc readHashNode(t: var TreeBuilder, depth: int, storageMode: bool): NodeKey =
  let nodeType = safeReadEnum(t, TrieNodeType)
  if nodeType != HashNodeType:
    raise newException(ParsingError, "hash node expected but got " & $nodeType)
  result = t.hashNode(depth, storageMode)

proc readByteCode(t: var TreeBuilder, acc: var Account, depth: int) =
  let bytecodeType = safeReadEnum(t, BytecodeType)
  case bytecodeType
  of CodeTouched:
    let codeLen = t.readCodeLen()
    safeReadBytes(t, codeLen):
      acc.codeHash = t.writeCode(t.read(codeLen))
  of CodeUntouched:
    # readCodeLen already saves the codeLen
    # along with the recovered address,
    # so we can discard it here
    discard t.readCodeLen()

    let codeHash = t.readHashNode(depth, false)
    doAssert(codeHash.usedBytes == 32)
    acc.codeHash.data = codeHash.data
proc accountNode(t: var TreeBuilder, depth: int): NodeKey =
  if depth >= 65:
    raise newException(ParsingError, "invalid trie structure")

  when defined(debugHash):
    let len = t.safeReadU32().int
    let node = @(t.read(len))
    let nodeKey = t.toNodeKey(node)

  when defined(debugDepth):
    let readDepth = t.safeReadByte().int
    doAssert(readDepth == depth, "accountNode " & $readDepth & " vs. " & $depth)

  let accountType = safeReadEnum(t, AccountType)
  let addressHash = t.readAddress()

  var r = initRlpList(2)
  r.hexPrefixLeaf(addressHash.data, depth)

  var acc = Account(
    balance: t.readUVarint256(),
    nonce: t.readUVarint256().truncate(AccountNonce)
  )

  case accountType
  of SimpleAccountType:
    acc.codeHash = blankStringHash
    acc.storageRoot = emptyRlpHash
  of ExtendedAccountType:
    t.readByteCode(acc, depth)

    # switch to account storage parsing mode
    # and reset the depth
    let storageRoot = t.treeNode(0, storageMode = true)
    doAssert(storageRoot.usedBytes == 32)
    acc.storageRoot.data = storageRoot.data

  r.append rlp.encode(acc)

  let nodeRes = r.finish
  result = t.toNodeKey(nodeRes)

  when defined(debugHash):
    if result != nodeKey:
      debugEcho "Address: ", t.keys[^1].address.toHex
      debugEcho "addressHash: ", addressHash.data.toHex
      debugEcho "depth: ", depth
      debugEcho "result.usedBytes: ", result.usedBytes
      debugEcho "nodeKey.usedBytes: ", nodeKey.usedBytes
      var rlpa = rlpFromBytes(node)
      var rlpb = rlpFromBytes(nodeRes)
      debugEcho "Expected: ", inspect(rlpa)
      debugEcho "Actual: ", inspect(rlpb)
      var a = rlpa.listElem(1).toBytes.decode(Account)
      var b = rlpb.listElem(1).toBytes.decode(Account)
      debugEcho "Expected: ", a
      debugEcho "Actual: ", b

    doAssert(result == nodeKey, "account node parsing error")
func toStorageSlot(x: openArray[byte]): StorageSlot =
  result[0..31] = x[0..31]

proc readStorageSlot(t: var TreeBuilder): Hash256 =
  safeReadBytes(t, 32):
    let slot = toStorageSlot(t.read(32))
    result = keccakHash(slot)
    t.keys[^1].slots.add slot

proc accountStorageLeafNode(t: var TreeBuilder, depth: int): NodeKey =
  if depth >= 65:
    raise newException(ParsingError, "invalid trie structure")

  when defined(debugHash):
    let len = t.safeReadU32().int
    let node = @(t.read(len))
    let nodeKey = t.toNodeKey(node)

  when defined(debugDepth):
    let readDepth = t.safeReadByte().int
    doAssert(readDepth == depth, "accountStorageLeafNode " & $readDepth & " vs. " & $depth)

  var r = initRlpList(2)
  let slotHash = t.readStorageSlot()
  r.hexPrefixLeaf(slotHash.data, depth)

  safeReadBytes(t, 32):
    let val = UInt256.fromBytesBE(t.read(32))
    r.append rlp.encode(val)
    result = t.toNodeKey(r.finish)

  when defined(debugHash):
    doAssert(result == nodeKey, "account storage leaf node parsing error")
proc hashNode(t: var TreeBuilder, depth: int, storageMode: bool): NodeKey =
  if storageMode and depth >= 9:
    let z = t.safeReadByte()
    if z == ShortRlpPrefix:
      let rlpLen = t.safeReadByte().int
      if rlpLen == 0:
        safeReadBytes(t, 31):
          result.toKeccak(0, t.read(31))
      else:
        safeReadBytes(t, rlpLen):
          result = toNodeKey(t.read(rlpLen))
    else:
      safeReadBytes(t, 31):
        result.toKeccak(z, t.read(31))
  else:
    safeReadBytes(t, 32):
      result.toKeccak(t.read(32))
@@ -1,375 +0,0 @@
# Nimbus
# Copyright (c) 2020-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.

{.push raises: [].}

import
  stew/[byteutils, endians2],
  eth/[common, rlp],
  eth/trie/[trie_defs, nibbles],
  faststreams/outputs,
  results,
  ../nimbus/constants,
  ../nimbus/db/[core_db, storage_types],
  "."/[multi_keys, witness_types]

type
  WitnessBuilder* = object
    db*: CoreDbRef
    root: KeccakHash
    output: OutputStream
    flags: WitnessFlags

  StackElem = object
    node: seq[byte]
    parentGroup: Group
    keys: MultiKeysRef
    depth: int
    storageMode: bool

proc initWitnessBuilder*(db: CoreDbRef, rootHash: KeccakHash, flags: WitnessFlags = {}): WitnessBuilder =
  result.db = db
  result.root = rootHash
  result.output = memoryOutput().s
  result.flags = flags
template extensionNodeKey(r: Rlp): auto =
  hexPrefixDecode r.listElem(0).toBytes

proc expectHash(r: Rlp): seq[byte] {.gcsafe, raises: [RlpError].} =
  result = r.toBytes
  if result.len != 32:
    raise newException(RlpTypeMismatch,
      "RLP expected to be a Keccak hash value, but has an incorrect length")

template getNode(elem: untyped): untyped =
  if elem.isList: @(elem.rawData)
  else: wb.db.newKvt.get(elem.expectHash).valueOr: EmptyBlob

proc rlpListToBitmask(r: var Rlp): uint {.gcsafe, raises: [RlpError].} =
  # only bits 1 to 16 are valid;
  # the 1st bit is the rightmost bit
  var i = 0
  for branch in r:
    if not branch.isEmpty:
      result.setBranchMaskBit(i)
    inc i
  r.position = 0
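# Aside (not part of the original file): the 17-bit mask convention used by
# rlpListToBitmask, shown with plain shifts; branchBit is illustrative only.
func branchBit(mask: uint, i: int): bool =
  ((mask shr i) and 1'u) != 0

when isMainModule:
  var mask: uint = 0
  for i in [0, 4, 15]:              # pretend branch slots 0, 4 and 15 are non-empty
    mask = mask or (1'u shl i)
  doAssert branchBit(mask, 4)
  doAssert not branchBit(mask, 16)  # the 17th element must stay empty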
template write(wb: var WitnessBuilder, x: untyped) =
  wb.output.write(x)

when defined(debugHash):
  proc writeU32Impl(wb: var WitnessBuilder, x: uint32) =
    wb.write(toBytesBE(x))

  template writeU32(wb: var WitnessBuilder, x: untyped) =
    wb.writeU32Impl(uint32(x))

template writeByte(wb: var WitnessBuilder, x: untyped) =
  wb.write(byte(x))

proc writeUVarint(wb: var WitnessBuilder, x: SomeUnsignedInt)
    {.gcsafe, raises: [IOError].} =
  # LEB128 varint encoding
  var value = x
  while true:
    var b = value and 0x7F # low order 7 bits of value
    value = value shr 7
    if value != 0: # more bytes to come
      b = b or 0x80 # set high order bit of b
    wb.writeByte(b)
    if value == 0: break

template writeUVarint32(wb: var WitnessBuilder, x: untyped) =
  wb.writeUVarint(uint32(x))

proc writeUVarint(wb: var WitnessBuilder, x: UInt256)
    {.gcsafe, raises: [IOError].} =
  # LEB128 varint encoding
  var value = x
  while true:
    # we don't truncate to a byte here, int is faster
    var b = value.truncate(int) and 0x7F # low order 7 bits of value
    value = value shr 7
    if value.isZero.not: # more bytes to come
      b = b or 0x80 # set high order bit of b
    wb.writeByte(b)
    if value.isZero: break
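# Aside (not part of the original file): the encoder above, restated as a
# standalone proc that fills a seq instead of the output stream, and paired
# with the decoding loop in the first file; encodeUVarint is illustrative only.
proc encodeUVarint(x: uint32): seq[byte] =
  var value = x
  while true:
    var b = byte(value and 0x7F)  # low order 7 bits
    value = value shr 7
    if value != 0:
      b = b or 0x80               # more bytes to come
    result.add(b)
    if value == 0:
      break

doAssert encodeUVarint(624485'u32) == @[0xE5'u8, 0x8E, 0x26]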
proc writeNibbles(wb: var WitnessBuilder; n: NibblesSeq, withLen: bool = true)
    {.gcsafe, raises: [IOError].} =
  # convert the NibblesSeq into a left-aligned byte seq;
  # perhaps we can optimize this if the NibblesSeq is already left-aligned
  let nibblesLen = n.len
  let numBytes = nibblesLen div 2 + nibblesLen mod 2
  var bytes: array[32, byte]
  doAssert(nibblesLen >= 1 and nibblesLen <= 64)
  for pos in 0..<n.len:
    if (pos and 1) != 0:
      bytes[pos div 2] = bytes[pos div 2] or n[pos]
    else:
      bytes[pos div 2] = bytes[pos div 2] or (n[pos] shl 4)

  if withLen:
    # write nibblesLen
    wb.writeByte(nibblesLen)
  # write nibbles
  wb.write(bytes.toOpenArray(0, numBytes-1))
proc writeExtensionNode(wb: var WitnessBuilder, n: NibblesSeq, depth: int, node: openArray[byte])
    {.gcsafe, raises: [IOError].} =
  # write type
  wb.writeByte(ExtensionNodeType)
  # write nibbles
  wb.writeNibbles(n)

  when defined(debugDepth):
    wb.writeByte(depth)

  when defined(debugHash):
    wb.write(keccakHash(node).data)

proc writeBranchNode(wb: var WitnessBuilder, mask: uint, depth: int, node: openArray[byte])
    {.gcsafe, raises: [IOError].} =
  # write type
  # the 17th element of a branch node should always be empty
  doAssert mask.branchMaskBitIsSet(16) == false
  wb.writeByte(BranchNodeType)
  # write branch mask
  # countOnes(branch mask) >= 2 and <= 16
  wb.writeByte((mask shr 8) and 0xFF)
  wb.writeByte(mask and 0xFF)

  when defined(debugDepth):
    wb.writeByte(depth)

  when defined(debugHash):
    wb.write(keccakHash(node).data)

proc writeHashNode(wb: var WitnessBuilder, node: openArray[byte], depth: int, storageMode: bool)
    {.gcsafe, raises: [IOError].} =
  # usually a hash node means the recursion will not go deeper
  # and the information can be represented by the hash;
  # for a chunked witness, a hash node can be the root of another
  # sub-trie in one of the chunks
  wb.writeByte(HashNodeType)
  if depth >= 9 and storageMode and node[0] == 0.byte:
    wb.writeByte(ShortRlpPrefix)
  wb.write(node)

proc writeShortRlp(wb: var WitnessBuilder, node: openArray[byte], depth: int, storageMode: bool)
    {.gcsafe, raises: [IOError].} =
  doAssert(node.len < 32 and storageMode)
  wb.writeByte(HashNodeType)
  wb.writeByte(ShortRlpPrefix)
  wb.writeByte(node.len)
  wb.write(node)
proc getBranchRecurse(wb: var WitnessBuilder, z: var StackElem) {.gcsafe, raises: [CatchableError].}

proc writeByteCode(wb: var WitnessBuilder, kd: KeyData, acc: Account, depth: int)
    {.gcsafe, raises: [IOError, ContractCodeError].} =
  let kvt = wb.db.newKvt()
  if not kd.codeTouched:
    # the account has code, but it was not touched by the EVM
    # in the current block execution
    wb.writeByte(CodeUntouched)
    let code = kvt.get(contractHashKey(acc.codeHash).toOpenArray).valueOr:
      EmptyBlob
    if wfEIP170 in wb.flags and code.len > EIP170_MAX_CODE_SIZE:
      raise newException(ContractCodeError, "code length exceeds the EIP-170 code size limit")
    wb.writeUVarint32(code.len)
    wb.writeHashNode(acc.codeHash.data, depth, false)
    # no need to write 'code' here
    return

  wb.writeByte(CodeTouched)
  if acc.codeHash == blankStringHash:
    # no code
    wb.writeUVarint32(0'u32)
    return

  # the account has code and the EVM used it
  let code = kvt.get(contractHashKey(acc.codeHash).toOpenArray).valueOr:
    EmptyBlob
  if wfEIP170 in wb.flags and code.len > EIP170_MAX_CODE_SIZE:
    raise newException(ContractCodeError, "code length exceeds the EIP-170 code size limit")
  wb.writeUVarint32(code.len)
  wb.write(code)
proc writeStorage(wb: var WitnessBuilder, kd: KeyData, acc: Account, depth: int)
    {.gcsafe, raises: [CatchableError].} =
  if kd.storageKeys.isNil:
    # the account has storage, but it was not touched by the EVM
    wb.writeHashNode(acc.storageRoot.data, depth, true)
  elif acc.storageRoot != emptyRlpHash:
    # the account has storage and the EVM used it
    let node = wb.db.newKvt.get(acc.storageRoot.data).valueOr: EmptyBlob
    var zz = StackElem(
      node: node,
      parentGroup: kd.storageKeys.initGroup(),
      keys: kd.storageKeys,
      depth: 0,          # set depth to zero
      storageMode: true  # switch to storage mode
    )
    getBranchRecurse(wb, zz)
  else:
    # no storage at all
    wb.writeHashNode(emptyRlpHash.data, depth, true)
proc writeAccountNode(wb: var WitnessBuilder, kd: KeyData, acc: Account,
                      node: openArray[byte], depth: int) {.raises: [ContractCodeError, IOError, CatchableError].} =

  # write type
  wb.writeByte(AccountNodeType)

  when defined(debugHash):
    wb.writeU32(node.len)
    wb.write(node)

  when defined(debugDepth):
    wb.writeByte(depth)

  var accountType = if acc.codeHash == blankStringHash and acc.storageRoot == emptyRlpHash: SimpleAccountType
                    else: ExtendedAccountType

  wb.writeByte(accountType)
  wb.write(kd.address)
  wb.writeUVarint(acc.balance)
  wb.writeUVarint(acc.nonce)

  if accountType != SimpleAccountType:
    wb.writeByteCode(kd, acc, depth)
    wb.writeStorage(kd, acc, depth)

  #0x00 address:<Address> balance:<Bytes32> nonce:<Bytes32>
  #0x01 address:<Address> balance:<Bytes32> nonce:<Bytes32> bytecode:<Bytecode> storage:<Tree_Node(0,1)>

proc writeAccountStorageLeafNode(wb: var WitnessBuilder, key: openArray[byte], val: UInt256, node: openArray[byte], depth: int)
    {.gcsafe, raises: [IOError].} =
  wb.writeByte(StorageLeafNodeType)

  when defined(debugHash):
    wb.writeU32(node.len)
    wb.write(node)

  when defined(debugDepth):
    wb.writeByte(depth)

  wb.write(key)
  wb.write(val.toBytesBE)

  #<Storage_Leaf_Node(d<65)> := key:<Bytes32> val:<Bytes32>
proc getBranchRecurse(wb: var WitnessBuilder, z: var StackElem) =
  if z.node.len == 0: return
  if z.node.len < 32:
    writeShortRlp(wb, z.node, z.depth, z.storageMode)
    return

  var nodeRlp = rlpFromBytes z.node

  case nodeRlp.listLen
  of 2:
    let (isLeaf, k) = nodeRlp.extensionNodeKey
    let mg = groups(z.keys, z.depth, k, z.parentGroup)

    if not mg.match:
      # return immediately if there is no match
      writeHashNode(wb, keccakHash(z.node).data, z.depth, z.storageMode)
      return

    let value = nodeRlp.listElem(1)
    if not isLeaf:
      # the recursion will go deeper depending on the common-prefix length in nibbles
      writeExtensionNode(wb, k, z.depth, z.node)
      var zz = StackElem(
        node: value.getNode,
        parentGroup: mg.group,
        keys: z.keys,
        depth: z.depth + k.len, # increase the depth by k.len
        storageMode: z.storageMode
      )
      getBranchRecurse(wb, zz)
      return

    # there should be only one match
    let kd = z.keys.visitMatch(mg, z.depth)
    if z.storageMode:
      doAssert(kd.storageMode)
      writeAccountStorageLeafNode(wb, kd.storageSlot, value.toBytes.decode(UInt256), z.node, z.depth)
    else:
      doAssert(not kd.storageMode)
      writeAccountNode(wb, kd, value.toBytes.decode(Account), z.node, z.depth)

  of 17:
    let branchMask = rlpListToBitmask(nodeRlp)
    writeBranchNode(wb, branchMask, z.depth, z.node)

    # if there is a match in any branch element
    # from the 1st to the 16th, the recursion will go
    # one nibble deeper
    doAssert(z.depth != 64) # notLeaf or path.len == 0

    let path = groups(z.keys, z.parentGroup, z.depth)
    for i in nonEmpty(branchMask):
      let branch = nodeRlp.listElem(i)
      if branchMaskBitIsSet(path.mask, i):
        # it is a match between the MultiKeysRef and the branch node element
        var zz = StackElem(
          node: branch.getNode,
          parentGroup: path.groups[i],
          keys: z.keys,
          depth: z.depth + 1, # increase the depth by one
          storageMode: z.storageMode
        )
        getBranchRecurse(wb, zz)
        continue

      if branch.isList:
        writeShortRlp(wb, branch.rawData, z.depth, z.storageMode)
      else:
        # if the branch element is not empty and not a match, emit its hash
        writeHashNode(wb, branch.expectHash, z.depth, z.storageMode)

    # the 17th element should always be empty:
    # it appears in the yellow paper but never in
    # the actual ethereum state trie,
    # and it is also not included in the block witness spec
    doAssert branchMask.branchMaskBitIsSet(16) == false
  else:
    raise newException(CorruptedTrieDatabase,
      "HexaryTrie node with an unexpected number of children")
proc buildWitness*(wb: var WitnessBuilder, keys: MultiKeysRef): seq[byte]
    {.raises: [CatchableError].} =

  # witness version
  wb.writeByte(BlockWitnessVersion)

  # one or more trees

  # we only output one big tree here;
  # the condition for splitting the big tree into chunks of sub-tries
  # is not clear in the spec
  wb.writeByte(MetadataNothing)
  let node = wb.db.newKvt.get(wb.root.data).valueOr: EmptyBlob
  var z = StackElem(
    node: node,
    parentGroup: keys.initGroup(),
    keys: keys,
    depth: 0,          # always start with a zero depth
    storageMode: false # build the account witness first
  )
  getBranchRecurse(wb, z)

  # result
  result = wb.output.getOutput(seq[byte])
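# Aside (not part of the original file): a hypothetical round trip pairing
# buildWitness with the TreeBuilder from the first file above; the import
# path and roundTrip are illustrative assumptions, not part of this module.
import ./tree_from_witness

proc roundTrip(db: CoreDbRef, stateRoot: KeccakHash, keys: MultiKeysRef): bool
    {.raises: [Exception].} =
  var wb = initWitnessBuilder(db, stateRoot, {wfEIP170})
  let witness = wb.buildWitness(keys)

  # rebuilding the trie from the witness must reproduce the trusted root
  var tb = initTreeBuilder(witness, db, {wfEIP170})
  tb.buildTree() == stateRoot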
@@ -1,73 +0,0 @@
# Nimbus
# Copyright (c) 2024 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  std/tables,
  stint,
  eth/[common, rlp],
  results,
  ../nimbus/db/[core_db, state_db],
  ./[tree_from_witness, witness_types]

export results

type
  BlockWitness* = seq[byte]

  AccountData* = object
    account*: Account
    code*: seq[byte]
    storage*: Table[UInt256, UInt256]

proc buildAccountsTableFromKeys(
    db: ReadOnlyStateDB,
    keys: openArray[AccountAndSlots]): TableRef[EthAddress, AccountData] {.raises: [RlpError].} =

  var accounts = newTable[EthAddress, AccountData]()

  for key in keys:
    let account = db.getAccount(key.address)
    let code = if key.codeLen > 0:
                 db.db.newKvt().get(account.codeHash.data).valueOr: EmptyBlob
               else: @[]
    var storage = Table[UInt256, UInt256]()

    for slot in key.slots:
      let slotKey = fromBytesBE(UInt256, slot)
      let rc = db.getStorage(key.address, slotKey)
      if rc.isOk:
        storage[slotKey] = rc.value

    accounts[key.address] = AccountData(
      account: account.to(Account),
      code: code,
      storage: storage)

  return accounts

proc verifyWitness*(
    trustedStateRoot: KeccakHash,
    witness: BlockWitness,
    flags: WitnessFlags): Result[TableRef[EthAddress, AccountData], string] =
  if witness.len() == 0:
    return err("witness is empty")

  let db = newCoreDbRef(AristoDbMemory) # `AristoDbVoid` has a smaller footprint
  var tb = initTreeBuilder(witness, db, flags)

  try:
    let stateRoot = tb.buildTree()
    if stateRoot != trustedStateRoot:
      return err("witness stateRoot doesn't match trustedStateRoot")

    let ac = newAccountStateDB(db, trustedStateRoot)
    let accounts = buildAccountsTableFromKeys(ReadOnlyStateDB(ac), tb.keys)
    ok(accounts)
  except Exception as e:
    err(e.msg)
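# Aside (not part of the original file): a hypothetical caller sketch for
# verifyWitness; checkWitness and its echo reporting are illustrative only.
proc checkWitness(trustedStateRoot: KeccakHash, witness: BlockWitness) =
  let res = verifyWitness(trustedStateRoot, witness, {wfEIP170})
  if res.isOk:
    for address, data in res.value:
      echo address, ": nonce=", data.account.nonce,
           ", code bytes=", data.code.len, ", slots=", data.storage.len
  else:
    echo "witness rejected: ", res.error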