mirror of https://github.com/status-im/nim-eth.git
assert() -> doAssert()
commit 9146e8e2f0
parent f15cbbae90
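Why the rename matters (a hedged note, not part of the diff itself): in Nim, `assert` is only checked while assertions are enabled and can be compiled out with `--assertions:off` or `-d:danger`, whereas `doAssert` is always evaluated regardless of those switches, so the checks below survive release builds. A minimal sketch; the `checkLen` proc is a made-up example for illustration, not code from this repository:

```nim
# Hedged sketch, not part of the commit: `checkLen` is a hypothetical helper.
proc checkLen(data: seq[byte]) =
  # `assert` can be stripped by the compiler (e.g. --assertions:off, -d:danger),
  # so this check may silently vanish in release builds.
  assert(data.len == 32)
  # `doAssert` ignores the assertions switch and is always executed,
  # which is why checks that must not disappear prefer it.
  doAssert(data.len == 32)

when isMainModule:
  checkLen(newSeq[byte](32))  # a 32-byte all-zero sequence passes both checks
  echo "ok"
```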
@@ -18,10 +18,10 @@ To see the bloom filter used in the context of Ethereum, please refer to the [Et
 import eth/bloom, stint
 var f: BloomFilter
 f.incl("test1")
-assert("test1" in f)
+doAssert("test1" in f)
-assert("test2" notin f)
+doAssert("test2" notin f)
 f.incl("test2")
-assert("test2" in f)
+doAssert("test2" in f)
-assert(f.value.toHex == "80000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000200000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000")
+doAssert(f.value.toHex == "80000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000200000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000")
 ```

doc/trie.md (42 changed lines)
@@ -103,17 +103,17 @@ var db = newMemoryDB()
 var trie = initBinaryTrie(db)
 trie.set("key1", "value1")
 trie.set("key2", "value2")
-assert trie.get("key1") == "value1".toRange
+doAssert trie.get("key1") == "value1".toRange
-assert trie.get("key2") == "value2".toRange
+doAssert trie.get("key2") == "value2".toRange

 # delete all subtrie with key prefixes "key"
 trie.deleteSubtrie("key")
-assert trie.get("key1") == zeroBytesRange
+doAssert trie.get("key1") == zeroBytesRange
-assert trie.get("key2") == zeroBytesRange
+doAssert trie.get("key2") == zeroBytesRange

 trie["moon"] = "sun"
-assert "moon" in trie
+doAssert "moon" in trie
-assert trie["moon"] == "sun".toRange
+doAssert trie["moon"] == "sun".toRange
 ```

 Remember, `set` and `get` are trie operations. A single `set` operation may invoke
@@ -160,10 +160,10 @@ var trie = initBinaryTrie(db)
 trie.set("key1", "value1")
 trie.set("key2", "value2")

-assert checkIfBranchExist(db, trie.getRootHash(), "key") == true
+doAssert checkIfBranchExist(db, trie.getRootHash(), "key") == true
-assert checkIfBranchExist(db, trie.getRootHash(), "key1") == true
+doAssert checkIfBranchExist(db, trie.getRootHash(), "key1") == true
-assert checkIfBranchExist(db, trie.getRootHash(), "ken") == false
+doAssert checkIfBranchExist(db, trie.getRootHash(), "ken") == false
-assert checkIfBranchExist(db, trie.getRootHash(), "key123") == false
+doAssert checkIfBranchExist(db, trie.getRootHash(), "key123") == false
 ```

 The tree will looks like:
@@ -190,11 +190,11 @@ var branchA = getBranch(db, trie.getRootHash(), "key1")
 var branchB = getBranch(db, trie.getRootHash(), "key2")
 # ==> [A, B, C2, D2]

-assert isValidBranch(branchA, trie.getRootHash(), "key1", "value1") == true
+doAssert isValidBranch(branchA, trie.getRootHash(), "key1", "value1") == true
 # wrong key, return zero bytes
-assert isValidBranch(branchA, trie.getRootHash(), "key5", "") == true
+doAssert isValidBranch(branchA, trie.getRootHash(), "key5", "") == true

-assert isValidBranch(branchB, trie.getRootHash(), "key1", "value1") # InvalidNode
+doAssert isValidBranch(branchB, trie.getRootHash(), "key1", "value1") # InvalidNode

 var x = getBranch(db, trie.getRootHash(), "key")
 # ==> [A]
@@ -218,7 +218,7 @@ var wholeTrie = getWitness(db, trie.getRootHash(), "")
 var node = branch[1] # B
 let nodeHash = keccak256.digest(node.baseAddr, uint(node.len))
 var nodes = getTrieNodes(db, nodeHash)
-assert nodes.len == wholeTrie.len - 1
+doAssert nodes.len == wholeTrie.len - 1
 # ==> [B, C1, D1, C2, D2]
 ```

@@ -303,14 +303,14 @@ let

 trie.set(key1, "value1")
 trie.set(key2, "value2")
-assert trie.get(key1) == "value1".toRange
+doAssert trie.get(key1) == "value1".toRange
-assert trie.get(key2) == "value2".toRange
+doAssert trie.get(key2) == "value2".toRange

 trie.delete(key1)
-assert trie.get(key1) == zeroBytesRange
+doAssert trie.get(key1) == zeroBytesRange

 trie.delete(key2)
-assert trie[key2] == zeroBytesRange
+doAssert trie[key2] == zeroBytesRange
 ```

 Remember, `set` and `get` are trie operations. A single `set` operation may invoke
@@ -331,8 +331,8 @@ Using ``prove`` dan ``verifyProof`` API, we can do some merkling with SMT.
 trie[key1] = value1
 var proof = trie.prove(key1)

-assert verifyProof(proof, trie.getRootHash(), key1, value1) == true
+doAssert verifyProof(proof, trie.getRootHash(), key1, value1) == true
-assert verifyProof(proof, trie.getRootHash(), key1, badValue) == false
+doAssert verifyProof(proof, trie.getRootHash(), key1, badValue) == false
-assert verifyProof(proof, trie.getRootHash(), key2, value1) == false
+doAssert verifyProof(proof, trie.getRootHash(), key2, value1) == false
 ```

@@ -215,11 +215,11 @@ proc hasStateRoot*(rec: Receipt): bool {.inline.} =
 rec.stateRootOrStatus.isHash == true

 proc stateRoot*(rec: Receipt): Hash256 {.inline.} =
-assert(rec.hasStateRoot)
+doAssert(rec.hasStateRoot)
 rec.stateRootOrStatus.hash

 proc status*(rec: Receipt): int {.inline.} =
-assert(rec.hasStatus)
+doAssert(rec.hasStatus)
 if rec.stateRootOrStatus.status: 1 else: 0

 #
@@ -273,7 +273,7 @@ proc append*(rlpWriter: var RlpWriter, t: Transaction, a: EthAddress) {.inline.}
 rlpWriter.append(a)

 proc read*(rlp: var Rlp, T: typedesc[HashOrStatus]): T {.inline.} =
-assert(rlp.blobLen() == 32 or rlp.blobLen() == 1)
+doAssert(rlp.blobLen() == 32 or rlp.blobLen() == 1)
 if rlp.blobLen == 1:
 result = hashOrStatus(rlp.read(uint8) == 1)
 else:
@@ -226,22 +226,22 @@ proc getRaw*(pubkey: PublicKey): array[RawPublicKeySize, byte] {.noinit.} =
 addr length, unsafeAddr pubkey,
 SECP256K1_EC_UNCOMPRESSED) != 1:
 raiseSecp256k1Error()
-assert(length == RawPublicKeySize + 1)
+doAssert(length == RawPublicKeySize + 1)
-assert(key[0] == 0x04'u8)
+doAssert(key[0] == 0x04'u8)
 copyMem(addr result[0], addr key[1], RawPublicKeySize)

 proc toRaw*(pubkey: PublicKey, data: var openarray[byte]) =
 ## Converts public key `pubkey` to serialized form and store it in `data`.
 var key: array[RawPublicKeySize + 1, byte]
-assert(len(data) >= RawPublicKeySize)
+doAssert(len(data) >= RawPublicKeySize)
 var length = csize(sizeof(key))
 let ctx = getSecpContext()
 if secp256k1_ec_pubkey_serialize(ctx, cast[ptr cuchar](addr key),
 addr length, unsafeAddr pubkey,
 SECP256K1_EC_UNCOMPRESSED) != 1:
 raiseSecp256k1Error()
-assert(length == RawPublicKeySize + 1)
+doAssert(length == RawPublicKeySize + 1)
-assert(key[0] == 0x04'u8)
+doAssert(key[0] == 0x04'u8)
 copyMem(addr data[0], addr key[1], RawPublicKeySize)

 proc getRaw*(s: Signature): array[RawSignatureSize, byte] {.noinit.} =
@@ -257,7 +257,7 @@ proc toRaw*(s: Signature, data: var openarray[byte]) =
 ## Converts signature `s` to serialized form and store it in `data`.
 let ctx = getSecpContext()
 var recid = cint(0)
-assert(len(data) >= RawSignatureSize)
+doAssert(len(data) >= RawSignatureSize)
 if secp256k1_ecdsa_recoverable_signature_serialize_compact(
 ctx, cast[ptr cuchar](addr data[0]), addr recid, unsafeAddr s) != 1:
 raiseSecp256k1Error()
@@ -19,7 +19,7 @@ export
 p2p_types, rlpx, enode, kademlia

 proc addCapability*(node: var EthereumNode, p: ProtocolInfo) =
-assert node.connectionState == ConnectionState.None
+doAssert node.connectionState == ConnectionState.None

 let pos = lowerBound(node.protocols, p, rlpx.cmp)
 node.protocols.insert(p, pos)
@@ -95,7 +95,7 @@ proc connectToNetwork*(node: EthereumNode,
 bootstrapNodes: seq[ENode],
 startListening = true,
 enableDiscovery = true) {.async.} =
-assert node.connectionState == ConnectionState.None
+doAssert node.connectionState == ConnectionState.None

 node.connectionState = Connecting
 node.discovery = newDiscoveryProtocol(node.keys.seckey,
@@ -85,7 +85,7 @@ template toa(a, b, c: untyped): untyped =
 toOpenArray((a), (b), (b) + (c) - 1)

 proc sxor[T](a: var openarray[T], b: openarray[T]) {.inline.} =
-assert(len(a) == len(b))
+doAssert(len(a) == len(b))
 for i in 0 ..< len(a):
 a[i] = a[i] xor b[i]

@@ -160,7 +160,7 @@ proc authMessageEIP8(h: var Handshake,
 buffer: array[PlainAuthMessageMaxEIP8, byte]
 padsize: byte

-assert(EIP8 in h.flags)
+doAssert(EIP8 in h.flags)
 outlen = 0
 if ecdhAgree(h.host.seckey, pubkey, secret) != EthKeysStatus.Success:
 return(EcdhError)
@@ -174,7 +174,7 @@ proc authMessageEIP8(h: var Handshake,
 h.host.pubkey.getRaw(),
 h.initiatorNonce,
 [byte(h.version)])
-assert(len(payload) == PlainAuthMessageEIP8Length)
+doAssert(len(payload) == PlainAuthMessageEIP8Length)
 let pencsize = eciesEncryptedLength(len(payload))
 while true:
 if randomBytes(addr padsize, 1) != 1:
@@ -240,11 +240,11 @@ proc ackMessageEIP8(h: var Handshake,
 var
 buffer: array[PlainAckMessageMaxEIP8, byte]
 padsize: byte
-assert(EIP8 in h.flags)
+doAssert(EIP8 in h.flags)
 var payload = rlp.encodeList(h.ephemeral.pubkey.getRaw(),
 h.responderNonce,
 [byte(h.version)])
-assert(len(payload) == PlainAckMessageEIP8Length)
+doAssert(len(payload) == PlainAckMessageEIP8Length)
 outlen = 0
 let pencsize = eciesEncryptedLength(len(payload))
 while true:
@@ -319,7 +319,7 @@ proc decodeAuthMessageV4(h: var Handshake, m: openarray[byte]): AuthStatus =
 secret: SharedSecret
 buffer: array[PlainAuthMessageV4Length, byte]
 pubkey: PublicKey
-assert(Responder in h.flags)
+doAssert(Responder in h.flags)
 if eciesDecrypt(m, buffer, h.host.seckey) != EciesStatus.Success:
 return(EciesError)
 var header = cast[ptr AuthMessageV4](addr buffer[0])
@@ -424,7 +424,7 @@ proc decodeAckMessageV4(h: var Handshake, m: openarray[byte]): AuthStatus =
 ## Decodes V4 AckMessage.
 var
 buffer: array[PlainAckMessageV4Length, byte]
-assert(Initiator in h.flags)
+doAssert(Initiator in h.flags)
 if eciesDecrypt(m, buffer, h.host.seckey) != EciesStatus.Success:
 return(EciesError)
 var header = cast[ptr AckMessageV4](addr buffer[0])
@@ -292,9 +292,9 @@ when isMainModule:
 doAssert(recoverMsgPublicKey(m, remotePubkey))

 let (cmdId, payload) = unpack(m)
-assert(payload == hexToSeqByte"f2cb842edbd4d182944382765da0ab56fb9e64a85a597e6bb27c656b4f1afb7e06b0fd4e41ccde6dba69a3c4a150845aaa4de2")
+doAssert(payload == hexToSeqByte"f2cb842edbd4d182944382765da0ab56fb9e64a85a597e6bb27c656b4f1afb7e06b0fd4e41ccde6dba69a3c4a150845aaa4de2")
-assert(cmdId == cmdPong)
+doAssert(cmdId == cmdPong)
-assert(remotePubkey == initPublicKey("78de8a0916848093c73790ead81d1928bec737d565119932b98c6b100d944b7a95e94f847f689fc723399d2e31129d182f7ef3863f2b4c820abbf3ab2722344d"))
+doAssert(remotePubkey == initPublicKey("78de8a0916848093c73790ead81d1928bec737d565119932b98c6b100d944b7a95e94f847f689fc723399d2e31129d182f7ef3863f2b4c820abbf3ab2722344d"))

 let privKey = initPrivateKey("a2b50376a79b1a8c8a3296485572bdfbf54708bb46d3c25d73d2723aaaf6a617")

@@ -304,7 +304,7 @@ when isMainModule:
 # var b = @[1.byte, 2, 3]
 # let m = pack(cmdPing, b.initBytesRange, privKey)
 # let (remotePubkey, cmdId, payload) = unpack(m)
-# assert(remotePubkey.raw_key.toHex == privKey.public_key.raw_key.toHex)
+# doAssert(remotePubkey.raw_key.toHex == privKey.public_key.raw_key.toHex)

 var bootnodes = newSeq[ENode]()
 for item in LOCAL_BOOTNODES:
@@ -170,7 +170,7 @@ proc computeSharedPrefixBits(nodes: openarray[Node]): int =
 for j in 1 .. nodes.high:
 if (nodes[j].id and mask) != reference: return i - 1

-assert(false, "Unable to calculate number of shared prefix bits")
+doAssert(false, "Unable to calculate number of shared prefix bits")

 proc init(r: var RoutingTable, thisNode: Node) {.inline.} =
 r.thisNode = thisNode
@@ -189,7 +189,7 @@ proc removeNode(r: var RoutingTable, n: Node) =
 r.bucketForNode(n).removeNode(n)

 proc addNode(r: var RoutingTable, n: Node): Node =
-assert(n != r.thisNode)
+doAssert(n != r.thisNode)
 let bucket = r.bucketForNode(n)
 let evictionCandidate = bucket.add(n)
 if not evictionCandidate.isNil:
@@ -262,7 +262,7 @@ proc pingId(n: Node, token: seq[byte]): seq[byte] {.inline.} =
 result = token & @(n.node.pubkey.data)

 proc waitPong(k: KademliaProtocol, n: Node, pingid: seq[byte]): Future[bool] =
-assert(pingid notin k.pongFutures, "Already waiting for pong from " & $n)
+doAssert(pingid notin k.pongFutures, "Already waiting for pong from " & $n)
 result = newFuture[bool]("waitPong")
 let fut = result
 k.pongFutures[pingid] = result
@@ -272,12 +272,12 @@ proc waitPong(k: KademliaProtocol, n: Node, pingid: seq[byte]): Future[bool] =
 fut.complete(false)

 proc ping(k: KademliaProtocol, n: Node): seq[byte] =
-assert(n != k.thisNode)
+doAssert(n != k.thisNode)
 result = k.wire.sendPing(n)

 proc waitPing(k: KademliaProtocol, n: Node): Future[bool] =
 result = newFuture[bool]("waitPing")
-assert(n notin k.pingFutures)
+doAssert(n notin k.pingFutures)
 k.pingFutures[n] = result
 let fut = result
 onTimeout:
@@ -286,7 +286,7 @@ proc waitPing(k: KademliaProtocol, n: Node): Future[bool] =
 fut.complete(false)

 proc waitNeighbours(k: KademliaProtocol, remote: Node): Future[seq[Node]] =
-assert(remote notin k.neighboursCallbacks)
+doAssert(remote notin k.neighboursCallbacks)
 result = newFuture[seq[Node]]("waitNeighbours")
 let fut = result
 var neighbours = newSeqOfCap[Node](BUCKET_SIZE)
@@ -300,7 +300,7 @@ proc waitNeighbours(k: KademliaProtocol, remote: Node): Future[seq[Node]] =
 neighbours.add(i)
 if neighbours.len == BUCKET_SIZE:
 k.neighboursCallbacks.del(remote)
-assert(not fut.finished)
+doAssert(not fut.finished)
 fut.complete(neighbours)

 onTimeout:
@@ -31,7 +31,7 @@ proc nodesToConnect(p: PeerPool): seq[Node] {.inline.} =
 p.discovery.randomNodes(p.minPeers).filterIt(it notin p.discovery.bootstrapNodes)

 proc addObserver(p: PeerPool, observerId: int, observer: PeerObserver) =
-assert(observerId notin p.observers)
+doAssert(observerId notin p.observers)
 p.observers[observerId] = observer
 if not observer.onPeerConnected.isNil:
 for peer in p.connectedNodes.values:
@@ -311,7 +311,7 @@ proc registerRequest*(peer: Peer,
 timeoutAt: timeoutAt)
 peer.outstandingRequests[responseMsgId].addLast req

-assert(not peer.dispatcher.isNil)
+doAssert(not peer.dispatcher.isNil)
 let requestResolver = peer.dispatcher.messages[responseMsgId].requestResolver
 proc timeoutExpired(udata: pointer) = requestResolver(nil, responseFuture)

@@ -372,7 +372,7 @@ proc resolveResponseFuture(peer: Peer, msgId: int, msg: pointer, reqId: int) =
 template req: auto = outstandingReqs()[idx]

 if req.future.finished:
-assert req.timeoutAt <= fastEpochTime()
+doAssert req.timeoutAt <= fastEpochTime()
 # Here we'll remove the expired request by swapping
 # it with the last one in the deque (if necessary):
 if idx != outstandingReqs.len - 1:
@@ -601,7 +601,7 @@ macro p2pProtocolImpl(name: static[string],
 linkSendFailureToReqFuture = bindSym "linkSendFailureToReqFuture"

 # By convention, all Ethereum protocol names must be abbreviated to 3 letters
-assert shortName.len == 3
+doAssert shortName.len == 3

 template applyDecorator(p: NimNode, decorator: NimNode) =
 if decorator.kind != nnkNilLit: p.addPragma decorator
@@ -235,7 +235,7 @@ proc enlistInFlowControl*(network: LesNetwork,
 peerRechargingPower = 100) =
 let t = now()

-assert peer.isServer or peer.isClient
+doAssert peer.isServer or peer.isClient
 # Each Peer must be potential communication partner for us.
 # There will be useless peers on the network, but the logic
 # should make sure to disconnect them earlier in `onPeerConnected`.
@@ -92,7 +92,7 @@ proc getCostQuantity(fn: NimNode): tuple[quantityExpr, maxQuantity: NimNode] =
 # XXX: `getCustomPragmaVal` doesn't work yet on regular nnkProcDef nodes
 # (TODO: file as an issue)
 let p = fn.pragma
-assert p.kind == nnkPragma and p.len > 0 and $p[0][0] == "costQuantity"
+doAssert p.kind == nnkPragma and p.len > 0 and $p[0][0] == "costQuantity"

 result.quantityExpr = p[0][1]
 result.maxQuantity= p[0][2]
@@ -143,7 +143,7 @@ proc fromLE32(v: array[4, byte]): uint32 =
 proc leadingZeroBits(hash: MDigest): int =
 ## Number of most significant zero bits before the first one
 for h in hash.data:
-static: assert sizeof(h) == 1
+static: doAssert sizeof(h) == 1
 if h == 0:
 result += 8
 else:
@@ -167,7 +167,7 @@ proc topicBloom*(topic: Topic): Bloom =
 if (topic[3] and byte(1 shl i)) != 0: # fetch the 9'th bit from the last byte
 idx = idx + 256

-assert idx <= 511
+doAssert idx <= 511
 result[idx div 8] = result[idx div 8] or byte(1 shl (idx and 7'u16))

 proc generateRandomID(): string =
@@ -182,7 +182,7 @@ proc `or`(a, b: Bloom): Bloom =
 result[i] = a[i] or b[i]

 proc bytesCopy(bloom: var Bloom, b: Bytes) =
-assert b.len == bloomSize
+doAssert b.len == bloomSize
 copyMem(addr bloom[0], unsafeAddr b[0], bloomSize)

 proc toBloom*(topics: openArray[Topic]): Bloom =
@@ -48,7 +48,7 @@ template toa(a, b, c: untyped): untyped =
 toOpenArray((a), (b), (b) + (c) - 1)

 proc sxor[T](a: var openarray[T], b: openarray[T]) {.inline.} =
-assert(len(a) == len(b))
+doAssert(len(a) == len(b))
 for i in 0 ..< len(a):
 a[i] = a[i] xor b[i]

@@ -145,7 +145,7 @@ proc encryptMsg*(msg: openarray[byte], secrets: var SecretState): seq[byte] =
 # This would be safer if we use a thread-local sequ for the temporary buffer
 result = newSeq[byte](encryptedLength(msg.len))
 let s = encrypt(secrets, header, msg, result)
-assert s == Success
+doAssert s == Success

 proc getBodySize*(a: RlpxHeader): int =
 (int(a[0]) shl 16) or (int(a[1]) shl 8) or int(a[2])
eth/rlp.nim (10 changed lines)
@@ -100,7 +100,7 @@ proc isSingleByte*(self: Rlp): bool =
 hasData() and bytes[position] < BLOB_START_MARKER

 proc getByteValue*(self: Rlp): byte =
-assert self.isSingleByte()
+doAssert self.isSingleByte()
 return bytes[position]

 proc payloadOffset(self: Rlp): int =
@@ -233,7 +233,7 @@ proc toBytes*(self: Rlp): BytesRange =
 result = bytes.slice(ibegin, iend)

 proc currentElemEnd(self: Rlp): int =
-assert hasData()
+doAssert hasData()
 result = position

 if isSingleByte():
@@ -242,14 +242,14 @@ proc currentElemEnd(self: Rlp): int =
 result += payloadOffset() + payloadBytesCount()

 proc enterList*(self: var Rlp) =
-assert isList()
+doAssert isList()
 position += payloadOffset()

 proc skipElem*(rlp: var Rlp) =
 rlp.position = rlp.currentElemEnd

 iterator items*(self: var Rlp): var Rlp =
-assert isList()
+doAssert isList()

 var
 payloadOffset = payloadOffset()
@@ -381,7 +381,7 @@ proc toNodes*(self: var Rlp): RlpNode =
 for e in self:
 result.elems.add e.toNodes
 else:
-assert isBlob()
+doAssert isBlob()
 result.kind = rlpBlob
 result.bytes = toBytes()
 position = currentElemEnd()
@@ -91,7 +91,7 @@ proc decRet(n: var int, delta: int): int =
 proc maybeClosePendingLists(self) =
 while pendingLists.len > 0:
 let lastListIdx = pendingLists.len - 1
-assert pendingLists[lastListIdx].remainingItems >= 1
+doAssert pendingLists[lastListIdx].remainingItems >= 1
 if decRet(pendingLists[lastListIdx].remainingItems, 1) == 0:
 # A list have been just finished. It was started in `startList`.
 let listStartPos = pendingLists[lastListIdx].outBytes
@@ -36,8 +36,8 @@ proc decodeToBinKeypath*(path: BytesRange): TrieBitRange =
 if path[0]:
 path = path[4..^1]

-assert path[0] == false
+doAssert path[0] == false
-assert path[1] == false
+doAssert path[1] == false
 var bits = path[2].int shl 1
 bits = bits or path[3].int

@@ -62,8 +62,8 @@ proc parseNode*(node: BytesRange): TrieNode =
 raise newException(InvalidNode, "Invalid branch node, both child node should be 32 bytes long each")
 # Output: node type, left child, right child
 result = TrieNode(kind: BRANCH_TYPE, leftChild: node[1..<33], rightChild: node[33..^1])
-assert(result.leftChild.len == 32)
+doAssert(result.leftChild.len == 32)
-assert(result.rightChild.len == 32)
+doAssert(result.rightChild.len == 32)
 return result
 of KV_TYPE:
 if node.len <= 33:
@@ -35,7 +35,7 @@ proc getRootHash*(self: BinaryTrie): TrieNodeKey {.inline.} =
 self.rootHash

 template fetchNode(self: BinaryTrie, nodeHash: TrieNodeKey): TrieNode =
-assert(nodeHash.len == 32)
+doAssert(nodeHash.len == 32)
 parseNode self.db.get(nodeHash.toOpenArray).toRange

 proc getAux(self: BinaryTrie, nodeHash: TrieNodeKey, keyPath: TrieBitRange): BytesRange =
@@ -89,11 +89,11 @@ proc getBranch*(db: DB; rootHash: BytesContainer | KeccakHash; key: BytesContain
 proc isValidBranch*(branch: seq[BytesRange], rootHash: BytesContainer | KeccakHash, key, value: BytesContainer): bool =
 checkValidHashZ(rootHash)
 # branch must not be empty
-assert(branch.len != 0)
+doAssert(branch.len != 0)

 var db = newMemoryDB()
 for node in branch:
-assert(node.len != 0)
+doAssert(node.len != 0)
 let nodeHash = keccakHash(node)
 db.put(nodeHash.toOpenArray, node.toOpenArray)

@@ -122,11 +122,11 @@ template isMemoryDB(db: TrieDatabaseRef): bool =
 db.mostInnerTransaction.parentTransaction == nil

 proc totalRecordsInMemoryDB*(db: TrieDatabaseRef): int =
-assert isMemoryDB(db)
+doAssert isMemoryDB(db)
 return db.mostInnerTransaction.modifications.records.len

 iterator pairsInMemoryDB*(db: TrieDatabaseRef): (Bytes, Bytes) =
-assert isMemoryDB(db)
+doAssert isMemoryDB(db)
 for k, v in db.mostInnerTransaction.modifications.records:
 yield (k, v.value)

@@ -30,7 +30,7 @@ proc keccak*(r: BytesRange): KeccakHash =
 keccak256.digest r.toOpenArray

 template asDbKey(k: TrieNodeKey): untyped =
-assert k.usedBytes == 32
+doAssert k.usedBytes == 32
 k.hash.data

 proc expectHash(r: Rlp): BytesRange =
@@ -79,7 +79,7 @@ proc isPruning*(t: HexaryTrie): bool =
 proc getLocalBytes(x: TrieNodeKey): BytesRange =
 ## This proc should be used on nodes using the optimization
 ## of short values within the key.
-assert x.usedBytes < 32
+doAssert x.usedBytes < 32

 when defined(rangesEnableUnsafeAPI):
 result = unsafeRangeConstruction(x.data, x.usedBytes)
@@ -152,7 +152,7 @@ proc getKeysAux(db: DB, stack: var seq[tuple[nodeRlp: Rlp, path: NibblesRange]])
 key = path & k

 if isLeaf:
-assert(key.len mod 2 == 0)
+doAssert(key.len mod 2 == 0)
 return key.getBytes
 else:
 let
@@ -170,7 +170,7 @@ proc getKeysAux(db: DB, stack: var seq[tuple[nodeRlp: Rlp, path: NibblesRange]])

 var lastElem = nodeRlp.listElem(16)
 if not lastElem.isEmpty:
-assert(path.len mod 2 == 0)
+doAssert(path.len mod 2 == 0)
 return path.getBytes
 else:
 raise newException(CorruptedTrieError,
@@ -236,7 +236,7 @@ proc getPairsAux(db: DB, stack: var seq[tuple[nodeRlp: Rlp, path: NibblesRange]]
 value = nodeRlp.listElem(1)

 if isLeaf:
-assert(key.len mod 2 == 0)
+doAssert(key.len mod 2 == 0)
 return (key.getBytes, value.toBytes)
 else:
 let nextLookup = value.getLookup
@@ -252,7 +252,7 @@ proc getPairsAux(db: DB, stack: var seq[tuple[nodeRlp: Rlp, path: NibblesRange]]

 var lastElem = nodeRlp.listElem(16)
 if not lastElem.isEmpty:
-assert(path.len mod 2 == 0)
+doAssert(path.len mod 2 == 0)
 return (path.getBytes, lastElem.toBytes)
 else:
 raise newException(CorruptedTrieError,
@@ -338,7 +338,7 @@ proc replaceValue(data: Rlp, key: NibblesRange, value: BytesRange): Bytes =
 let prefix = hexPrefixEncode(key, true)
 return encodeList(prefix, value)

-assert data.isTrieBranch
+doAssert data.isTrieBranch
 if data.listLen == 2:
 return encodeList(data.listElem(0), value)

@@ -363,7 +363,7 @@ proc isTwoItemNode(self: HexaryTrie; r: Rlp): bool =
 return r.isList and r.listLen == 2

 proc isLeaf(r: Rlp): bool =
-assert r.isList and r.listLen == 2
+doAssert r.isList and r.listLen == 2
 let b = r.listElem(0).toBytes()
 return (b[0] and 0x20) != 0

@@ -399,7 +399,7 @@ proc deleteAux(self: var HexaryTrie; rlpWriter: var RlpWriter;
 return true

 proc graft(self: var HexaryTrie; r: Rlp): Bytes =
-assert r.isList and r.listLen == 2
+doAssert r.isList and r.listLen == 2
 var (origIsLeaf, origPath) = r.extensionNodeKey
 var value = r.listElem(1)

@@ -410,7 +410,7 @@ proc graft(self: var HexaryTrie; r: Rlp): Bytes =
 self.prune(nodeKey.toOpenArray)
 value = rlpFromBytes resolvedData

-assert value.listLen == 2
+doAssert value.listLen == 2
 let (valueIsLeaf, valueKey) = value.extensionNodeKey

 var rlpWriter = initRlpList(2)
@@ -424,7 +424,7 @@ proc mergeAndGraft(self: var HexaryTrie;
 if childPos == 16:
 output.append hexPrefixEncode(zeroNibblesRange, true)
 else:
-assert(not soleChild.isEmpty)
+doAssert(not soleChild.isEmpty)
 output.append int(hexPrefixEncodeByte(childPos))
 output.append(soleChild)
 result = output.finish()
@@ -437,7 +437,7 @@ proc deleteAt(self: var HexaryTrie;
 if origRlp.isEmpty:
 return zeroBytesRange

-assert origRlp.isTrieBranch
+doAssert origRlp.isTrieBranch
 let origBytes = origRlp.rawData
 if origRlp.listLen == 2:
 let (isLeaf, k) = origRlp.extensionNodeKey
@@ -539,7 +539,7 @@ proc mergeAt(self: var HexaryTrie, orig: Rlp, origHash: KeccakHash,
 if orig.isEmpty:
 return origWithNewValue()

-assert orig.isTrieBranch
+doAssert orig.isTrieBranch
 if orig.listLen == 2:
 let (isLeaf, k) = orig.extensionNodeKey
 var origValue = orig.listElem(1)
@@ -574,7 +574,7 @@ proc mergeAt(self: var HexaryTrie, orig: Rlp, origHash: KeccakHash,
 var branches = initRlpList(17)
 if k.len == 0:
 # The key is now exhausted. This must be a leaf node
-assert isLeaf
+doAssert isLeaf
 for i in 0 ..< 16:
 branches.append ""
 branches.append origValue
@@ -618,7 +618,7 @@ proc put*(self: var HexaryTrie; key, value: BytesRange) =
 let root = self.root.hash

 var rootBytes = self.db.get(root.data).toRange
-assert rootBytes.len > 0
+doAssert rootBytes.len > 0

 let newRootBytes = self.mergeAt(rlpFromBytes(rootBytes), root,
 initNibbleRange(key), value)
@@ -49,7 +49,7 @@ proc slice*(r: NibblesRange, ibegin: int, iend = -1): NibblesRange =
 result.ibegin = r.ibegin + ibegin
 let e = if iend < 0: r.iend + iend + 1
 else: r.ibegin + iend
-assert ibegin >= 0 and e <= result.bytes.len * 2
+doAssert ibegin >= 0 and e <= result.bytes.len * 2
 result.iend = e

 template writeFirstByte(nibbleCountExpr) {.dirty.} =
@@ -79,7 +79,7 @@ proc hexPrefixEncode*(r1, r2: NibblesRange, isLeaf = false): Bytes =
 writeNibbles(r2)

 proc hexPrefixEncodeByte*(val: byte, isLeaf = false): byte =
-assert val < 16
+doAssert val < 16
 result = (((byte(isLeaf) * 2) + 1) shl 4) or val

 proc sharedPrefixLen*(lhs, rhs: NibblesRange): int =
@@ -22,8 +22,8 @@ type
 DoubleHash = array[64, byte]

 proc initDoubleHash(a, b: openArray[byte]): DoubleHash =
-assert(a.len == 32, $a.len)
+doAssert(a.len == 32, $a.len)
-assert(b.len == 32, $b.len)
+doAssert(b.len == 32, $b.len)
 copyMem(result[ 0].addr, a[0].unsafeAddr, 32)
 copyMem(result[32].addr, b[0].unsafeAddr, 32)

@@ -75,13 +75,13 @@ proc getAux(self: SparseBinaryTrie, path: BitRange, rootHash: ByteRange): ByteRa

 proc get*(self: SparseBinaryTrie, key: BytesContainer): ByteRange =
 ## gets a key from the tree.
-assert(key.len == pathByteLen)
+doAssert(key.len == pathByteLen)
 let path = MutByteRange(key.toRange).bits
 self.getAux(path, self.rootHash)

 proc get*(self: SparseBinaryTrie, key, rootHash: distinct BytesContainer): ByteRange =
 ## gets a key from the tree at a specific root.
-assert(key.len == pathByteLen)
+doAssert(key.len == pathByteLen)
 let path = MutByteRange(key.toRange).bits
 self.getAux(path, rootHash.toRange)

@@ -111,14 +111,14 @@ proc setAux(self: var SparseBinaryTrie, value: ByteRange,
 proc set*(self: var SparseBinaryTrie, key, value: distinct BytesContainer) =
 ## sets a new value for a key in the tree, returns the new root,
 ## and sets the new current root of the tree.
-assert(key.len == pathByteLen)
+doAssert(key.len == pathByteLen)
 let path = MutByteRange(key.toRange).bits
 self.rootHash = self.setAux(value.toRange, path, 0, self.rootHash)

 proc set*(self: var SparseBinaryTrie, key, value, rootHash: distinct BytesContainer): ByteRange =
 ## sets a new value for a key in the tree at a specific root,
 ## and returns the new root.
-assert(key.len == pathByteLen)
+doAssert(key.len == pathByteLen)
 let path = MutByteRange(key.toRange).bits
 self.setAux(value.toRange, path, 0, rootHash.toRange)

@@ -127,7 +127,7 @@ template exists*(self: SparseBinaryTrie, key: BytesContainer): bool =

 proc del*(self: var SparseBinaryTrie, key: BytesContainer) =
 ## Equals to setting the value to zeroBytesRange
-assert(key.len == pathByteLen)
+doAssert(key.len == pathByteLen)
 self.set(key, zeroBytesRange)

 # Dictionary API
@@ -141,7 +141,7 @@ template contains*(self: SparseBinaryTrie, key: BytesContainer): bool =
 self.exists(key)

 proc proveAux(self: SparseBinaryTrie, key, rootHash: ByteRange, output: var seq[ByteRange]): bool =
-assert(key.len == pathByteLen)
+doAssert(key.len == pathByteLen)
 var currVal = self.db.get(rootHash.toOpenArray).toRange
 if currVal.len == 0: return false

@@ -17,8 +17,8 @@ let emptyNodeHashes* = makeInitialEmptyTreeHash(treeHeight)

 # VerifyProof verifies a Merkle proof.
 proc verifyProofAux*(proof: seq[ByteRange], root, key, value: ByteRange): bool =
-assert(root.len == 32)
+doAssert(root.len == 32)
-assert(key.len == pathByteLen)
+doAssert(key.len == pathByteLen)
 var
 path = MutByteRange(key).bits
 curHash = keccakHash(value)
@@ -11,7 +11,7 @@ proc toTrieNodeKey*(hash: KeccakHash): TrieNodeKey =

 template checkValidHashZ*(x: untyped) =
 when x.type isnot KeccakHash:
-assert(x.len == 32 or x.len == 0)
+doAssert(x.len == 32 or x.len == 0)

 template isZeroHash*(x: ByteRange): bool =
 x.len == 0
@@ -26,7 +26,7 @@
 # sig = ecc.sign(msghash)
 # print(" sig='{}',".format(encode_hex(sig)))
 # print(" raw_sig='{}')".format(crypto._decode_sig(sig)))
-# assert crypto.ecdsa_recover(msghash, sig) == pubkey
+# doAssert crypto.ecdsa_recover(msghash, sig) == pubkey
 # """

 import nimcrypto
@@ -13,7 +13,7 @@ import eth/p2p/ecies

 proc compare[A, B](x: openarray[A], y: openarray[B], s: int = 0): bool =
 result = true
-assert(s >= 0)
+doAssert(s >= 0)
 var size = if s == 0: min(len(x), len(y)) else: min(s, min(len(x), len(y)))
 for i in 0..(size - 1):
 if x[i] != cast[A](y[i]):
@@ -110,7 +110,7 @@ asyncTest "network with 3 peers using custom protocols":
 await sendResponseWithId(peer, abc, abcRes, reqId, "mock response")
 await sleepAsync(100)
 let r = await peer.abcReq(1)
-assert r.get.data == "response to #1"
+doAssert r.get.data == "response to #1"

 m.expect(abc.abcRes)

@@ -316,11 +316,11 @@ suite "hexary trie":

 proc isValidBranch(branch: seq[BytesRange], rootHash: KeccakHash, key, value: BytesRange): bool =
 # branch must not be empty
-assert(branch.len != 0)
+doAssert(branch.len != 0)

 var db = newMemoryDB()
 for node in branch:
-assert(node.len != 0)
+doAssert(node.len != 0)
 let nodeHash = hexary.keccak(node)
 db.put(nodeHash.data, node.toOpenArray)

@@ -12,7 +12,7 @@ type
 value*: string

 proc randGen*[T](minVal, maxVal: T): RandGen[T] =
-assert(minVal <= maxVal)
+doAssert(minVal <= maxVal)
 result.minVal = minVal
 result.maxVal = maxVal