Jacek Sieka 2020-04-18 10:17:59 +02:00 committed by GitHub
parent e50b5e34cf
commit 1646d78d83
35 changed files with 80 additions and 81 deletions

@@ -17,7 +17,7 @@ requires "nim >= 1.2.0",
 
 proc runTest(path: string) =
   echo "\nRunning: ", path
-  exec "nim c -r -d:release -d:chronicles_log_level=ERROR --verbosity:0 --hints:off --warnings:off " & path
+  exec "nim c -r -d:release -d:chronicles_log_level=ERROR --verbosity:0 --hints:off " & path
   rmFile path
 
 proc runKeyfileTests() =
@@ -66,31 +66,13 @@ task test_p2p, "run p2p tests":
   runP2pTests()
 
 proc runRlpTests() =
-  for filename in [
-      "test_api_usage",
-      "test_json_suite",
-      "test_object_serialization",
-    ]:
-    runTest("tests/rlp/" & filename)
+  runTest("tests/rlp/all_tests")
 
 task test_rlp, "run rlp tests":
   runRlpTests()
 
 proc runTrieTests() =
-  for filename in [
-      "test_binaries_utils",
-      "test_bin_trie",
-      "test_branches_utils",
-      "test_caching_db_backend",
-      "test_examples",
-      "test_hexary_trie",
-      "test_json_suite",
-      "test_nibbles",
-      "test_sparse_binary_trie",
-      "test_storage_backends",
-      "test_transaction_db",
-    ]:
-    runTest("tests/trie/" & filename)
+  runTest("tests/trie/all_tests")
 
 task test_trie, "run trie tests":
   runTrieTests()
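The per-file loops above give way to one aggregated binary per suite, so each suite is compiled once rather than once per test module, and `--warnings:off` can apparently be dropped because the new test modules carry a `{.used.}` pragma (see the hunks further down). A minimal sketch of the resulting task shape, with the flag list abbreviated and everything else assumed:

proc runTest(path: string) =
  echo "\nRunning: ", path
  # warnings stay enabled; unused-import noise is handled by {.used.} in the test modules
  exec "nim c -r -d:release --hints:off " & path
  rmFile path

task test_rlp, "run rlp tests":
  runTest("tests/rlp/all_tests")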

@@ -261,7 +261,7 @@ proc append*(rlpWriter: var RlpWriter, value: StUint) =
     rlpWriter.append bytes.toOpenArray(bytes.len - nonZeroBytes,
                                        bytes.len - 1)
   else:
-    rlpWriter.append(value.toInt)
+    rlpWriter.append(value.truncate(int))
 
 proc read*(rlp: var Rlp, T: typedesc[Stint]): T {.inline.} =
   # The Ethereum Yellow Paper defines the RLP serialization only
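`toInt` is replaced with stint's `truncate`, which narrows a wide unsigned integer to a native type by keeping only the low-order bits. A small illustration, assuming the stint package:

import stint

let v = 1_000_000.u256   # 256-bit unsigned integer
echo v.truncate(int)     # 1000000, only the bits that fit an int are kept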

@@ -168,7 +168,7 @@ proc newSyncContext(chain: AbstractChainDB, peerPool: PeerPool): SyncContext =
   new result
   result.chain = chain
   result.peerPool = peerPool
-  result.trustedPeers = initSet[Peer]()
+  result.trustedPeers = initHashSet[Peer]()
   result.finalizedBlock = chain.getBestBlockHeader().blockNumber
 
 proc handleLostPeer(ctx: SyncContext) =
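This commit's recurring rename: the `initSet`/`toSet` names from the `sets` module are deprecated in recent Nim releases in favour of `initHashSet`/`toHashSet`; behaviour is unchanged. For example:

import sets

var trusted = initHashSet[string]()   # formerly initSet[string]()
trusted.incl "peer-1"
echo "peer-1" in trusted              # true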

@@ -122,7 +122,8 @@ proc eciesEncrypt*(input: openarray[byte], output: var openarray[byte],
   copyMem(addr encKey[0], addr material[0], aes128.sizeKey)
-  var macKey = sha256.digest(material, ostart = KeyLength div 2)
+  var macKey =
+    sha256.digest(material.toOpenArray(KeyLength div 2, material.high))
   burnMem(material)
 
   var header = cast[ptr EciesHeader](addr output[0])
@@ -189,7 +190,8 @@ proc eciesDecrypt*(input: openarray[byte],
   burnMem(secret)
 
   copyMem(addr encKey[0], addr material[0], aes128.sizeKey)
-  var macKey = sha256.digest(material, ostart = KeyLength div 2)
+  var macKey =
+    sha256.digest(material.toOpenArray(KeyLength div 2, material.high))
   burnMem(material)
 
   let macsize = eciesMacLength(len(input) - eciesOverheadLength())
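Instead of passing an offset to the digest call, the MAC key is now derived by hashing a zero-copy slice of `material` via the built-in `toOpenArray`. A standalone sketch of the same pattern, with arbitrary buffer contents:

import nimcrypto

let material = [byte 1, 2, 3, 4, 5, 6, 7, 8]
# hash only the second half of the buffer without copying it
let macKey = sha256.digest(material.toOpenArray(4, material.high))
echo macKey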

@@ -363,8 +363,8 @@ proc lookup*(k: KademliaProtocol, nodeId: NodeId): Future[seq[Node]] {.async.} =
   ## It approaches the target by querying nodes that are closer to it on each iteration. The
   ## given target does not need to be an actual node identifier.
-  var nodesAsked = initSet[Node]()
-  var nodesSeen = initSet[Node]()
+  var nodesAsked = initHashSet[Node]()
+  var nodesSeen = initHashSet[Node]()
 
   proc findNode(nodeId: NodeId, remote: Node): Future[seq[Node]] {.async.} =
     k.wire.sendFindNode(remote, nodeId)
@@ -388,7 +388,7 @@ proc lookup*(k: KademliaProtocol, nodeId: NodeId): Future[seq[Node]] {.async.} =
     result = candidates
 
   proc excludeIfAsked(nodes: seq[Node]): seq[Node] =
-    result = toSeq(items(nodes.toSet() - nodesAsked))
+    result = toSeq(items(nodes.toHashSet() - nodesAsked))
     sortByDistance(result, nodeId, FIND_CONCURRENCY)
 
   var closest = k.routing.neighbours(nodeId)
@@ -396,7 +396,7 @@ proc lookup*(k: KademliaProtocol, nodeId: NodeId): Future[seq[Node]] {.async.} =
   var nodesToAsk = excludeIfAsked(closest)
   while nodesToAsk.len != 0:
     trace "Node lookup; querying ", nodesToAsk
-    nodesAsked.incl(nodesToAsk.toSet())
+    nodesAsked.incl(nodesToAsk.toHashSet())
     let results = await all(nodesToAsk.mapIt(findNode(nodeId, it)))
     for candidates in results:
       closest.add(candidates)
@@ -489,7 +489,7 @@ proc randomNodes*(k: KademliaProtocol, count: int): seq[Node] =
     count = sz
 
   result = newSeqOfCap[Node](count)
-  var seen = initSet[Node]()
+  var seen = initHashSet[Node]()
 
   # This is a rather inneficient way of randomizing nodes from all buckets, but even if we
   # iterate over all nodes in the routing table, the time it takes would still be

@@ -21,7 +21,7 @@ proc newPeerPool*(network: EthereumNode,
   result.networkId = networkId
   result.discovery = discovery
   result.connectedNodes = initTable[Node, Peer]()
-  result.connectingNodes = initSet[Node]()
+  result.connectingNodes = initHashSet[Node]()
   result.observers = initTable[int, PeerObserver]()
   result.listenPort = listenPort

@@ -1,7 +1,7 @@
 import
   deques, tables,
   eth/[rlp, keys], chronos, eth/common/eth_types,
-  ../enode, ../kademlia, ../discovery, ../options, ../rlpxcrypt
+  ../enode, ../kademlia, ../discovery, ../rlpxcrypt
 
 const
   useSnappy* = defined(useSnappy)

@@ -437,7 +437,6 @@ proc recvMsg*(peer: Peer): Future[tuple[msgId: int, msgData: Rlp]] {.async.} =
                           "Cannot read RLPx message id")
 
 proc checkedRlpRead(peer: Peer, r: var Rlp, MsgType: type): auto {.inline.} =
-  let tmp = r
   when defined(release):
     return r.read(MsgType)
   else:

@@ -419,7 +419,7 @@ when defined(testing):
 
     setup:
       var lesNetwork = new LesNetwork
-      lesNetwork.peers = initSet[LesPeer]()
+      lesNetwork.peers = initHashSet[LesPeer]()
       lesNetwork.initFlowControl(dummyLes.protocolInfo,
                                  reqCostTarget = 300,
                                  maxReqCount = 5,

@@ -77,7 +77,7 @@ const
   keyAnnounceSignature = "sign"
 
 proc initProtocolState(network: LesNetwork, node: EthereumNode) {.gcsafe.} =
-  network.peers = initSet[LesPeer]()
+  network.peers = initHashSet[LesPeer]()
 
 proc addPeer(network: LesNetwork, peer: LesPeer) =
   network.enlistInFlowControl peer

@@ -1,6 +1,3 @@
-import
-  ../types
-
 const
   MAX_LENGTH_BYTES* = 8

@@ -12,7 +12,7 @@ proc newCachingDB*(backing: TrieDatabaseRef): CachingDB =
   result.new()
   result.backing = backing
   result.changed = initTable[seq[byte], seq[byte]]()
-  result.deleted = initSet[seq[byte]]()
+  result.deleted = initHashSet[seq[byte]]()
 
 proc get*(db: CachingDB, key: openarray[byte]): seq[byte] =
   let key = @key

@@ -30,7 +30,7 @@ type
   MDB_Dbi = distinct cuint
 
   MDB_val = object
-    mv_size: csize
+    mv_size: csize_t
     mv_data: pointer
 
 # this is only a subset of LMDB API needed in nimbus
@@ -77,7 +77,7 @@ proc txCommit*(db: ChainDB, manualCommit = true): bool =
     mdb_dbi_close(db.env, db.dbi)
 
 proc toMdbVal(val: openArray[byte]): MDB_Val =
-  result.mv_size = val.len
+  result.mv_size = csize_t(val.len)
   result.mv_data = unsafeAddr val[0]
 
 proc get*(db: ChainDB, key: openarray[byte]): seq[byte] =
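`csize` is deprecated in newer Nim versions because it was defined as a signed integer; `csize_t` matches C's unsigned `size_t` and is the right type for LMDB's `mv_size` field. Converting explicitly at the boundary, as the hunk does, is the usual pattern. A small sketch with a hypothetical hand-written binding:

# strlen returns size_t in C, so the binding uses csize_t
proc strlen(s: cstring): csize_t {.importc, header: "<string.h>".}

let data = @[byte 1, 2, 3]
let size = csize_t(data.len)       # explicit conversion from int to the C size type
echo strlen("hello"), " ", size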

@@ -1,5 +1,5 @@
 import
-  stew/ranges/[ptr_arith, typedranges, bitranges], eth/rlp/types,
+  stew/ranges/[typedranges, bitranges], eth/rlp/types,
   trie_defs, db, binaries, trie_utils
 
 export

@@ -108,7 +108,7 @@ proc put*(db: MemoryLayer, key, val: openarray[byte]) =
 proc newMemoryLayer: MemoryLayer =
   result.new
   result.records = initTable[Bytes, MemDBRec]()
-  result.deleted = initSet[Bytes]()
+  result.deleted = initHashSet[Bytes]()
 
 proc commit(memDb: MemoryLayer, db: TrieDatabaseRef, applyDeletes: bool = true) =
   if applyDeletes:

@@ -1,5 +1,5 @@
 import
-  stew/ranges/[ptr_arith, typedranges, bitranges], eth/rlp/types,
+  stew/ranges/[typedranges, bitranges], eth/rlp/types,
   trie_defs, trie_utils, db, sparse_proofs
 
 export

@@ -1,6 +1,6 @@
 import
   stew/ranges/[typedranges, bitranges],
-  trie_defs, db, trie_utils
+  trie_defs, trie_utils
 
 const
   treeHeight* = 160

@@ -1,10 +1,8 @@
 import
-  strutils, parseutils,
+  stew/byteutils,
   stew/ranges/[typedranges, ptr_arith], nimcrypto/[hash, keccak],
   trie_defs, binaries
 
-#proc baseAddr*(x: Bytes): ptr byte = x[0].unsafeAddr
-
 proc toTrieNodeKey*(hash: KeccakHash): TrieNodeKey =
   result = newRange[byte](32)
   copyMem(result.baseAddr, hash.data.baseAddr, 32)
@@ -26,17 +24,7 @@ proc toRange*(str: string): ByteRange =
   result = toRange(s)
 
 proc hashFromHex*(bits: static[int], input: string): MDigest[bits] =
-  if input.len != bits div 4:
-    raise newException(ValueError,
-      "The input string has incorrect size")
-
-  for i in 0 ..< bits div 8:
-    var nextByte: int
-    if parseHex(input, nextByte, i*2, 2) == 2:
-      result.data[i] = uint8(nextByte)
-    else:
-      raise newException(ValueError,
-        "The input string contains invalid characters")
+  MDigest(data: hexToByteArray[bits div 8](input))
 
 template hashFromHex*(s: static[string]): untyped = hashFromHex(s.len * 4, s)
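The hand-rolled hex parsing is replaced by `hexToByteArray` from `stew/byteutils`, which fills a fixed-size array and raises on malformed input. Roughly equivalent usage, assuming the stew package:

import stew/byteutils

let digest = hexToByteArray[4]("deadbeef")
echo digest   # [222, 173, 190, 239]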

tests/rlp/all_tests.nim (new file)
@@ -0,0 +1,4 @@
+import
+  ./test_api_usage,
+  ./test_json_suite,
+  ./test_object_serialization
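Each test module that the aggregator imports gains a top-level `{.used.}` pragma (visible in the hunks below), which marks the module as used so that importing it purely for its side effects does not trigger an unused-import warning, even with warnings enabled. The pattern, in a minimal hypothetical pair of files:

# test_example.nim (hypothetical)
{.used.}   # importing this module only for side effects no longer warns

import unittest

test "addition":
  check 1 + 1 == 2

# all_tests.nim (hypothetical aggregator)
import ./test_example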

@@ -31,9 +31,7 @@ test "you cannot finish a list without appending enough elements":
   writer.append "bar"
 
   expect Defect:
-    let result = writer.finish
+    discard writer.finish
 
-proc withNewLines(x: string): string = x & "\n"
-
 test "encode/decode object":
   type

@@ -1,3 +1,5 @@
+{.used.}
+
 import
   os, strutils, strformat,
   util/json_testing

@@ -1,3 +1,5 @@
+{.used.}
+
 import
   unittest, times, eth/rlp, util/json_testing
 
@@ -17,8 +19,6 @@ type
     b: string
     f: Foo
 
-  CompressedFoo = object
-
   CustomSerialized = object
     customFoo {.rlpCustomSerialization.}: Foo
     ignored {.rlpIgnore.}: int

tests/trie/all_tests.nim (new file)
@@ -0,0 +1,12 @@
+import
+  test_bin_trie,
+  test_binaries_utils,
+  test_branches_utils,
+  test_caching_db_backend,
+  test_examples,
+  test_hexary_trie,
+  test_json_suite,
+  test_nibbles,
+  test_sparse_binary_trie,
+  test_storage_backends,
+  test_transaction_db

@@ -1,3 +1,5 @@
+{.used.}
+
 import
   unittest, random,
   eth/trie/[trie_defs, db, binary],
@@ -66,10 +68,8 @@ suite "binary trie":
       if will_raise_error:
         try:
           trie.deleteSubtrie(key_to_be_deleted)
-        except NodeOverrideError as E:
+        except NodeOverrideError:
           discard
-        except:
-          check(false)
       else:
         let root_hash_before_delete = trie.getRootHash()
         trie.deleteSubtrie(key_to_be_deleted)
@@ -101,10 +101,8 @@ suite "binary trie":
       if if_error:
         try:
           trie.delete(invalidKey)
-        except NodeOverrideError as E:
+        except NodeOverrideError:
           discard
-        except:
-          check(false)
       else:
         let previous_root_hash = trie.getRootHash()
         trie.delete(invalidKey)
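The tests stop binding the exception to a name that is never read (which would otherwise warn once warnings are back on) and drop the catch-all branch: with only the expected error handled, any unexpected exception propagates and fails the test by itself. The same idea in isolation:

proc mightFail() =
  raise newException(ValueError, "boom")

try:
  mightFail()
except ValueError:
  discard   # the expected error is swallowed; anything else would propagate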

@@ -1,3 +1,5 @@
+{.used.}
+
 import
   unittest, strutils,
   stew/ranges/bitranges, eth/rlp/types, nimcrypto/[keccak, hash],

@@ -1,5 +1,7 @@
+{.used.}
+
 import
-  random, sets, unittest, strutils, sets,
+  sets, unittest, strutils, sets,
   eth/trie/[db, binary, branches]
 
 suite "branches utils":
@@ -91,7 +93,7 @@ suite "branches utils":
     for c in trieNodesData:
       let root = c[0].toRange()
       let nodes = toRanges(c[1])
-      check toSet(nodes) == toSet(getTrieNodes(db, root))
+      check toHashSet(nodes) == toHashSet(getTrieNodes(db, root))
 
 const witnessData = [
     ("\x12\x34\x56\x78\x9b",
@@ -137,6 +139,6 @@ suite "branches utils":
       let nodes = toRanges(c[1])
       if nodes.len != 0:
-        let x = toSet(nodes)
-        let y = toSet(getWitness(db, trie.getRootHash(), key))
+        let x = toHashSet(nodes)
+        let y = toHashSet(getWitness(db, trie.getRootHash(), key))
         check x == y

@@ -1,3 +1,5 @@
+{.used.}
+
 import
   unittest,
   eth/trie/db,

@@ -1,3 +1,5 @@
+{.used.}
+
 import
   unittest,
   nimcrypto/[keccak, hash],

@@ -1,3 +1,5 @@
+{.used.}
+
 import
   unittest, sequtils, os,
   stew/ranges/typedranges, eth/trie/[hexary, db, trie_defs], nimcrypto/utils,

@@ -1,12 +1,11 @@
+{.used.}
+
 import
-  os, json, tables, sequtils, strutils, algorithm,
-  eth/rlp/types, nimcrypto/utils,
+  os, json, tables, strutils, algorithm,
+  eth/rlp/types,
   eth/trie/[trie_defs, db, hexary],
   ./testutils
 
-proc `==`(lhs: JsonNode, rhs: string): bool =
-  lhs.kind == JString and lhs.str == rhs
-
 type
   TestOp = object
     idx: int

@@ -1,3 +1,5 @@
+{.used.}
+
 import
   unittest,
   eth/trie/nibbles

@@ -1,3 +1,5 @@
+{.used.}
+
 import
   unittest, random,
   eth/trie/[trie_defs, db, sparse_binary, sparse_proofs],

@@ -1,3 +1,5 @@
+{.used.}
+
 import
   unittest, macros, os,
   eth/trie/backends/[rocksdb_backend, sqlite_backend, lmdb_backend]

@@ -1,5 +1,7 @@
+{.used.}
+
 import
-  unittest, strutils, sequtils, os,
+  unittest,
   eth/trie/[db, trie_defs], ./testutils,
   eth/rlp/types as rlpTypes

@@ -56,7 +56,7 @@ proc randList*(T: typedesc, strGen, listGen: RandGen, unique: bool = true): seq[
   let listLen = listGen.getVal()
   result = newSeqOfCap[T](listLen)
   if unique:
-    var set = initSet[T]()
+    var set = initHashSet[T]()
     for len in 0..<listLen:
       while true:
         let x = randPrimitives[T](strGen.getVal())