mirror of https://github.com/status-im/nim-eth.git
cleanups (#226)
parent e50b5e34cf
commit 1646d78d83

eth.nimble | 24
@@ -17,7 +17,7 @@ requires "nim >= 1.2.0",
 proc runTest(path: string) =
   echo "\nRunning: ", path
-  exec "nim c -r -d:release -d:chronicles_log_level=ERROR --verbosity:0 --hints:off --warnings:off " & path
+  exec "nim c -r -d:release -d:chronicles_log_level=ERROR --verbosity:0 --hints:off " & path
   rmFile path

 proc runKeyfileTests() =
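Dropping --warnings:off likely motivates most of the mechanical renames in the rest of this commit: with warnings visible again, every deprecated call in the test suite shows up in the build output. A sketch of the kind of code that starts warning under Nim 0.20+ (illustrative only, not from this repo):

import sets

var a = initSet[string]()       # warning: deprecated since v0.20; use initHashSet
var b = initHashSet[string]()   # no warning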
@@ -66,31 +66,13 @@ task test_p2p, "run p2p tests":
   runP2pTests()

 proc runRlpTests() =
-  for filename in [
-    "test_api_usage",
-    "test_json_suite",
-    "test_object_serialization",
-  ]:
-    runTest("tests/rlp/" & filename)
+  runTest("tests/rlp/all_tests")

 task test_rlp, "run rlp tests":
   runRlpTests()

 proc runTrieTests() =
-  for filename in [
-    "test_binaries_utils",
-    "test_bin_trie",
-    "test_branches_utils",
-    "test_caching_db_backend",
-    "test_examples",
-    "test_hexary_trie",
-    "test_json_suite",
-    "test_nibbles",
-    "test_sparse_binary_trie",
-    "test_storage_backends",
-    "test_transaction_db",
-  ]:
-    runTest("tests/trie/" & filename)
+  runTest("tests/trie/all_tests")

 task test_trie, "run trie tests":
   runTrieTests()
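The per-file loops above are collapsed into one all_tests module per suite (the new files appear further down in this diff), so each suite is compiled and linked once instead of once per test file. A sketch of the aggregator pattern, with hypothetical module names:

# tests/foo/all_tests.nim - compiles every suite into a single binary
import
  ./test_one,
  ./test_two

Each leaf module runs its unittest suites as a side effect of being imported, which is why the leaf files below also gain a {.used.} pragma.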
@@ -261,7 +261,7 @@ proc append*(rlpWriter: var RlpWriter, value: StUint) =
     rlpWriter.append bytes.toOpenArray(bytes.len - nonZeroBytes,
                                        bytes.len - 1)
   else:
-    rlpWriter.append(value.toInt)
+    rlpWriter.append(value.truncate(int))

 proc read*(rlp: var Rlp, T: typedesc[Stint]): T {.inline.} =
   # The Ethereum Yellow Paper defines the RLP serialization only
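stint replaced the toInt accessor with an explicit truncate, which makes the narrowing conversion visible at the call site. A minimal sketch (assuming a recent stint package):

import stint

let v = 300.u256
echo v.truncate(int)   # 300 - keeps only the low bits that fit the target type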
@@ -168,7 +168,7 @@ proc newSyncContext(chain: AbstractChainDB, peerPool: PeerPool): SyncContext =
   new result
   result.chain = chain
   result.peerPool = peerPool
-  result.trustedPeers = initSet[Peer]()
+  result.trustedPeers = initHashSet[Peer]()
   result.finalizedBlock = chain.getBestBlockHeader().blockNumber

 proc handleLostPeer(ctx: SyncContext) =
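Nim 0.20 renamed the hash-set constructors: initSet and toSet became initHashSet and toHashSet, with the old names kept only as deprecated aliases. The same one-line substitution repeats throughout this commit. A minimal sketch:

import sets

var trusted = initHashSet[string]()
trusted.incl "peer-1"
echo "peer-1" in trusted        # true
echo @["a", "b"].toHashSet()    # a set containing "a" and "b"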
@@ -122,7 +122,8 @@ proc eciesEncrypt*(input: openarray[byte], output: var openarray[byte],
   copyMem(addr encKey[0], addr material[0], aes128.sizeKey)

-  var macKey = sha256.digest(material, ostart = KeyLength div 2)
+  var macKey =
+    sha256.digest(material.toOpenArray(KeyLength div 2, material.high))
   burnMem(material)

   var header = cast[ptr EciesHeader](addr output[0])

@@ -189,7 +190,8 @@ proc eciesDecrypt*(input: openarray[byte],
   burnMem(secret)

   copyMem(addr encKey[0], addr material[0], aes128.sizeKey)
-  var macKey = sha256.digest(material, ostart = KeyLength div 2)
+  var macKey =
+    sha256.digest(material.toOpenArray(KeyLength div 2, material.high))
   burnMem(material)

   let macsize = eciesMacLength(len(input) - eciesOverheadLength())
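nimcrypto's digest previously accepted ostart/ofinish offsets; the replacement slices the buffer with toOpenArray, a zero-copy view, so only the second half of the key material is hashed. A minimal sketch (assuming nimcrypto):

import nimcrypto

var material: array[32, byte]
# hash only the second half of the buffer, without copying it
let macKey = sha256.digest(material.toOpenArray(16, material.high))
echo macKey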
@@ -363,8 +363,8 @@ proc lookup*(k: KademliaProtocol, nodeId: NodeId): Future[seq[Node]] {.async.} =
   ## It approaches the target by querying nodes that are closer to it on each iteration. The
   ## given target does not need to be an actual node identifier.
-  var nodesAsked = initSet[Node]()
-  var nodesSeen = initSet[Node]()
+  var nodesAsked = initHashSet[Node]()
+  var nodesSeen = initHashSet[Node]()

   proc findNode(nodeId: NodeId, remote: Node): Future[seq[Node]] {.async.} =
     k.wire.sendFindNode(remote, nodeId)

@@ -388,7 +388,7 @@ proc lookup*(k: KademliaProtocol, nodeId: NodeId): Future[seq[Node]] {.async.} =
     result = candidates

   proc excludeIfAsked(nodes: seq[Node]): seq[Node] =
-    result = toSeq(items(nodes.toSet() - nodesAsked))
+    result = toSeq(items(nodes.toHashSet() - nodesAsked))
     sortByDistance(result, nodeId, FIND_CONCURRENCY)

   var closest = k.routing.neighbours(nodeId)

@@ -396,7 +396,7 @@ proc lookup*(k: KademliaProtocol, nodeId: NodeId): Future[seq[Node]] {.async.} =
   var nodesToAsk = excludeIfAsked(closest)
   while nodesToAsk.len != 0:
     trace "Node lookup; querying ", nodesToAsk
-    nodesAsked.incl(nodesToAsk.toSet())
+    nodesAsked.incl(nodesToAsk.toHashSet())
     let results = await all(nodesToAsk.mapIt(findNode(nodeId, it)))
     for candidates in results:
       closest.add(candidates)
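The excludeIfAsked pattern - set difference followed by conversion back to a seq - works unchanged under the new names. A standalone sketch:

import sets, sequtils

let candidates = @["a", "b", "c"]
let asked = ["b"].toHashSet()
# same shape as excludeIfAsked above: difference, then back to a seq
let fresh = toSeq(items(candidates.toHashSet() - asked))
echo fresh    # iteration order is undefined; contains "a" and "c"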
@@ -489,7 +489,7 @@ proc randomNodes*(k: KademliaProtocol, count: int): seq[Node] =
     count = sz

   result = newSeqOfCap[Node](count)
-  var seen = initSet[Node]()
+  var seen = initHashSet[Node]()

   # This is a rather inneficient way of randomizing nodes from all buckets, but even if we
   # iterate over all nodes in the routing table, the time it takes would still be
@@ -21,7 +21,7 @@ proc newPeerPool*(network: EthereumNode,
   result.networkId = networkId
   result.discovery = discovery
   result.connectedNodes = initTable[Node, Peer]()
-  result.connectingNodes = initSet[Node]()
+  result.connectingNodes = initHashSet[Node]()
   result.observers = initTable[int, PeerObserver]()
   result.listenPort = listenPort
@@ -1,7 +1,7 @@
 import
   deques, tables,
   eth/[rlp, keys], chronos, eth/common/eth_types,
-  ../enode, ../kademlia, ../discovery, ../options, ../rlpxcrypt
+  ../enode, ../kademlia, ../discovery, ../rlpxcrypt

 const
   useSnappy* = defined(useSnappy)
@@ -437,7 +437,6 @@ proc recvMsg*(peer: Peer): Future[tuple[msgId: int, msgData: Rlp]] {.async.} =
                              "Cannot read RLPx message id")
-
 proc checkedRlpRead(peer: Peer, r: var Rlp, MsgType: type): auto {.inline.} =
   let tmp = r
   when defined(release):
     return r.read(MsgType)
   else:
@@ -419,7 +419,7 @@ when defined(testing):

   setup:
     var lesNetwork = new LesNetwork
-    lesNetwork.peers = initSet[LesPeer]()
+    lesNetwork.peers = initHashSet[LesPeer]()
     lesNetwork.initFlowControl(dummyLes.protocolInfo,
                                reqCostTarget = 300,
                                maxReqCount = 5,

@@ -77,7 +77,7 @@ const
   keyAnnounceSignature = "sign"

 proc initProtocolState(network: LesNetwork, node: EthereumNode) {.gcsafe.} =
-  network.peers = initSet[LesPeer]()
+  network.peers = initHashSet[LesPeer]()

 proc addPeer(network: LesNetwork, peer: LesPeer) =
   network.enlistInFlowControl peer
@@ -1,6 +1,3 @@
-import
-  ../types
-
 const
   MAX_LENGTH_BYTES* = 8
@@ -12,7 +12,7 @@ proc newCachingDB*(backing: TrieDatabaseRef): CachingDB =
   result.new()
   result.backing = backing
   result.changed = initTable[seq[byte], seq[byte]]()
-  result.deleted = initSet[seq[byte]]()
+  result.deleted = initHashSet[seq[byte]]()

 proc get*(db: CachingDB, key: openarray[byte]): seq[byte] =
   let key = @key
@@ -30,7 +30,7 @@ type
   MDB_Dbi = distinct cuint

   MDB_val = object
-    mv_size: csize
+    mv_size: csize_t
     mv_data: pointer

 # this is only a subset of LMDB API needed in nimbus

@@ -77,7 +77,7 @@ proc txCommit*(db: ChainDB, manualCommit = true): bool =
   mdb_dbi_close(db.env, db.dbi)

 proc toMdbVal(val: openArray[byte]): MDB_Val =
-  result.mv_size = val.len
+  result.mv_size = csize_t(val.len)
   result.mv_data = unsafeAddr val[0]

 proc get*(db: ChainDB, key: openarray[byte]): seq[byte] =
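In older Nim, csize was (incorrectly) a signed alias of int; csize_t was introduced to match C's unsigned size_t exactly, which matters for FFI structs such as MDB_val above. A standalone sketch of the same idea:

type
  CVal = object        # mirrors a C struct { size_t len; void* data; }
    len: csize_t
    data: pointer

var buf = [1'u8, 2, 3]
let v = CVal(len: csize_t(buf.len), data: addr buf[0])
echo v.len             # 3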
@@ -1,5 +1,5 @@
 import
-  stew/ranges/[ptr_arith, typedranges, bitranges], eth/rlp/types,
+  stew/ranges/[typedranges, bitranges], eth/rlp/types,
   trie_defs, db, binaries, trie_utils

 export
@@ -108,7 +108,7 @@ proc put*(db: MemoryLayer, key, val: openarray[byte]) =
 proc newMemoryLayer: MemoryLayer =
   result.new
   result.records = initTable[Bytes, MemDBRec]()
-  result.deleted = initSet[Bytes]()
+  result.deleted = initHashSet[Bytes]()

 proc commit(memDb: MemoryLayer, db: TrieDatabaseRef, applyDeletes: bool = true) =
   if applyDeletes:
@@ -1,5 +1,5 @@
 import
-  stew/ranges/[ptr_arith, typedranges, bitranges], eth/rlp/types,
+  stew/ranges/[typedranges, bitranges], eth/rlp/types,
   trie_defs, trie_utils, db, sparse_proofs

 export

@@ -1,6 +1,6 @@
 import
   stew/ranges/[typedranges, bitranges],
-  trie_defs, db, trie_utils
+  trie_defs, trie_utils

 const
   treeHeight* = 160
@@ -1,10 +1,8 @@
 import
-  strutils, parseutils,
+  stew/byteutils,
   stew/ranges/[typedranges, ptr_arith], nimcrypto/[hash, keccak],
   trie_defs, binaries

-#proc baseAddr*(x: Bytes): ptr byte = x[0].unsafeAddr
-
 proc toTrieNodeKey*(hash: KeccakHash): TrieNodeKey =
   result = newRange[byte](32)
   copyMem(result.baseAddr, hash.data.baseAddr, 32)

@@ -26,17 +24,7 @@ proc toRange*(str: string): ByteRange =
   result = toRange(s)

 proc hashFromHex*(bits: static[int], input: string): MDigest[bits] =
-  if input.len != bits div 4:
-    raise newException(ValueError,
-                       "The input string has incorrect size")
-
-  for i in 0 ..< bits div 8:
-    var nextByte: int
-    if parseHex(input, nextByte, i*2, 2) == 2:
-      result.data[i] = uint8(nextByte)
-    else:
-      raise newException(ValueError,
-                         "The input string contains invalid characters")
+  MDigest(data: hexToByteArray[bits div 8](input))

 template hashFromHex*(s: static[string]): untyped = hashFromHex(s.len * 4, s)
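The hand-rolled parseHex loop becomes a single call to hexToByteArray from stew/byteutils, which performs the same length and character validation and raises ValueError on bad input. A minimal sketch:

import stew/byteutils

let bytes = hexToByteArray[4]("deadbeef")
echo bytes    # [222, 173, 190, 239]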
@@ -0,0 +1,4 @@
+import
+  ./test_api_usage,
+  ./test_json_suite,
+  ./test_object_serialization
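This new aggregator (the tests/rlp/all_tests module referenced by the nimble changes above) only imports the test modules; each runs its suites at module-initialization time. Since nothing is referenced from these imports, the leaf files gain a {.used.} pragma in the following hunks to silence the unusedImport hint. A sketch of a leaf module, with a hypothetical name:

# test_example.nim
{.used.}  # imported for side effects only; suppress the unusedImport hint

import unittest

suite "example":
  test "it runs":
    check 1 + 1 == 2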
@@ -31,9 +31,7 @@ test "you cannot finish a list without appending enough elements":
   writer.append "bar"

   expect Defect:
-    let result = writer.finish
-
-proc withNewLines(x: string): string = x & "\n"
+    discard writer.finish

 test "encode/decode object":
   type
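Binding the unwanted value to a local named result both shadows Nim's implicit result variable and leaves an unused binding once warnings are on; discard states directly that only the side effect (the expected Defect) matters. The apparently unused withNewLines helper goes away at the same time. A tiny sketch:

proc make(): int = 42

discard make()   # value intentionally ignored; no unused-variable hint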
@@ -1,3 +1,5 @@
+{.used.}
+
 import
   os, strutils, strformat,
   util/json_testing

@@ -1,3 +1,5 @@
+{.used.}
+
 import
   unittest, times, eth/rlp, util/json_testing

@@ -17,8 +19,6 @@ type
     b: string
     f: Foo

-  CompressedFoo = object
-
   CustomSerialized = object
     customFoo {.rlpCustomSerialization.}: Foo
     ignored {.rlpIgnore.}: int
@@ -0,0 +1,12 @@
+import
+  test_bin_trie,
+  test_binaries_utils,
+  test_branches_utils,
+  test_caching_db_backend,
+  test_examples,
+  test_hexary_trie,
+  test_json_suite,
+  test_nibbles,
+  test_sparse_binary_trie,
+  test_storage_backends,
+  test_transaction_db
@@ -1,3 +1,5 @@
+{.used.}
+
 import
   unittest, random,
   eth/trie/[trie_defs, db, binary],

@@ -66,10 +68,8 @@ suite "binary trie":
      if will_raise_error:
        try:
          trie.deleteSubtrie(key_to_be_deleted)
-        except NodeOverrideError as E:
+        except NodeOverrideError:
          discard
-        except:
-          check(false)
      else:
        let root_hash_before_delete = trie.getRootHash()
        trie.deleteSubtrie(key_to_be_deleted)

@@ -101,10 +101,8 @@ suite "binary trie":
      if if_error:
        try:
          trie.delete(invalidKey)
-        except NodeOverrideError as E:
+        except NodeOverrideError:
          discard
-        except:
-          check(false)
      else:
        let previous_root_hash = trie.getRootHash()
        trie.delete(invalidKey)
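Two cleanups per hunk here: except NodeOverrideError as E bound a variable that was never read (an XDeclaredButNotUsed hint once warnings are on), and the bare except: check(false) is dropped, presumably because unittest already reports an unexpected exception as a test failure. A standalone sketch of the surviving shape:

try:
  raise newException(ValueError, "boom")
except ValueError:   # match on the type only; no unused binding, no catch-all
  discard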
@@ -1,3 +1,5 @@
+{.used.}
+
 import
   unittest, strutils,
   stew/ranges/bitranges, eth/rlp/types, nimcrypto/[keccak, hash],
@@ -1,5 +1,7 @@
+{.used.}
+
 import
-  random, sets, unittest, strutils, sets,
+  sets, unittest, strutils, sets,
   eth/trie/[db, binary, branches]

 suite "branches utils":

@@ -91,7 +93,7 @@ suite "branches utils":
     for c in trieNodesData:
       let root = c[0].toRange()
       let nodes = toRanges(c[1])
-      check toSet(nodes) == toSet(getTrieNodes(db, root))
+      check toHashSet(nodes) == toHashSet(getTrieNodes(db, root))

   const witnessData = [
     ("\x12\x34\x56\x78\x9b",

@@ -137,6 +139,6 @@ suite "branches utils":
       let nodes = toRanges(c[1])

       if nodes.len != 0:
-        let x = toSet(nodes)
-        let y = toSet(getWitness(db, trie.getRootHash(), key))
+        let x = toHashSet(nodes)
+        let y = toHashSet(getWitness(db, trie.getRootHash(), key))
         check x == y
@@ -1,3 +1,5 @@
+{.used.}
+
 import
   unittest,
   eth/trie/db,

@@ -1,3 +1,5 @@
+{.used.}
+
 import
   unittest,
   nimcrypto/[keccak, hash],

@@ -1,3 +1,5 @@
+{.used.}
+
 import
   unittest, sequtils, os,
   stew/ranges/typedranges, eth/trie/[hexary, db, trie_defs], nimcrypto/utils,

@@ -1,12 +1,11 @@
+{.used.}
+
 import
-  os, json, tables, sequtils, strutils, algorithm,
-  eth/rlp/types, nimcrypto/utils,
+  os, json, tables, strutils, algorithm,
+  eth/rlp/types,
   eth/trie/[trie_defs, db, hexary],
   ./testutils

-proc `==`(lhs: JsonNode, rhs: string): bool =
-  lhs.kind == JString and lhs.str == rhs
-
 type
   TestOp = object
     idx: int

@@ -1,3 +1,5 @@
+{.used.}
+
 import
   unittest,
   eth/trie/nibbles

@@ -1,3 +1,5 @@
+{.used.}
+
 import
   unittest, random,
   eth/trie/[trie_defs, db, sparse_binary, sparse_proofs],

@@ -1,3 +1,5 @@
+{.used.}
+
 import
   unittest, macros, os,
   eth/trie/backends/[rocksdb_backend, sqlite_backend, lmdb_backend]

@@ -1,5 +1,7 @@
+{.used.}
+
 import
-  unittest, strutils, sequtils, os,
+  unittest,
   eth/trie/[db, trie_defs], ./testutils,
   eth/rlp/types as rlpTypes
@@ -56,7 +56,7 @@ proc randList*(T: typedesc, strGen, listGen: RandGen, unique: bool = true): seq[
   let listLen = listGen.getVal()
   result = newSeqOfCap[T](listLen)
   if unique:
-    var set = initSet[T]()
+    var set = initHashSet[T]()
     for len in 0..<listLen:
       while true:
         let x = randPrimitives[T](strGen.getVal())