diff --git a/doc/rlp.md b/doc/rlp.md
index d472a8c..4098491 100644
--- a/doc/rlp.md
+++ b/doc/rlp.md
@@ -41,7 +41,7 @@ will be advanced just past the end of the consumed object.
 
 The `toXX` and `read` family of procs may raise a `RlpTypeMismatch` in
 case of type mismatch with the stream contents under the cursor. A corrupted
-RLP stream or an attemp to read past the stream end will be signaled
+RLP stream or an attempt to read past the stream end will be signaled
 with the `MalformedRlpError` exception. If the RLP stream includes data
 that cannot be processed on the current platform (e.g. an integer value
 that is too large), the library will raise an `UnsupportedRlpError` exception.
diff --git a/doc/trie.md b/doc/trie.md
index 80902c8..fddafd9 100644
--- a/doc/trie.md
+++ b/doc/trie.md
@@ -86,7 +86,7 @@ Constructor API:
 * init(BinaryTrie, DB, rootHash[optional])
 
 Normally you would not set the rootHash when constructing an empty Binary-trie.
-Setting the rootHash occured in a scenario where you have a populated DB
+Setting the rootHash occurred in a scenario where you have a populated DB
 with existing trie structure and you know the rootHash,
 and then you want to continue/resume the trie operations.
 
@@ -123,7 +123,7 @@ What kind of lie?
 actually, `delete` and `deleteSubtrie` doesn't remove the 'deleted' node from the underlying DB.
 It only make the node inaccessible from the user of the trie.
 The same also happened if you update the value of a key, the old value node is not removed from the underlying DB.
-A more subtle lie also happened when you add new entrie into the trie using `set` operation.
+A more subtle lie also happened when you add new entries into the trie using `set` operation.
 The previous hash of affected branch become obsolete and replaced by new hash,
 the old hash become inaccessible to the user.
 You may think that is a waste of storage space.
@@ -230,7 +230,7 @@ Then we can write the clean tree into a new DB instance to replace the old one.
 ## Sparse Merkle Trie
 
 Sparse Merkle Trie(SMT) is a variant of Binary Trie which uses binary encoding to
-represent path during trie travelsal. When Binary Trie uses three types of node,
+represent path during trie traversal. When Binary Trie uses three types of node,
 SMT only use one type of node without any additional special encoding to store it's key-path.
 
 Actually, it doesn't even store it's key-path anywhere like Binary Trie,
@@ -280,7 +280,7 @@ Constructor API:
 * init(SparseBinaryTrie, DB, rootHash[optional])
 
 Normally you would not set the rootHash when constructing an empty Sparse Merkle Trie.
-Setting the rootHash occured in a scenario where you have a populated DB
+Setting the rootHash occurred in a scenario where you have a populated DB
 with existing trie structure and you know the rootHash,
 and then you want to continue/resume the trie operations.
 
diff --git a/eth/db/kvstore.nim b/eth/db/kvstore.nim
index fc28eb4..865ba96 100644
--- a/eth/db/kvstore.nim
+++ b/eth/db/kvstore.nim
@@ -19,7 +19,7 @@ export results
 type
   MemStoreRef* = ref object of RootObj
     records: Table[seq[byte], seq[byte]]
-    # TODO interaction with this table would benefit from heterogenous lookup
+    # TODO interaction with this table would benefit from heterogeneous lookup
     # (see `@key` below)
     # https://github.com/nim-lang/Nim/issues/7457
 
@@ -51,7 +51,7 @@ template put*(dbParam: KvStoreRef, key, val: openArray[byte]): KvResult[void] =
   db.putProc(db.obj, key, val)
 
 template get*(dbParam: KvStoreRef, key: openArray[byte], onData: untyped): KvResult[bool] =
-  ## Retrive value at ``key`` and call ``onData`` with the value. The data is
+  ## Retrieve value at ``key`` and call ``onData`` with the value. The data is
   ## valid for the duration of the callback.
   ## ``onData``: ``proc(data: openArray[byte])``
   ## returns true if found and false otherwise.
diff --git a/eth/db/kvstore_sqlite3.nim b/eth/db/kvstore_sqlite3.nim
index d1d1d88..99469c8 100644
--- a/eth/db/kvstore_sqlite3.nim
+++ b/eth/db/kvstore_sqlite3.nim
@@ -59,7 +59,7 @@ template dispose*(db: SqliteStmt) =
 
 func isInsideTransaction*(db: SqStoreRef): bool =
   sqlite3_get_autocommit(db.env) == 0
- 
+
 proc release[T](x: var AutoDisposed[T]): T =
   result = x.val
   x.val = nil
@@ -132,7 +132,7 @@ proc exec*[P](s: SqliteStmt[P, void], params: P): KvResult[void] =
     else:
       ok()
 
-  # release implict transaction
+  # release implicit transaction
   discard sqlite3_reset(s) # same return information as step
   discard sqlite3_clear_bindings(s) # no errors possible
 
@@ -620,12 +620,12 @@ proc customScalarBlobFunction(ctx: ptr sqlite3_context, n: cint, v: ptr ptr sqli
   try:
     if s.isOk():
       let bytes = s.unsafeGet()
-      # try is necessessary as otherwise nim marks SQLITE_TRANSIENT as throwning
+      # try is necessary as otherwise nim marks SQLITE_TRANSIENT as throwing
       # unlisted exception.
       # Using SQLITE_TRANSIENT destructor type, as it inform sqlite that data
       # under provided pointer may be deleted at any moment, which is the case
       # for seq[byte] as it is managed by nim gc. With this flag sqlite copy bytes
-      # under pointer and then realeases them itself.
+      # under pointer and then releases them itself.
       sqlite3_result_blob(ctx, unsafeAddr bytes[0], bytes.len.cint, SQLITE_TRANSIENT)
     else:
       let errMsg = s.error
diff --git a/eth/keyfile/keyfile.nim b/eth/keyfile/keyfile.nim
index da8a9ac..3e2b451 100644
--- a/eth/keyfile/keyfile.nim
+++ b/eth/keyfile/keyfile.nim
@@ -313,7 +313,7 @@ proc createKeyFileJson*(seckey: PrivateKey,
   ## ``version`` - version of keyfile format (default is 3)
   ## ``cryptkind`` - algorithm for private key encryption
   ## (default is AES128-CTR)
-  ## ``kdfkind`` - algorithm for key deriviation function (default is PBKDF2)
+  ## ``kdfkind`` - algorithm for key derivation function (default is PBKDF2)
   ## ``workfactor`` - Key deriviation function work factor, 0 is to use
   ## default workfactor.
   var iv: array[aes128.sizeBlock, byte]
diff --git a/eth/keys.nim b/eth/keys.nim
index 7699466..31642ae 100644
--- a/eth/keys.nim
+++ b/eth/keys.nim
@@ -9,7 +9,7 @@
 # working with keys and signatures as they appear in Ethereum in particular:
 #
 # * Public keys as serialized in uncompressed format without the initial byte
-# * Shared secrets are serialized in raw format without the intial byte
+# * Shared secrets are serialized in raw format without the initial byte
 # * distinct types are used to avoid confusion with the "standard" secp types
 
 {.push raises: [Defect].}
diff --git a/eth/net/utils.nim b/eth/net/utils.nim
index ce496ae..8769d01 100644
--- a/eth/net/utils.nim
+++ b/eth/net/utils.nim
@@ -65,6 +65,6 @@ proc getRouteIpv4*(): Result[ValidIpAddress, cstring] =
   let ip = try: route.source.address()
            except ValueError as e:
              # This should not occur really.
-             error "Address convertion error", exception = e.name, msg = e.msg
+             error "Address conversion error", exception = e.name, msg = e.msg
              return err("Invalid IP address")
   ok(ValidIpAddress.init(ip))
diff --git a/eth/p2p/discovery.nim b/eth/p2p/discovery.nim
index acff0ef..2c49128 100644
--- a/eth/p2p/discovery.nim
+++ b/eth/p2p/discovery.nim
@@ -25,7 +25,7 @@ const
   MAC_SIZE = 256 div 8 # 32
   SIG_SIZE = 520 div 8 # 65
   HEAD_SIZE = MAC_SIZE + SIG_SIZE # 97
-  EXPIRATION = 60 # let messages expire after N secondes
+  EXPIRATION = 60 # let messages expire after N seconds
   PROTO_VERSION = 4
 
 type
diff --git a/eth/p2p/discoveryv5/encoding.nim b/eth/p2p/discoveryv5/encoding.nim
index 17a9f6b..0927ad5 100644
--- a/eth/p2p/discoveryv5/encoding.nim
+++ b/eth/p2p/discoveryv5/encoding.nim
@@ -520,7 +520,7 @@ proc decodeHandshakePacket(c: var Codec, fromAddr: Address, nonce: AESGCMNonce,
     # Differently from an ordinary message, this is seen as an error as the
     # secrets just got negotiated in the handshake and thus decryption should
     # always work. We do not send a new Whoareyou on these as it probably means
-    # there is a compatiblity issue and we might loop forever in failed
+    # there is a compatibility issue and we might loop forever in failed
     # handshakes with this peer.
     return err("Decryption of message failed in handshake packet")
 
diff --git a/eth/p2p/discoveryv5/ip_vote.nim b/eth/p2p/discoveryv5/ip_vote.nim
index 20e8609..2392e8b 100644
--- a/eth/p2p/discoveryv5/ip_vote.nim
+++ b/eth/p2p/discoveryv5/ip_vote.nim
@@ -10,7 +10,7 @@
 ##
 ## This allows the selection of a node its own public IP based on address
 ## information that is received from other nodes.
-## This can be used in conjuction with discovery v5 ping-pong request responses
+## This can be used in conjunction with discovery v5 ping-pong request responses
 ## that provide this information.
 ## To select the right address, a majority count is done. This is done over a
 ## sort of moving window as votes expire after `IpVoteTimeout`.
diff --git a/eth/p2p/discoveryv5/lru.nim b/eth/p2p/discoveryv5/lru.nim
index d4ffb41..160c2d6 100644
--- a/eth/p2p/discoveryv5/lru.nim
+++ b/eth/p2p/discoveryv5/lru.nim
@@ -5,7 +5,7 @@ import std/[tables, lists, options]
 type
   LRUCache*[K, V] = object of RootObj
     list: DoublyLinkedList[(K, V)] # Head is MRU k:v and tail is LRU k:v
-    table: Table[K, DoublyLinkedNode[(K, V)]] # DoublyLinkedNode is alraedy ref
+    table: Table[K, DoublyLinkedNode[(K, V)]] # DoublyLinkedNode is already ref
     capacity: int
 
 func init*[K, V](T: type LRUCache[K, V], capacity: int): LRUCache[K, V] =
diff --git a/eth/p2p/discoveryv5/nodes_verification.nim b/eth/p2p/discoveryv5/nodes_verification.nim
index 45fd89f..82984eb 100644
--- a/eth/p2p/discoveryv5/nodes_verification.nim
+++ b/eth/p2p/discoveryv5/nodes_verification.nim
@@ -40,7 +40,7 @@ proc verifyNodesRecords(enrs: openArray[Record], fromNode: Node, nodesLimit: int
     # The discovery v5 specification specifies no limit on the amount of ENRs
     # that can be returned, but clients usually stick with the bucket size limit
     # as in original Kademlia. Because of this it is chosen not to fail
-    # immediatly, but still process maximum `findNodeResultLimit`.
+    # immediately, but still process maximum `findNodeResultLimit`.
     if count >= nodesLimit:
       debug "Too many ENRs", enrs = enrs.len(),
         limit = nodesLimit, sender = fromNode.record.toURI
diff --git a/eth/p2p/discoveryv5/protocol.nim b/eth/p2p/discoveryv5/protocol.nim
index 0b4956e..fd1a747 100644
--- a/eth/p2p/discoveryv5/protocol.nim
+++ b/eth/p2p/discoveryv5/protocol.nim
@@ -490,7 +490,7 @@ proc replaceNode(d: Protocol, n: Node) =
       # peers in the routing table.
       debug "Message request to bootstrap node failed", enr = toURI(n.record)
 
-# TODO: This could be improved to do the clean-up immediatily in case a non
+# TODO: This could be improved to do the clean-up immediately in case a non
 # whoareyou response does arrive, but we would need to store the AuthTag
 # somewhere
 proc registerRequest(d: Protocol, n: Node, message: seq[byte],
diff --git a/eth/p2p/discoveryv5/routing_table.nim b/eth/p2p/discoveryv5/routing_table.nim
index 00f2965..74f836f 100644
--- a/eth/p2p/discoveryv5/routing_table.nim
+++ b/eth/p2p/discoveryv5/routing_table.nim
@@ -53,7 +53,7 @@ type
     ## time seen. First entry (head) is considered the most recently seen node
     ## and the last entry (tail) is considered the least recently seen node.
     ## Here "seen" means a successful request-response. This can also not have
-    ## occured yet.
+    ## occurred yet.
     replacementCache: seq[Node] ## Nodes that could not be added to the `nodes`
     ## seq as it is full and without stale nodes. This is practically a small
     ## LRU cache.
@@ -70,12 +70,12 @@ type
   ## is possible that a malicious node could fill (poison) the routing table or
   ## a specific bucket with ENRs with IPs it does not control. The effect of
   ## this would be that a node that actually owns the IP could have a difficult
-  ## time getting its ENR distrubuted in the DHT and as a consequence would
+  ## time getting its ENR distributed in the DHT and as a consequence would
   ## not be reached from the outside as much (or at all). However, that node can
   ## still search and find nodes to connect to. So it would practically be a
   ## similar situation as a node that is not reachable behind the NAT because
   ## port mapping is not set up properly.
-  ## There is the possiblity to set the IP limit on verified (=contacted) nodes
+  ## There is the possibility to set the IP limit on verified (=contacted) nodes
   ## only, but that would allow for lookups to be done on a higher set of nodes
   ## owned by the same identity. This is a worse alternative.
   ## Next, doing lookups only on verified nodes would slow down discovery start
@@ -321,7 +321,7 @@ proc addNode*(r: var RoutingTable, n: Node): NodeStatus =
   ## Try to add the node to the routing table.
   ##
   ## First, an attempt will be done to add the node to the bucket in its range.
-  ## If this fails, the bucket will be split if it is eligable for splitting.
+  ## If this fails, the bucket will be split if it is eligible for splitting.
   ## If so, a new attempt will be done to add the node. If not, the node will be
   ## added to the replacement cache.
   ##
diff --git a/eth/p2p/kademlia.nim b/eth/p2p/kademlia.nim
index 4f89f18..774b91a 100644
--- a/eth/p2p/kademlia.nim
+++ b/eth/p2p/kademlia.nim
@@ -667,7 +667,7 @@ proc randomNodes*(k: KademliaProtocol, count: int): seq[Node] =
   result = newSeqOfCap[Node](count)
   var seen = initHashSet[Node]()
 
-  # This is a rather inneficient way of randomizing nodes from all buckets, but even if we
+  # This is a rather inefficient way of randomizing nodes from all buckets, but even if we
   # iterate over all nodes in the routing table, the time it takes would still be
   # insignificant compared to the time it takes for the network roundtrips when connecting
   # to nodes.
diff --git a/eth/p2p/p2p_protocol_dsl.nim b/eth/p2p/p2p_protocol_dsl.nim
index bee4e1b..8d4f8c8 100644
--- a/eth/p2p/p2p_protocol_dsl.nim
+++ b/eth/p2p/p2p_protocol_dsl.nim
@@ -54,7 +54,7 @@ type
       ## Cached ident for the timeout parameter
 
     extraDefs*: NimNode
-      ## The reponse procs have extra templates that must become
+      ## The response procs have extra templates that must become
       ## part of the generated code
 
   P2PProtocol* = ref object
@@ -253,7 +253,7 @@ proc refreshParam(n: NimNode): NimNode =
   result = copyNimTree(n)
   if n.kind == nnkIdentDefs:
     for i in 0..