diff --git a/doc/p2p.md b/doc/p2p.md index fb7e8d3..d2534b2 100644 --- a/doc/p2p.md +++ b/doc/p2p.md @@ -67,7 +67,7 @@ the network. To start the connection process, call `node.connectToNetwork`: ``` nim proc connectToNetwork*(node: var EthereumNode, - bootstrapNodes: openarray[ENode], + bootstrapNodes: openArray[ENode], startListening = true, enableDiscovery = true) ``` @@ -106,7 +106,7 @@ p2pProtocol DevP2P(version = 0, rlpxName = "p2p"): proc hello(peer: Peer, version: uint, clientId: string, - capabilities: openarray[Capability], + capabilities: openArray[Capability], listenPort: uint, nodeId: P2PNodeId) = peer.id = nodeId @@ -195,7 +195,7 @@ There are few things to note in the above example: 2. Each message defined in the protocol received a corresponding type name, matching the message name (e.g. `p2p.hello`). This type will have fields - matching the parameter names of the message. If the messages has `openarray` + matching the parameter names of the message. If the messages has `openArray` params, these will be remapped to `seq` types. If the designated messages also has an attached handler, the future returned @@ -219,8 +219,8 @@ p2pProtocol les(version = 2): ... requestResponse: - proc getProofs(p: Peer, proofs: openarray[ProofRequest]) - proc proofs(p: Peer, BV: uint, proofs: openarray[Blob]) + proc getProofs(p: Peer, proofs: openArray[ProofRequest]) + proc proofs(p: Peer, BV: uint, proofs: openArray[Blob]) ... ``` diff --git a/eth.nimble b/eth.nimble index d4b5842..98e5445 100644 --- a/eth.nimble +++ b/eth.nimble @@ -19,8 +19,9 @@ requires "nim >= 1.2.0", "testutils", "unittest2" -var commonParams = " --verbosity:0 --hints:off --skipUserCfg:on --warning[ObservableStores]:off " & - getEnv("NIMFLAGS") & " " +let commonParams = " --verbosity:0 --hints:off --skipUserCfg:on " & + "--warning[ObservableStores]:off --styleCheck:usages --styleCheck:hint " & + getEnv("NIMFLAGS") & " " proc runTest(path: string, release: bool = true, chronosStrict = true) = echo "\nBuilding and running: ", path diff --git a/eth/bloom.nim b/eth/bloom.nim index b95d2ee..a8735a6 100644 --- a/eth/bloom.nim +++ b/eth/bloom.nim @@ -28,12 +28,12 @@ proc init*(_: type BloomFilter, h: MDigest[256]): BloomFilter = # TODO: The following 2 procs should be one genric, but it doesn't compile. Nim bug? 
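The bulk of this patch is mechanical: Nim treats identifiers as equal when they differ only in underscores or in the casing of non-leading characters, so `openarray` and `openArray` already named the same type; the new `--styleCheck:usages --styleCheck:hint` flags added to `eth.nimble` simply make the compiler report every spelling that does not match the declaration. A minimal sketch of that behaviour (illustrative only, not part of the patch):

```nim
# styleCheck_demo.nim -- illustrative only.
# `openarray` and `openArray` resolve to the same symbol, so both procs
# compile; with `--styleCheck:usages --styleCheck:hint` the compiler emits
# a hint (an error with --styleCheck:error) for the first spelling because
# it does not match the declared name `openArray`.
proc total(xs: openarray[int]): int =
  for x in xs:
    result += x

proc totalCanonical(xs: openArray[int]): int =
  for x in xs:
    result += x

echo total([1, 2, 3])            # 6
echo totalCanonical([4, 5, 6])   # 15
```

Compiling with `nim c --styleCheck:usages --styleCheck:hint styleCheck_demo.nim` shows the kind of hints this diff silences by fixing the spellings at the use sites.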
proc incl*(f: var BloomFilter, v: string) = f.incl(keccak256.digest(v)) -proc incl*(f: var BloomFilter, v: openarray[byte]) = f.incl(keccak256.digest(v)) +proc incl*(f: var BloomFilter, v: openArray[byte]) = f.incl(keccak256.digest(v)) proc contains*(f: BloomFilter, h: MDigest[256]): bool = for bits in bloomBits(h): if (f.value and bits).isZero: return false return true -template contains*[T](f: BloomFilter, v: openarray[T]): bool = +template contains*[T](f: BloomFilter, v: openArray[T]): bool = f.contains(keccak256.digest(v)) diff --git a/eth/common/eth_types.nim b/eth/common/eth_types.nim index d3b04aa..1cd437e 100644 --- a/eth/common/eth_types.nim +++ b/eth/common/eth_types.nim @@ -304,13 +304,13 @@ proc append*(rlpWriter: var RlpWriter, value: StUint) = else: rlpWriter.append(value.truncate(int)) -proc read*(rlp: var Rlp, T: type Stint): T {.inline.} = +proc read*(rlp: var Rlp, T: type StInt): T {.inline.} = # The Ethereum Yellow Paper defines the RLP serialization only # for unsigned integers: {.fatal: "RLP serialization of signed integers is not allowed".} discard -proc append*(rlpWriter: var RlpWriter, value: Stint) = +proc append*(rlpWriter: var RlpWriter, value: StInt) = # The Ethereum Yellow Paper defines the RLP serialization only # for unsigned integers: {.fatal: "RLP serialization of signed integers is not allowed".} @@ -671,7 +671,7 @@ method getCodeByHash*(db: AbstractChainDB, hash: KeccakHash): Blob {.base, gcsaf method getSetting*(db: AbstractChainDB, key: string): seq[byte] {.base, gcsafe.} = notImplemented() -method setSetting*(db: AbstractChainDB, key: string, val: openarray[byte]) {.base, gcsafe.} = +method setSetting*(db: AbstractChainDB, key: string, val: openArray[byte]) {.base, gcsafe.} = notImplemented() method getHeaderProof*(db: AbstractChainDB, req: ProofRequest): Blob {.base, gcsafe.} = @@ -686,10 +686,10 @@ method getHelperTrieProof*(db: AbstractChainDB, req: HelperTrieProofRequest): Bl method getTransactionStatus*(db: AbstractChainDB, txHash: KeccakHash): TransactionStatusMsg {.base, gcsafe.} = notImplemented() -method addTransactions*(db: AbstractChainDB, transactions: openarray[Transaction]) {.base, gcsafe.} = +method addTransactions*(db: AbstractChainDB, transactions: openArray[Transaction]) {.base, gcsafe.} = notImplemented() -method persistBlocks*(db: AbstractChainDB, headers: openarray[BlockHeader], bodies: openarray[BlockBody]): ValidationResult {.base, gcsafe.} = +method persistBlocks*(db: AbstractChainDB, headers: openArray[BlockHeader], bodies: openArray[BlockBody]): ValidationResult {.base, gcsafe.} = notImplemented() method getForkId*(db: AbstractChainDB, n: BlockNumber): ForkID {.base, gcsafe.} = diff --git a/eth/db/kvstore_rocksdb.nim b/eth/db/kvstore_rocksdb.nim index e62cf63..fd02640 100644 --- a/eth/db/kvstore_rocksdb.nim +++ b/eth/db/kvstore_rocksdb.nim @@ -13,19 +13,19 @@ type RocksStoreRef* = ref object of RootObj store: RocksDBInstance -proc get*(db: RocksStoreRef, key: openarray[byte], onData: kvstore.DataProc): KvResult[bool] = +proc get*(db: RocksStoreRef, key: openArray[byte], onData: kvstore.DataProc): KvResult[bool] = db.store.get(key, onData) -proc find*(db: RocksStoreRef, prefix: openarray[byte], onFind: kvstore.KeyValueProc): KvResult[int] = +proc find*(db: RocksStoreRef, prefix: openArray[byte], onFind: kvstore.KeyValueProc): KvResult[int] = raiseAssert "Unimplemented" -proc put*(db: RocksStoreRef, key, value: openarray[byte]): KvResult[void] = +proc put*(db: RocksStoreRef, key, value: openArray[byte]): KvResult[void] = 
db.store.put(key, value) -proc contains*(db: RocksStoreRef, key: openarray[byte]): KvResult[bool] = +proc contains*(db: RocksStoreRef, key: openArray[byte]): KvResult[bool] = db.store.contains(key) -proc del*(db: RocksStoreRef, key: openarray[byte]): KvResult[void] = +proc del*(db: RocksStoreRef, key: openArray[byte]): KvResult[void] = db.store.del(key) proc close*(db: RocksStoreRef) = diff --git a/eth/db/kvstore_sqlite3.nim b/eth/db/kvstore_sqlite3.nim index bc9afe1..3def35e 100644 --- a/eth/db/kvstore_sqlite3.nim +++ b/eth/db/kvstore_sqlite3.nim @@ -461,7 +461,7 @@ proc init*( let name = if inMemory: ":memory:" - else: basepath / name & ".sqlite3" + else: basePath / name & ".sqlite3" flags = # For some reason, opening multiple in-memory databases doesn't work if # one of them is read-only - for now, disable read-only mode for them diff --git a/eth/keyfile/keyfile.nim b/eth/keyfile/keyfile.nim index 4278e79..da8a9ac 100644 --- a/eth/keyfile/keyfile.nim +++ b/eth/keyfile/keyfile.nim @@ -221,8 +221,8 @@ proc deriveKey(password: string, salt: string, proc encryptKey(seckey: PrivateKey, cryptkind: CryptKind, - key: openarray[byte], - iv: openarray[byte]): KfResult[array[KeyLength, byte]] = + key: openArray[byte], + iv: openArray[byte]): KfResult[array[KeyLength, byte]] = if cryptkind == AES128CTR: var crypttext: array[KeyLength, byte] var ctx: CTR[aes128] @@ -233,10 +233,10 @@ proc encryptKey(seckey: PrivateKey, else: err(NotImplemented) -proc decryptKey(ciphertext: openarray[byte], +proc decryptKey(ciphertext: openArray[byte], cryptkind: CryptKind, - key: openarray[byte], - iv: openarray[byte]): KfResult[array[KeyLength, byte]] = + key: openArray[byte], + iv: openArray[byte]): KfResult[array[KeyLength, byte]] = if cryptkind == AES128CTR: if len(iv) != aes128.sizeBlock: return err(IncorrectIV) @@ -295,7 +295,7 @@ proc decodeSalt(m: string): string = else: result = "" -proc compareMac(m1: openarray[byte], m2: openarray[byte]): bool = +proc compareMac(m1: openArray[byte], m2: openArray[byte]): bool = if len(m1) == len(m2) and len(m1) > 0: result = equalMem(unsafeAddr m1[0], unsafeAddr m2[0], len(m1)) diff --git a/eth/net/nat.nim b/eth/net/nat.nim index 38fc2dc..462b4ef 100644 --- a/eth/net/nat.nim +++ b/eth/net/nat.nim @@ -39,7 +39,7 @@ logScope: ## Also does threadvar initialisation. ## Must be called before redirectPorts() in each thread. proc getExternalIP*(natStrategy: NatStrategy, quiet = false): Option[IpAddress] = - var externalIP: IPAddress + var externalIP: IpAddress if natStrategy == NatAny or natStrategy == NatUpnp: if upnp == nil: @@ -245,7 +245,7 @@ proc setupNat*(natStrategy: NatStrategy, tcpPort, udpPort: Port, ## original ports as best effort. ## TODO: Allow for tcp or udp port mapping to be optional. let extIp = getExternalIP(natStrategy) - if extIP.isSome: + if extIp.isSome: let ip = ValidIpAddress.init(extIp.get) let extPorts = ({.gcsafe.}: redirectPorts(tcpPort = tcpPort, @@ -308,7 +308,7 @@ proc setupAddress*(natConfig: NatConfig, bindIp: ValidIpAddress, case natConfig.nat: of NatAny: - let bindAddress = initTAddress(bindIP, Port(0)) + let bindAddress = initTAddress(bindIp, Port(0)) if bindAddress.isAnyLocal(): let ip = getRouteIpv4() if ip.isErr(): @@ -326,11 +326,11 @@ proc setupAddress*(natConfig: NatConfig, bindIp: ValidIpAddress, return setupNat(natConfig.nat, tcpPort, udpPort, clientId) elif bindAddress.isPublic(): # When a specific public interface is provided, use that one. 
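The keyfile hunks above only touch parameter spellings, but the surrounding `encryptKey`/`decryptKey` procs are thin wrappers around nimcrypto's AES-128-CTR mode. A rough round-trip sketch of that pattern (key, IV and plaintext are made-up values here, not the keyfile's real password-derived key):

```nim
import nimcrypto

# Illustrative AES-128-CTR round trip in the style of encryptKey/decryptKey.
# Key and IV sizes are aes128.sizeKey and aes128.sizeBlock (16 bytes each);
# a real keyfile derives the key from the password, this just uses zeroes.
var
  key: array[16, byte]
  iv: array[16, byte]
  secret = [byte 0xde, 0xad, 0xbe, 0xef]
  encrypted: array[4, byte]
  recovered: array[4, byte]

var ectx: CTR[aes128]
ectx.init(key, iv)
ectx.encrypt(secret, encrypted)
ectx.clear()

var dctx: CTR[aes128]
dctx.init(key, iv)
dctx.decrypt(encrypted, recovered)
dctx.clear()

doAssert recovered == secret
```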
- return (some(ValidIpAddress.init(bindIP)), some(tcpPort), some(udpPort)) + return (some(ValidIpAddress.init(bindIp)), some(tcpPort), some(udpPort)) else: return setupNat(natConfig.nat, tcpPort, udpPort, clientId) of NatNone: - let bindAddress = initTAddress(bindIP, Port(0)) + let bindAddress = initTAddress(bindIp, Port(0)) if bindAddress.isAnyLocal(): let ip = getRouteIpv4() if ip.isErr(): @@ -345,7 +345,7 @@ proc setupAddress*(natConfig: NatConfig, bindIp: ValidIpAddress, return (none(ValidIpAddress), some(tcpPort), some(udpPort)) elif bindAddress.isPublic(): # When a specific public interface is provided, use that one. - return (some(ValidIpAddress.init(bindIP)), some(tcpPort), some(udpPort)) + return (some(ValidIpAddress.init(bindIp)), some(tcpPort), some(udpPort)) else: error "Bind IP is not a public IP address. Should not use --nat:none option" return (none(ValidIpAddress), some(tcpPort), some(udpPort)) diff --git a/eth/p2p/auth.nim b/eth/p2p/auth.nim index 7249287..a76b3b1 100644 --- a/eth/p2p/auth.nim +++ b/eth/p2p/auth.nim @@ -53,7 +53,7 @@ type HandshakeFlag* = enum Initiator, ## `Handshake` owner is connection initiator Responder, ## `Handshake` owner is connection responder - Eip8 ## Flag indicates that EIP-8 handshake is used + EIP8 ## Flag indicates that EIP-8 handshake is used AuthError* = enum EcdhError = "auth: ECDH shared secret could not be calculated" @@ -127,7 +127,7 @@ proc tryInit*( proc authMessagePreEIP8(h: var Handshake, rng: var BrHmacDrbgContext, pubkey: PublicKey, - output: var openarray[byte], + output: var openArray[byte], outlen: var int, flag: byte = 0, encrypt: bool = true): AuthResult[void] = @@ -166,7 +166,7 @@ proc authMessagePreEIP8(h: var Handshake, proc authMessageEIP8(h: var Handshake, rng: var BrHmacDrbgContext, pubkey: PublicKey, - output: var openarray[byte], + output: var openArray[byte], outlen: var int, flag: byte = 0, encrypt: bool = true): AuthResult[void] = @@ -225,7 +225,7 @@ proc authMessageEIP8(h: var Handshake, proc ackMessagePreEIP8(h: var Handshake, rng: var BrHmacDrbgContext, - output: var openarray[byte], + output: var openArray[byte], outlen: var int, flag: byte = 0, encrypt: bool = true): AuthResult[void] = @@ -252,7 +252,7 @@ proc ackMessagePreEIP8(h: var Handshake, proc ackMessageEIP8(h: var Handshake, rng: var BrHmacDrbgContext, - output: var openarray[byte], + output: var openArray[byte], outlen: var int, flag: byte = 0, encrypt: bool = true): AuthResult[void] = @@ -314,7 +314,7 @@ template ackSize*(h: Handshake, encrypt: bool = true): int = proc authMessage*(h: var Handshake, rng: var BrHmacDrbgContext, pubkey: PublicKey, - output: var openarray[byte], + output: var openArray[byte], outlen: var int, flag: byte = 0, encrypt: bool = true): AuthResult[void] = ## Create new AuthMessage for specified `pubkey` and store it inside @@ -325,7 +325,7 @@ proc authMessage*(h: var Handshake, rng: var BrHmacDrbgContext, authMessagePreEIP8(h, rng, pubkey, output, outlen, flag, encrypt) proc ackMessage*(h: var Handshake, rng: var BrHmacDrbgContext, - output: var openarray[byte], + output: var openArray[byte], outlen: var int, flag: byte = 0, encrypt: bool = true): AuthResult[void] = ## Create new AckMessage and store it inside of `output`, size of generated @@ -335,7 +335,7 @@ proc ackMessage*(h: var Handshake, rng: var BrHmacDrbgContext, else: ackMessagePreEIP8(h, rng, output, outlen, flag, encrypt) -proc decodeAuthMessageV4(h: var Handshake, m: openarray[byte]): AuthResult[void] = +proc decodeAuthMessageV4(h: var Handshake, m: 
openArray[byte]): AuthResult[void] = ## Decodes V4 AuthMessage. var buffer: array[PlainAuthMessageV4Length, byte] @@ -361,7 +361,7 @@ proc decodeAuthMessageV4(h: var Handshake, m: openarray[byte]): AuthResult[void] ok() -proc decodeAuthMessageEip8(h: var Handshake, m: openarray[byte]): AuthResult[void] = +proc decodeAuthMessageEIP8(h: var Handshake, m: openArray[byte]): AuthResult[void] = ## Decodes EIP-8 AuthMessage. let size = uint16.fromBytesBE(m) h.expectedLength = int(size) + 2 @@ -408,7 +408,7 @@ proc decodeAuthMessageEip8(h: var Handshake, m: openarray[byte]): AuthResult[voi except CatchableError: err(RlpError) -proc decodeAckMessageEip8*(h: var Handshake, m: openarray[byte]): AuthResult[void] = +proc decodeAckMessageEIP8*(h: var Handshake, m: openArray[byte]): AuthResult[void] = ## Decodes EIP-8 AckMessage. let size = uint16.fromBytesBE(m) @@ -442,7 +442,7 @@ proc decodeAckMessageEip8*(h: var Handshake, m: openarray[byte]): AuthResult[voi except CatchableError: err(RlpError) -proc decodeAckMessageV4(h: var Handshake, m: openarray[byte]): AuthResult[void] = +proc decodeAckMessageV4(h: var Handshake, m: openArray[byte]): AuthResult[void] = ## Decodes V4 AckMessage. var buffer: array[PlainAckMessageV4Length, byte] @@ -457,7 +457,7 @@ proc decodeAckMessageV4(h: var Handshake, m: openarray[byte]): AuthResult[void] ok() -proc decodeAuthMessage*(h: var Handshake, input: openarray[byte]): AuthResult[void] = +proc decodeAuthMessage*(h: var Handshake, input: openArray[byte]): AuthResult[void] = ## Decodes AuthMessage from `input`. if len(input) < AuthMessageV4Length: return err(IncompleteError) @@ -466,12 +466,12 @@ proc decodeAuthMessage*(h: var Handshake, input: openarray[byte]): AuthResult[vo let res = h.decodeAuthMessageV4(input) if res.isOk(): return res - let res = h.decodeAuthMessageEip8(input) + let res = h.decodeAuthMessageEIP8(input) if res.isOk(): h.flags.incl(EIP8) res -proc decodeAckMessage*(h: var Handshake, input: openarray[byte]): AuthResult[void] = +proc decodeAckMessage*(h: var Handshake, input: openArray[byte]): AuthResult[void] = ## Decodes AckMessage from `input`. if len(input) < AckMessageV4Length: return err(IncompleteError) @@ -479,13 +479,13 @@ proc decodeAckMessage*(h: var Handshake, input: openarray[byte]): AuthResult[voi let res = h.decodeAckMessageV4(input) if res.isOk(): return res - let res = h.decodeAckMessageEip8(input) + let res = h.decodeAckMessageEIP8(input) if res.isOk(): h.flags.incl(EIP8) res proc getSecrets*( - h: Handshake, authmsg: openarray[byte], - ackmsg: openarray[byte]): ConnectionSecret = + h: Handshake, authmsg: openArray[byte], + ackmsg: openArray[byte]): ConnectionSecret = ## Derive secrets from handshake `h` using encrypted AuthMessage `authmsg` and ## encrypted AckMessage `ackmsg`. 
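Several of the auth procs above share one calling convention: the caller supplies a pre-allocated buffer as `output: var openArray[byte]` and the proc reports how many bytes it wrote through `outlen: var int`. A stripped-down sketch of that convention (the proc and its payload are hypothetical):

```nim
# Hypothetical illustration of the output/outlen convention used by
# authMessage/ackMessage above; it writes two bytes into a caller buffer.
proc fillGreeting(output: var openArray[byte], outlen: var int) =
  let payload = [byte 0x68, 0x69]   # "hi"
  for i, b in payload:
    output[i] = b
  outlen = payload.len

var
  buf: array[64, byte]
  written = 0
fillGreeting(buf, written)
doAssert written == 2 and buf[0] == 0x68
```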
var diff --git a/eth/p2p/blockchain_utils.nim b/eth/p2p/blockchain_utils.nim index 064de96..b2ad84c 100644 --- a/eth/p2p/blockchain_utils.nim +++ b/eth/p2p/blockchain_utils.nim @@ -30,7 +30,7 @@ proc getBlockHeaders*(db: AbstractChainDB, req: BlocksRequest): seq[BlockHeader] template fetcher*(fetcherName, fetchingFunc, InputType, ResultType: untyped) = proc fetcherName*(db: AbstractChainDB, - lookups: openarray[InputType]): seq[ResultType] {.gcsafe.} = + lookups: openArray[InputType]): seq[ResultType] {.gcsafe.} = for lookup in lookups: let fetched = fetchingFunc(db, lookup) if fetched.hasData: @@ -47,6 +47,6 @@ fetcher getProofs, getProof, ProofRequest, Blob fetcher getHeaderProofs, getHeaderProof, ProofRequest, Blob proc getHelperTrieProofs*(db: AbstractChainDB, - reqs: openarray[HelperTrieProofRequest], + reqs: openArray[HelperTrieProofRequest], outNodes: var seq[Blob], outAuxData: var seq[Blob]) = discard diff --git a/eth/p2p/discovery.nim b/eth/p2p/discovery.nim index 238a276..ede6323 100644 --- a/eth/p2p/discovery.nim +++ b/eth/p2p/discovery.nim @@ -141,7 +141,7 @@ proc sendNeighbours*(d: DiscoveryProtocol, node: Node, neighbours: seq[Node]) = template flush() = block: let payload = rlp.encode((nodes, expiration())) - let msg = pack(cmdNeighbours, payload, d.privkey) + let msg = pack(cmdNeighbours, payload, d.privKey) trace "Neighbours to", node, nodes d.send(node, msg) nodes.setLen(0) @@ -155,7 +155,7 @@ proc sendNeighbours*(d: DiscoveryProtocol, node: Node, neighbours: seq[Node]) = if nodes.len != 0: flush() proc newDiscoveryProtocol*(privKey: PrivateKey, address: Address, - bootstrapNodes: openarray[ENode], rng = newRng() + bootstrapNodes: openArray[ENode], rng = newRng() ): DiscoveryProtocol = result.new() result.privKey = privKey @@ -214,7 +214,7 @@ proc recvFindNode(d: DiscoveryProtocol, node: Node, payload: openArray[byte]) let rng = rlp.listElem(0).toBytes # Check for pubkey len if rng.len == 64: - let nodeId = readUIntBE[256](rng[32 .. ^1]) + let nodeId = readUintBE[256](rng[32 .. 
^1]) d.kademlia.recvFindNode(node, nodeId) else: trace "Invalid target public key received" diff --git a/eth/p2p/discoveryv5/dcli.nim b/eth/p2p/discoveryv5/dcli.nim index f0c89f9..c038f7b 100644 --- a/eth/p2p/discoveryv5/dcli.nim +++ b/eth/p2p/discoveryv5/dcli.nim @@ -187,7 +187,7 @@ proc run(config: DiscoveryConf) = echo "No Talk Response message returned" of noCommand: d.start() - waitfor(discover(d)) + waitFor(discover(d)) when isMainModule: let config = DiscoveryConf.load() diff --git a/eth/p2p/discoveryv5/encoding.nim b/eth/p2p/discoveryv5/encoding.nim index 64d59ba..7c35caf 100644 --- a/eth/p2p/discoveryv5/encoding.nim +++ b/eth/p2p/discoveryv5/encoding.nim @@ -89,7 +89,7 @@ type Codec* = object localNode*: Node privKey*: PrivateKey - handshakes*: Table[HandShakeKey, Challenge] + handshakes*: Table[HandshakeKey, Challenge] sessions*: Sessions DecodeResult*[T] = Result[T, cstring] @@ -101,7 +101,7 @@ func hash*(key: HandshakeKey): Hash = result = key.nodeId.hash !& key.address.hash result = !$result -proc idHash(challengeData, ephkey: openarray[byte], nodeId: NodeId): +proc idHash(challengeData, ephkey: openArray[byte], nodeId: NodeId): MDigest[256] = var ctx: sha256 ctx.init() @@ -113,16 +113,16 @@ proc idHash(challengeData, ephkey: openarray[byte], nodeId: NodeId): ctx.clear() proc createIdSignature*(privKey: PrivateKey, challengeData, - ephKey: openarray[byte], nodeId: NodeId): SignatureNR = + ephKey: openArray[byte], nodeId: NodeId): SignatureNR = signNR(privKey, SkMessage(idHash(challengeData, ephKey, nodeId).data)) -proc verifyIdSignature*(sig: SignatureNR, challengeData, ephKey: openarray[byte], - nodeId: NodeId, pubKey: PublicKey): bool = +proc verifyIdSignature*(sig: SignatureNR, challengeData, ephKey: openArray[byte], + nodeId: NodeId, pubkey: PublicKey): bool = let h = idHash(challengeData, ephKey, nodeId) - verify(sig, SkMessage(h.data), pubKey) + verify(sig, SkMessage(h.data), pubkey) -proc deriveKeys*(n1, n2: NodeID, priv: PrivateKey, pub: PublicKey, - challengeData: openarray[byte]): HandshakeSecrets = +proc deriveKeys*(n1, n2: NodeId, priv: PrivateKey, pub: PublicKey, + challengeData: openArray[byte]): HandshakeSecrets = let eph = ecdhRawFull(priv, pub) var info = newSeqOfCap[byte](keyAgreementPrefix.len + 32 * 2) @@ -138,7 +138,7 @@ proc deriveKeys*(n1, n2: NodeID, priv: PrivateKey, pub: PublicKey, toOpenArray(res, 0, sizeof(secrets) - 1)) secrets -proc encryptGCM*(key: AesKey, nonce, pt, authData: openarray[byte]): seq[byte] = +proc encryptGCM*(key: AesKey, nonce, pt, authData: openArray[byte]): seq[byte] = var ectx: GCM[aes128] ectx.init(key, nonce, authData) result = newSeq[byte](pt.len + gcmTagSize) @@ -146,7 +146,7 @@ proc encryptGCM*(key: AesKey, nonce, pt, authData: openarray[byte]): seq[byte] = ectx.getTag(result.toOpenArray(pt.len, result.high)) ectx.clear() -proc decryptGCM*(key: AesKey, nonce, ct, authData: openarray[byte]): +proc decryptGCM*(key: AesKey, nonce, ct, authData: openArray[byte]): Option[seq[byte]] = if ct.len <= gcmTagSize: debug "cipher is missing tag", len = ct.len @@ -165,14 +165,14 @@ proc decryptGCM*(key: AesKey, nonce, ct, authData: openarray[byte]): return some(res) -proc encryptHeader*(id: NodeId, iv, header: openarray[byte]): seq[byte] = +proc encryptHeader*(id: NodeId, iv, header: openArray[byte]): seq[byte] = var ectx: CTR[aes128] ectx.init(id.toByteArrayBE().toOpenArray(0, 15), iv) result = newSeq[byte](header.len) ectx.encrypt(header, result) ectx.clear() -proc hasHandshake*(c: Codec, key: HandShakeKey): bool = +proc 
hasHandshake*(c: Codec, key: HandshakeKey): bool = c.handshakes.hasKey(key) proc encodeStaticHeader*(flag: Flag, nonce: AESGCMNonce, authSize: int): @@ -185,7 +185,7 @@ proc encodeStaticHeader*(flag: Flag, nonce: AESGCMNonce, authSize: int): result.add((uint16(authSize)).toBytesBE()) proc encodeMessagePacket*(rng: var BrHmacDrbgContext, c: var Codec, - toId: NodeID, toAddr: Address, message: openarray[byte]): + toId: NodeId, toAddr: Address, message: openArray[byte]): (seq[byte], AESGCMNonce) = var nonce: AESGCMNonce brHmacDrbgGenerate(rng, nonce) # Random AESGCM nonce @@ -228,7 +228,7 @@ proc encodeMessagePacket*(rng: var BrHmacDrbgContext, c: var Codec, return (packet, nonce) proc encodeWhoareyouPacket*(rng: var BrHmacDrbgContext, c: var Codec, - toId: NodeID, toAddr: Address, requestNonce: AESGCMNonce, recordSeq: uint64, + toId: NodeId, toAddr: Address, requestNonce: AESGCMNonce, recordSeq: uint64, pubkey: Option[PublicKey]): seq[byte] = var idNonce: IdNonce brHmacDrbgGenerate(rng, idNonce) @@ -263,14 +263,14 @@ proc encodeWhoareyouPacket*(rng: var BrHmacDrbgContext, c: var Codec, recordSeq: recordSeq, challengeData: @iv & header) challenge = Challenge(whoareyouData: whoareyouData, pubkey: pubkey) - key = HandShakeKey(nodeId: toId, address: toAddr) + key = HandshakeKey(nodeId: toId, address: toAddr) c.handshakes[key] = challenge return packet proc encodeHandshakePacket*(rng: var BrHmacDrbgContext, c: var Codec, - toId: NodeID, toAddr: Address, message: openarray[byte], + toId: NodeId, toAddr: Address, message: openArray[byte], whoareyouData: WhoareyouData, pubkey: PublicKey): seq[byte] = var header: seq[byte] var nonce: AESGCMNonce @@ -321,7 +321,7 @@ proc encodeHandshakePacket*(rng: var BrHmacDrbgContext, c: var Codec, return packet -proc decodeHeader*(id: NodeId, iv, maskedHeader: openarray[byte]): +proc decodeHeader*(id: NodeId, iv, maskedHeader: openArray[byte]): DecodeResult[(StaticHeader, seq[byte])] = # No need to check staticHeader size as that is included in minimum packet # size check in decodePacket @@ -361,7 +361,7 @@ proc decodeHeader*(id: NodeId, iv, maskedHeader: openarray[byte]): ok((StaticHeader(authdataSize: authdataSize, flag: flag, nonce: nonce), staticHeader & authdata)) -proc decodeMessage*(body: openarray[byte]): DecodeResult[Message] = +proc decodeMessage*(body: openArray[byte]): DecodeResult[Message] = ## Decodes to the specific `Message` type. 
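The `encryptGCM`/`decryptGCM` helpers above wrap nimcrypto's AES-GCM mode: encryption appends the 16-byte authentication tag to the ciphertext, and decryption recomputes the tag and compares it before trusting the plaintext. A self-contained sketch of that round trip (key, nonce and associated data are dummy values):

```nim
import nimcrypto

const gcmTagSize = 16    # same tag size the discv5 codec uses

var
  key: array[16, byte]
  nonce: array[12, byte]
  aad = [byte 0xca, 0xfe]
  pt = [byte 1, 2, 3, 4]

# Encrypt: ciphertext followed by the authentication tag.
var ectx: GCM[aes128]
ectx.init(key, nonce, aad)
var ct = newSeq[byte](pt.len + gcmTagSize)
ectx.encrypt(pt, ct)
ectx.getTag(ct.toOpenArray(pt.len, ct.high))
ectx.clear()

# Decrypt and recompute the tag for verification.
var dctx: GCM[aes128]
dctx.init(key, nonce, aad)
var recovered = newSeq[byte](pt.len)
dctx.decrypt(ct.toOpenArray(0, pt.len - 1), recovered)
var tag: array[gcmTagSize, byte]
dctx.getTag(tag)
dctx.clear()

doAssert recovered == @pt
doAssert @tag == ct[pt.len .. ct.high]
```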
if body.len < 1: return err("No message data") @@ -388,7 +388,7 @@ proc decodeMessage*(body: openarray[byte]): DecodeResult[Message] = of unused: return err("Invalid message type") of ping: rlp.decode(message.ping) of pong: rlp.decode(message.pong) - of findNode: rlp.decode(message.findNode) + of findnode: rlp.decode(message.findNode) of nodes: rlp.decode(message.nodes) of talkreq: rlp.decode(message.talkreq) of talkresp: rlp.decode(message.talkresp) @@ -484,7 +484,7 @@ proc decodeHandshakePacket(c: var Codec, fromAddr: Address, nonce: AESGCMNonce, if header.len < staticHeaderSize + authdataHeadSize + int(sigSize) + int(ephKeySize): return err("Invalid header for handshake message packet") - let key = HandShakeKey(nodeId: srcId, address: fromAddr) + let key = HandshakeKey(nodeId: srcId, address: fromAddr) var challenge: Challenge if not c.handshakes.pop(key, challenge): return err("No challenge found: timed out or unsolicited packet") @@ -508,7 +508,7 @@ proc decodeHandshakePacket(c: var Codec, fromAddr: Address, nonce: AESGCMNonce, except RlpError, ValueError: return err("Invalid encoded ENR") - var pubKey: PublicKey + var pubkey: PublicKey var newNode: Option[Node] # TODO: Shall we return Node or Record? Record makes more sense, but we do # need the pubkey and the nodeid @@ -520,12 +520,12 @@ proc decodeHandshakePacket(c: var Codec, fromAddr: Address, nonce: AESGCMNonce, # Note: Not checking if the record seqNum is higher than the one we might # have stored as it comes from this node directly. - pubKey = node.pubKey + pubkey = node.pubkey newNode = some(node) else: # TODO: Hmm, should we still verify node id of the ENR of this node? if challenge.pubkey.isSome(): - pubKey = challenge.pubkey.get() + pubkey = challenge.pubkey.get() else: # We should have received a Record in this case. return err("Missing ENR in handshake packet") diff --git a/eth/p2p/discoveryv5/enr.nim b/eth/p2p/discoveryv5/enr.nim index 9065516..baeb9d9 100644 --- a/eth/p2p/discoveryv5/enr.nim +++ b/eth/p2p/discoveryv5/enr.nim @@ -94,9 +94,9 @@ proc `==`(a, b: Field): bool = proc cmp(a, b: FieldPair): int = cmp(a[0], b[0]) proc makeEnrRaw(seqNum: uint64, pk: PrivateKey, - pairs: openarray[FieldPair]): EnrResult[seq[byte]] = + pairs: openArray[FieldPair]): EnrResult[seq[byte]] = proc append(w: var RlpWriter, seqNum: uint64, - pairs: openarray[FieldPair]): seq[byte] = + pairs: openArray[FieldPair]): seq[byte] = w.append(seqNum) for (k, v) in pairs: w.append(k) @@ -124,7 +124,7 @@ proc makeEnrRaw(seqNum: uint64, pk: PrivateKey, ok(raw) proc makeEnrAux(seqNum: uint64, pk: PrivateKey, - pairs: openarray[FieldPair]): EnrResult[Record] = + pairs: openArray[FieldPair]): EnrResult[Record] = var record: Record record.pairs = @pairs record.seqNum = seqNum @@ -185,7 +185,7 @@ proc init*(T: type Record, seqNum: uint64, pk: PrivateKey, ip: Option[ValidIpAddress], tcpPort, udpPort: Option[Port], - extraFields: openarray[FieldPair] = []): + extraFields: openArray[FieldPair] = []): EnrResult[T] = ## Initialize a `Record` with given sequence number, private key, optional ## ip address, tcp port, udp port, and optional custom k:v pairs. @@ -263,7 +263,7 @@ proc find(r: Record, key: string): Option[int] = return some(i) proc update*(record: var Record, pk: PrivateKey, - fieldPairs: openarray[FieldPair]): EnrResult[void] = + fieldPairs: openArray[FieldPair]): EnrResult[void] = ## Update a `Record` k:v pairs. 
## ## In case any of the k:v pairs is updated or added (new), the sequence number @@ -306,7 +306,7 @@ proc update*(record: var Record, pk: PrivateKey, proc update*(r: var Record, pk: PrivateKey, ip: Option[ValidIpAddress], tcpPort, udpPort: Option[Port] = none[Port](), - extraFields: openarray[FieldPair] = []): + extraFields: openArray[FieldPair] = []): EnrResult[void] = ## Update a `Record` with given ip address, tcp port, udp port and optional ## custom k:v pairs. @@ -362,7 +362,7 @@ proc contains*(r: Record, fp: (string, seq[byte])): bool = if field.get() == fp[1]: return true -proc verifySignatureV4(r: Record, sigData: openarray[byte], content: seq[byte]): +proc verifySignatureV4(r: Record, sigData: openArray[byte], content: seq[byte]): bool = let publicKey = r.get(PublicKey) if publicKey.isSome: @@ -441,7 +441,7 @@ proc fromBytesAux(r: var Record): bool {.raises: [RlpError, Defect].} = verifySignature(r) -proc fromBytes*(r: var Record, s: openarray[byte]): bool = +proc fromBytes*(r: var Record, s: openArray[byte]): bool = ## Loads ENR from rlp-encoded bytes, and validates the signature. r.raw = @s try: diff --git a/eth/p2p/discoveryv5/hkdf.nim b/eth/p2p/discoveryv5/hkdf.nim index 5c6a676..88ee7c9 100644 --- a/eth/p2p/discoveryv5/hkdf.nim +++ b/eth/p2p/discoveryv5/hkdf.nim @@ -1,7 +1,7 @@ import nimcrypto -proc hkdf*(HashType: typedesc, ikm, salt, info: openarray[byte], - output: var openarray[byte]) = +proc hkdf*(HashType: typedesc, ikm, salt, info: openArray[byte], + output: var openArray[byte]) = var ctx: HMAC[HashType] ctx.init(salt) ctx.update(ikm) diff --git a/eth/p2p/discoveryv5/messages.nim b/eth/p2p/discoveryv5/messages.nim index d6fbf56..ef31759 100644 --- a/eth/p2p/discoveryv5/messages.nim +++ b/eth/p2p/discoveryv5/messages.nim @@ -77,7 +77,7 @@ type of pong: pong*: PongMessage of findnode: - findNode*: FindNodeMessage + findnode*: FindNodeMessage of nodes: nodes*: NodesMessage of talkreq: @@ -98,7 +98,7 @@ type template messageKind*(T: typedesc[SomeMessage]): MessageKind = when T is PingMessage: ping elif T is PongMessage: pong - elif T is FindNodeMessage: findNode + elif T is FindNodeMessage: findnode elif T is NodesMessage: nodes elif T is TalkReqMessage: talkreq elif T is TalkRespMessage: talkresp diff --git a/eth/p2p/discoveryv5/node.nim b/eth/p2p/discoveryv5/node.nim index 6efb637..3b311b9 100644 --- a/eth/p2p/discoveryv5/node.nim +++ b/eth/p2p/discoveryv5/node.nim @@ -60,7 +60,7 @@ func newNode*(r: Record): Result[Node, cstring] = func update*(n: Node, pk: PrivateKey, ip: Option[ValidIpAddress], tcpPort, udpPort: Option[Port] = none[Port](), - extraFields: openarray[FieldPair] = []): Result[void, cstring] = + extraFields: openArray[FieldPair] = []): Result[void, cstring] = ? 
n.record.update(pk, ip, tcpPort, udpPort, extraFields) if ip.isSome(): diff --git a/eth/p2p/discoveryv5/nodes_verification.nim b/eth/p2p/discoveryv5/nodes_verification.nim index 79e6821..45fd89f 100644 --- a/eth/p2p/discoveryv5/nodes_verification.nim +++ b/eth/p2p/discoveryv5/nodes_verification.nim @@ -2,7 +2,7 @@ import std/[sets, options], - stew/results, stew/shims/net, chronicles, chronos, + stew/results, stew/shims/net, chronicles, chronos, "."/[node, enr, routing_table] logScope: @@ -25,7 +25,7 @@ proc validIp(sender, address: IpAddress): bool = # https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml return true -proc verifyNodesRecords(enrs: openarray[Record], fromNode: Node, nodesLimit: int, +proc verifyNodesRecords(enrs: openArray[Record], fromNode: Node, nodesLimit: int, distances: Option[seq[uint16]]): seq[Node] = ## Verify and convert ENRs to a sequence of nodes. Only ENRs that pass ## verification will be added. ENRs are verified for duplicates, invalid @@ -79,8 +79,8 @@ proc verifyNodesRecords(enrs: openarray[Record], fromNode: Node, nodesLimit: int seen.incl(n) result.add(n) -proc verifyNodesRecords*(enrs: openarray[Record], fromNode: Node, nodesLimit: int): seq[Node] = +proc verifyNodesRecords*(enrs: openArray[Record], fromNode: Node, nodesLimit: int): seq[Node] = verifyNodesRecords(enrs, fromNode, nodesLimit, none[seq[uint16]]()) -proc verifyNodesRecords*(enrs: openarray[Record], fromNode: Node, nodesLimit: int, distances: seq[uint16]): seq[Node] = +proc verifyNodesRecords*(enrs: openArray[Record], fromNode: Node, nodesLimit: int, distances: seq[uint16]): seq[Node] = verifyNodesRecords(enrs, fromNode, nodesLimit, some[seq[uint16]](distances)) diff --git a/eth/p2p/discoveryv5/protocol.nim b/eth/p2p/discoveryv5/protocol.nim index c112a9e..ec08cce 100644 --- a/eth/p2p/discoveryv5/protocol.nim +++ b/eth/p2p/discoveryv5/protocol.nim @@ -173,7 +173,7 @@ proc addNode*(d: Protocol, enr: EnrUri): bool = ## Returns false if no valid ENR URI, or on the conditions of `addNode` from ## an `Record`. var r: Record - let res = r.fromUri(enr) + let res = r.fromURI(enr) if res: return d.addNode(r) @@ -217,7 +217,7 @@ func getRecord*(d: Protocol): Record = d.localNode.record proc updateRecord*( - d: Protocol, enrFields: openarray[(string, seq[byte])]): DiscResult[void] = + d: Protocol, enrFields: openArray[(string, seq[byte])]): DiscResult[void] = ## Update the ENR of the local node with provided `enrFields` k:v pairs. 
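For context, the `Record.init` and `toFieldPair` signatures being touched above are typically used along the lines of the following sketch (assuming `eth/keys`, `eth/p2p/discoveryv5/enr` and `stew/shims/net` are importable; the extra field name and all values are invented for illustration):

```nim
import std/options
import eth/keys, eth/p2p/discoveryv5/enr
import stew/shims/net

# Hypothetical ENR construction using the Record.init signature shown above.
let
  rng = newRng()
  pk = PrivateKey.random(rng[])
  rec = Record.init(
    seqNum = 1,
    pk = pk,
    ip = some(ValidIpAddress.init("127.0.0.1")),
    tcpPort = some(Port(30303)),
    udpPort = some(Port(30303)),
    extraFields = [toFieldPair("foo", @[byte 1, 2, 3])]
  ).expect("valid record parameters")

echo rec.toURI()   # prints the textual enr:-... form
```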
let fields = mapIt(enrFields, toFieldPair(it[0], it[1])) d.localNode.record.update(d.privateKey, fields) @@ -246,7 +246,7 @@ proc send(d: Protocol, n: Node, data: seq[byte]) = d.send(n.address.get(), data) proc sendNodes(d: Protocol, toId: NodeId, toAddr: Address, reqId: RequestId, - nodes: openarray[Node]) = + nodes: openArray[Node]) = proc sendNodes(d: Protocol, toId: NodeId, toAddr: Address, message: NodesMessage, reqId: RequestId) {.nimcall.} = let (data, _) = encodeMessagePacket(d.rng[], d.codec, toId, toAddr, @@ -332,9 +332,9 @@ proc handleMessage(d: Protocol, srcId: NodeId, fromAddr: Address, of ping: discovery_message_requests_incoming.inc() d.handlePing(srcId, fromAddr, message.ping, message.reqId) - of findNode: + of findnode: discovery_message_requests_incoming.inc() - d.handleFindNode(srcId, fromAddr, message.findNode, message.reqId) + d.handleFindNode(srcId, fromAddr, message.findnode, message.reqId) of talkreq: discovery_message_requests_incoming.inc() d.handleTalkReq(srcId, fromAddr, message.talkreq, message.reqId) @@ -362,7 +362,7 @@ proc registerTalkProtocol*(d: Protocol, protocolId: seq[byte], proc sendWhoareyou(d: Protocol, toId: NodeId, a: Address, requestNonce: AESGCMNonce, node: Option[Node]) = - let key = HandShakeKey(nodeId: toId, address: a) + let key = HandshakeKey(nodeId: toId, address: a) if not d.codec.hasHandshake(key): let recordSeq = if node.isSome(): node.get().record.seqNum @@ -791,7 +791,7 @@ proc populateTable*(d: Protocol) {.async.} = proc revalidateNode*(d: Protocol, n: Node) {.async.} = let pong = await d.ping(n) - if pong.isOK(): + if pong.isOk(): let res = pong.get() if res.enrSeq > n.record.seqNum: # Request new ENR @@ -883,8 +883,8 @@ proc ipMajorityLoop(d: Protocol) {.async.} = proc newProtocol*(privKey: PrivateKey, enrIp: Option[ValidIpAddress], enrTcpPort, enrUdpPort: Option[Port], - localEnrFields: openarray[(string, seq[byte])] = [], - bootstrapRecords: openarray[Record] = [], + localEnrFields: openArray[(string, seq[byte])] = [], + bootstrapRecords: openArray[Record] = [], previousRecord = none[enr.Record](), bindPort: Port, bindIp = IPv4_any(), diff --git a/eth/p2p/discoveryv5/random2.nim b/eth/p2p/discoveryv5/random2.nim index a299b8b..0ec72f0 100644 --- a/eth/p2p/discoveryv5/random2.nim +++ b/eth/p2p/discoveryv5/random2.nim @@ -13,10 +13,10 @@ proc rand*(rng: var BrHmacDrbgContext, max: Natural): int = if x < randMax - (randMax mod (uint64(max) + 1'u64)): # against modulo bias return int(x mod (uint64(max) + 1'u64)) -proc sample*[T](rng: var BrHmacDrbgContext, a: openarray[T]): T = +proc sample*[T](rng: var BrHmacDrbgContext, a: openArray[T]): T = result = a[rng.rand(a.high)] -proc shuffle*[T](rng: var BrHmacDrbgContext, a: var openarray[T]) = +proc shuffle*[T](rng: var BrHmacDrbgContext, a: var openArray[T]) = for i in countdown(a.high, 1): let j = rng.rand(i) swap(a[i], a[j]) diff --git a/eth/p2p/discoveryv5/routing_table.nim b/eth/p2p/discoveryv5/routing_table.nim index 25fd745..136ad39 100644 --- a/eth/p2p/discoveryv5/routing_table.nim +++ b/eth/p2p/discoveryv5/routing_table.nim @@ -93,7 +93,7 @@ type NoAddress # xor distance functions -func distance*(a, b: NodeId): Uint256 = +func distance*(a, b: NodeId): UInt256 = ## Calculate the distance to a NodeId. 
a xor b @@ -137,7 +137,7 @@ const XorDistanceCalculator* = DistanceCalculator(calculateDistance: distance, calculateLogDistance: logDistance, calculateIdAtDistance: idAtDistance) -func distance*(r: RoutingTable, a, b: NodeId): Uint256 = +func distance*(r: RoutingTable, a, b: NodeId): UInt256 = r.distanceCalculator.calculateDistance(a, b) func logDistance*(r: RoutingTable, a, b: NodeId): uint16 = @@ -172,7 +172,7 @@ proc ipLimitInc(r: var RoutingTable, b: KBucket, n: Node): bool = return false # Check ip limit for routing table if not r.ipLimits.inc(ip): - b.iplimits.dec(ip) + b.ipLimits.dec(ip) return false return true @@ -225,7 +225,7 @@ proc inRange(k: KBucket, n: Node): bool = proc contains(k: KBucket, n: Node): bool = n in k.nodes -proc binaryGetBucketForNode*(buckets: openarray[KBucket], +proc binaryGetBucketForNode*(buckets: openArray[KBucket], id: NodeId): KBucket = ## Given a list of ordered buckets, returns the bucket for a given `NodeId`. ## Returns nil if no bucket in range for given `id` is found. @@ -233,13 +233,13 @@ proc binaryGetBucketForNode*(buckets: openarray[KBucket], cmp(a.iend, b) # Prevent cases where `lowerBound` returns an out of range index e.g. at empty - # openarray, or when the id is out range for all buckets in the openarray. + # openArray, or when the id is out range for all buckets in the openArray. if bucketPos < buckets.len: let bucket = buckets[bucketPos] if bucket.istart <= id and id <= bucket.iend: result = bucket -proc computeSharedPrefixBits(nodes: openarray[NodeId]): int = +proc computeSharedPrefixBits(nodes: openArray[NodeId]): int = ## Count the number of prefix bits shared by all nodes. if nodes.len < 2: return ID_SIZE @@ -266,7 +266,7 @@ proc init*(T: type RoutingTable, localNode: Node, bitsPerHop = DefaultBitsPerHop ## `bitsPerHop` is default set to 5 as recommended by original Kademlia paper. RoutingTable( localNode: localNode, - buckets: @[KBucket.new(0.u256, high(Uint256), ipLimits.bucketIpLimit)], + buckets: @[KBucket.new(0.u256, high(UInt256), ipLimits.bucketIpLimit)], bitsPerHop: bitsPerHop, ipLimits: IpLimits(limit: ipLimits.tableIpLimit), distanceCalculator: distanceCalculator, @@ -478,7 +478,7 @@ proc neighboursAtDistances*(r: RoutingTable, distances: seq[uint16], proc len*(r: RoutingTable): int = for b in r.buckets: result += b.len -proc moveRight[T](arr: var openarray[T], a, b: int) = +proc moveRight[T](arr: var openArray[T], a, b: int) = ## In `arr` move elements in range [a, b] right by 1. 
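The routing-table hunks above are again only casing fixes (`Uint256` → `UInt256`), but they sit in the Kademlia XOR-metric code. A tiny worked example of that metric using stint's 256-bit integers:

```nim
import stint

# The discv5 node distance is simply XOR over the 256-bit node ids.
func distance(a, b: UInt256): UInt256 = a xor b

let
  a = u256(0b1100)
  b = u256(0b1010)

doAssert distance(a, b) == u256(0b0110)
doAssert distance(a, a) == 0.u256        # a node is at distance 0 from itself
```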
var t: T shallowCopy(t, arr[b + 1]) diff --git a/eth/p2p/ecies.nim b/eth/p2p/ecies.nim index d8dbb2b..d32167a 100644 --- a/eth/p2p/ecies.nim +++ b/eth/p2p/ecies.nim @@ -73,7 +73,7 @@ template eciesIvPos(): int = template eciesTagPos(size: int): int = 1 + sizeof(PublicKey) + aes128.sizeBlock + size -proc kdf*(data: openarray[byte]): array[KeyLength, byte] {.noInit.} = +proc kdf*(data: openArray[byte]): array[KeyLength, byte] {.noinit.} = ## NIST SP 800-56a Concatenation Key Derivation Function (see section 5.8.1) var ctx: sha256 var counter: uint32 @@ -93,9 +93,9 @@ proc kdf*(data: openarray[byte]): array[KeyLength, byte] {.noInit.} = ctx.clear() # clean ctx copyMem(addr result[0], addr storage[0], KeyLength) -proc eciesEncrypt*(rng: var BrHmacDrbgContext, input: openarray[byte], - output: var openarray[byte], pubkey: PublicKey, - sharedmac: openarray[byte] = emptyMac): EciesResult[void] = +proc eciesEncrypt*(rng: var BrHmacDrbgContext, input: openArray[byte], + output: var openArray[byte], pubkey: PublicKey, + sharedmac: openArray[byte] = emptyMac): EciesResult[void] = ## Encrypt data with ECIES method using given public key `pubkey`. ## ``input`` - input data ## ``output`` - output data @@ -156,10 +156,10 @@ proc eciesEncrypt*(rng: var BrHmacDrbgContext, input: openarray[byte], ok() -proc eciesDecrypt*(input: openarray[byte], - output: var openarray[byte], +proc eciesDecrypt*(input: openArray[byte], + output: var openArray[byte], seckey: PrivateKey, - sharedmac: openarray[byte] = emptyMac): EciesResult[void] = + sharedmac: openArray[byte] = emptyMac): EciesResult[void] = ## Decrypt data with ECIES method using given private key `seckey`. ## ``input`` - input data ## ``output`` - output data diff --git a/eth/p2p/kademlia.nim b/eth/p2p/kademlia.nim index 9e620d6..c904647 100644 --- a/eth/p2p/kademlia.nim +++ b/eth/p2p/kademlia.nim @@ -144,7 +144,7 @@ proc isFull(k: KBucket): bool = k.len == BUCKET_SIZE proc contains(k: KBucket, n: Node): bool = n in k.nodes -proc binaryGetBucketForNode(buckets: openarray[KBucket], n: Node): +proc binaryGetBucketForNode(buckets: openArray[KBucket], n: Node): KBucket {.raises: [ValueError, Defect].} = ## Given a list of ordered buckets, returns the bucket for a given node. let bucketPos = lowerBound(buckets, n.id) do(a: KBucket, b: NodeId) -> int: @@ -158,7 +158,7 @@ proc binaryGetBucketForNode(buckets: openarray[KBucket], n: Node): if result.isNil: raise newException(ValueError, "No bucket found for node with id " & $n.id) -proc computeSharedPrefixBits(nodes: openarray[Node]): int = +proc computeSharedPrefixBits(nodes: openArray[Node]): int = ## Count the number of prefix bits shared by all nodes. 
if nodes.len < 2: return ID_SIZE @@ -176,7 +176,7 @@ proc computeSharedPrefixBits(nodes: openarray[Node]): int = proc init(r: var RoutingTable, thisNode: Node) = r.thisNode = thisNode - r.buckets = @[newKBucket(0.u256, high(Uint256))] + r.buckets = @[newKBucket(0.u256, high(UInt256))] randomize() # for later `randomNodes` selection proc splitBucket(r: var RoutingTable, index: int) = diff --git a/eth/p2p/p2p_backends_helpers.nim b/eth/p2p/p2p_backends_helpers.nim index 89861c5..75f4750 100644 --- a/eth/p2p/p2p_backends_helpers.nim +++ b/eth/p2p/p2p_backends_helpers.nim @@ -32,7 +32,7 @@ proc initProtocolState*[T](state: T, x: Peer|EthereumNode) {.gcsafe, raises: [Defect].} = discard -proc initProtocolStates(peer: Peer, protocols: openarray[ProtocolInfo]) +proc initProtocolStates(peer: Peer, protocols: openArray[ProtocolInfo]) {.raises: [Defect].} = # Initialize all the active protocol states newSeq(peer.protocolStates, allProtocols.len) diff --git a/eth/p2p/p2p_protocol_dsl.nim b/eth/p2p/p2p_protocol_dsl.nim index 81fac96..bee4e1b 100644 --- a/eth/p2p/p2p_protocol_dsl.nim +++ b/eth/p2p/p2p_protocol_dsl.nim @@ -272,7 +272,7 @@ proc chooseFieldType(n: NimNode): NimNode = ## and selects the corresponding field type for use in the ## message object type (i.e. `p2p.hello`). ## - ## For now, only openarray types are remapped to sequences. + ## For now, only openArray types are remapped to sequences. result = n if n.kind == nnkBracketExpr and eqIdent(n[0], "openArray"): result = n.copyNimTree @@ -352,7 +352,7 @@ proc augmentUserHandler(p: P2PProtocol, userHandlerProc: NimNode, msgId = -1) = userHandlerProc.body.insert 0, prelude - # We allow the user handler to use `openarray` params, but we turn + # We allow the user handler to use `openArray` params, but we turn # those into sequences to make the `async` pragma happy. 
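The DSL comment above explains why `chooseFieldType` remaps `openArray` parameters: an `{.async.}` proc captures its parameters in a closure environment, which `openArray` cannot be part of, so the generated user handlers receive `seq` instead. A minimal sketch of the shape such a handler ends up with (the proc and its arguments are hypothetical):

```nim
import chronos

# An openArray parameter would not compile under {.async.}; the DSL
# therefore hands the user handler a seq copy of the message fields.
proc onHello(capabilities: seq[string]) {.async.} =
  echo "peer supports: ", capabilities

waitFor onHello(@["eth/65", "les/2"])
```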
for i in 1 ..< userHandlerProc.params.len: var param = userHandlerProc.params[i] @@ -428,7 +428,7 @@ proc newMsg(protocol: P2PProtocol, kind: MessageKind, id: int, for param, paramType in procDef.typedInputParams(skip = 1): recFields.add newTree(nnkIdentDefs, newTree(nnkPostfix, ident("*"), param), # The fields are public - chooseFieldType(paramType), # some types such as openarray + chooseFieldType(paramType), # some types such as openArray newEmptyNode()) # are automatically remapped if recFields.len == 1 and protocol.useSingleRecordInlining: @@ -564,7 +564,7 @@ proc createSendProc*(msg: Message, newEmptyNode(), newStmtList()) ## body - if proctype == nnkProcDef: + if procType == nnkProcDef: for p in msg.procDef.pragma: if not eqIdent(p, "async"): def.addPragma p diff --git a/eth/p2p/rlpx.nim b/eth/p2p/rlpx.nim index b725173..bc0bb33 100644 --- a/eth/p2p/rlpx.nim +++ b/eth/p2p/rlpx.nim @@ -170,7 +170,7 @@ proc numProtocols(d: Dispatcher): int = d.activeProtocols.len proc getDispatcher(node: EthereumNode, - otherPeerCapabilities: openarray[Capability]): Dispatcher = + otherPeerCapabilities: openArray[Capability]): Dispatcher = # TODO: sub-optimal solution until progress is made here: # https://github.com/nim-lang/Nim/issues/7457 # We should be able to find an existing dispatcher without allocating a new one @@ -945,7 +945,7 @@ proc checkUselessPeer(peer: Peer) {.raises: [UselessPeerError, Defect].} = # XXX: Send disconnect + UselessPeer raise newException(UselessPeerError, "Useless peer") -proc initPeerState*(peer: Peer, capabilities: openarray[Capability]) +proc initPeerState*(peer: Peer, capabilities: openArray[Capability]) {.raises: [UselessPeerError, Defect].} = peer.dispatcher = getDispatcher(peer.network, capabilities) checkUselessPeer(peer) @@ -1015,7 +1015,7 @@ template `^`(arr): auto = # variable as an open array arr.toOpenArray(0, `arr Len` - 1) -proc initSecretState(hs: var Handshake, authMsg, ackMsg: openarray[byte], +proc initSecretState(hs: var Handshake, authMsg, ackMsg: openArray[byte], p: Peer) = var secrets = hs.getSecrets(authMsg, ackMsg) initSecretState(secrets, p.secretsState) @@ -1101,7 +1101,7 @@ proc rlpxConnect*(node: EthereumNode, remote: Node): Future[Peer] {.async.} = result.waitSingleMsg(DevP2P.hello), 10.seconds) - if not validatePubKeyInHello(response, remote.node.pubKey): + if not validatePubKeyInHello(response, remote.node.pubkey): warn "Remote nodeId is not its public key" # XXX: Do we care? 
trace "DevP2P handshake completed", peer = remote, @@ -1145,7 +1145,7 @@ proc rlpxAccept*(node: EthereumNode, result.network = node var handshake = - HandShake.tryInit(node.rng[], node.keys, {auth.Responder}).tryGet + Handshake.tryInit(node.rng[], node.keys, {auth.Responder}).tryGet var ok = false try: diff --git a/eth/p2p/rlpx_protocols/eth_protocol.nim b/eth/p2p/rlpx_protocols/eth_protocol.nim index cc50737..40c5834 100644 --- a/eth/p2p/rlpx_protocols/eth_protocol.nim +++ b/eth/p2p/rlpx_protocols/eth_protocol.nim @@ -71,10 +71,10 @@ p2pProtocol eth(version = protocolVersion, bestHash: KeccakHash, genesisHash: KeccakHash) - proc newBlockHashes(peer: Peer, hashes: openarray[NewBlockHashesAnnounce]) = + proc newBlockHashes(peer: Peer, hashes: openArray[NewBlockHashesAnnounce]) = discard - proc transactions(peer: Peer, transactions: openarray[Transaction]) = + proc transactions(peer: Peer, transactions: openArray[Transaction]) = discard requestResponse: @@ -85,17 +85,17 @@ p2pProtocol eth(version = protocolVersion, await response.send(peer.network.chain.getBlockHeaders(request)) - proc blockHeaders(p: Peer, headers: openarray[BlockHeader]) + proc blockHeaders(p: Peer, headers: openArray[BlockHeader]) requestResponse: - proc getBlockBodies(peer: Peer, hashes: openarray[KeccakHash]) {.gcsafe.} = + proc getBlockBodies(peer: Peer, hashes: openArray[KeccakHash]) {.gcsafe.} = if hashes.len > maxBodiesFetch: await peer.disconnect(BreachOfProtocol) return await response.send(peer.network.chain.getBlockBodies(hashes)) - proc blockBodies(peer: Peer, blocks: openarray[BlockBody]) + proc blockBodies(peer: Peer, blocks: openArray[BlockBody]) proc newBlock(peer: Peer, bh: NewBlockAnnounce, totalDifficulty: DifficultyInt) = discard @@ -103,15 +103,15 @@ p2pProtocol eth(version = protocolVersion, nextID 13 requestResponse: - proc getNodeData(peer: Peer, hashes: openarray[KeccakHash]) = + proc getNodeData(peer: Peer, hashes: openArray[KeccakHash]) = await response.send(peer.network.chain.getStorageNodes(hashes)) - proc nodeData(peer: Peer, data: openarray[Blob]) + proc nodeData(peer: Peer, data: openArray[Blob]) requestResponse: - proc getReceipts(peer: Peer, hashes: openarray[KeccakHash]) = discard + proc getReceipts(peer: Peer, hashes: openArray[KeccakHash]) = discard # TODO: implement `getReceipts` and reactivate this code # await response.send(peer.network.chain.getReceipts(hashes)) - proc receipts(peer: Peer, receipts: openarray[Receipt]) + proc receipts(peer: Peer, receipts: openArray[Receipt]) diff --git a/eth/p2p/rlpx_protocols/les_protocol.nim b/eth/p2p/rlpx_protocols/les_protocol.nim index 88c69ea..520b11e 100644 --- a/eth/p2p/rlpx_protocols/les_protocol.nim +++ b/eth/p2p/rlpx_protocols/les_protocol.nim @@ -144,13 +144,13 @@ template updateBV: BufValueInt = bufValueAfterRequest(lesNetwork, lesPeer, perProtocolMsgId, requestCostQuantity) -func getValue(values: openarray[KeyValuePair], +func getValue(values: openArray[KeyValuePair], key: string, T: typedesc): Option[T] = for v in values: if v.key == key: return some(rlp.decode(v.value, T)) -func getRequiredValue(values: openarray[KeyValuePair], +func getRequiredValue(values: openArray[KeyValuePair], key: string, T: typedesc): T = for v in values: if v.key == key: @@ -166,7 +166,7 @@ p2pProtocol les(version = lesVersion, incomingRequestDecorator = incomingRequestDecorator, incomingResponseThunkDecorator = incomingResponseDecorator): handshake: - proc status(p: Peer, values: openarray[KeyValuePair]) + proc status(p: Peer, values: 
openArray[KeyValuePair]) onPeerConnected do (peer: Peer): let @@ -254,7 +254,7 @@ p2pProtocol les(version = lesVersion, headNumber: BlockNumber, headTotalDifficulty: DifficultyInt, reorgDepth: BlockNumber, - values: openarray[KeyValuePair], + values: openArray[KeyValuePair], announceType: AnnounceType) = if peer.state.announceType == AnnounceType.None: @@ -288,7 +288,7 @@ p2pProtocol les(version = lesVersion, proc blockHeaders( peer: Peer, bufValue: BufValueInt, - blocks: openarray[BlockHeader]) + blocks: openArray[BlockHeader]) ## On-damand data retrieval ## @@ -296,7 +296,7 @@ p2pProtocol les(version = lesVersion, requestResponse: proc getBlockBodies( peer: Peer, - blocks: openarray[KeccakHash]) {. + blocks: openArray[KeccakHash]) {. costQuantity(blocks.len, max = maxBodiesFetch), gcsafe.} = let blocks = peer.network.chain.getBlockBodies(blocks) @@ -305,12 +305,12 @@ p2pProtocol les(version = lesVersion, proc blockBodies( peer: Peer, bufValue: BufValueInt, - bodies: openarray[BlockBody]) + bodies: openArray[BlockBody]) requestResponse: proc getReceipts( peer: Peer, - hashes: openarray[KeccakHash]) + hashes: openArray[KeccakHash]) {.costQuantity(hashes.len, max = maxReceiptsFetch).} = let receipts = peer.network.chain.getReceipts(hashes) @@ -319,12 +319,12 @@ p2pProtocol les(version = lesVersion, proc receipts( peer: Peer, bufValue: BufValueInt, - receipts: openarray[Receipt]) + receipts: openArray[Receipt]) requestResponse: proc getProofs( peer: Peer, - proofs: openarray[ProofRequest]) {. + proofs: openArray[ProofRequest]) {. costQuantity(proofs.len, max = maxProofsFetch).} = let proofs = peer.network.chain.getProofs(proofs) @@ -333,7 +333,7 @@ p2pProtocol les(version = lesVersion, proc proofs( peer: Peer, bufValue: BufValueInt, - proofs: openarray[Blob]) + proofs: openArray[Blob]) requestResponse: proc getContractCodes( @@ -354,7 +354,7 @@ p2pProtocol les(version = lesVersion, requestResponse: proc getHeaderProofs( peer: Peer, - reqs: openarray[ProofRequest]) {. + reqs: openArray[ProofRequest]) {. costQuantity(reqs.len, max = maxHeaderProofsFetch).} = let proofs = peer.network.chain.getHeaderProofs(reqs) @@ -363,12 +363,12 @@ p2pProtocol les(version = lesVersion, proc headerProofs( peer: Peer, bufValue: BufValueInt, - proofs: openarray[Blob]) + proofs: openArray[Blob]) requestResponse: proc getHelperTrieProofs( peer: Peer, - reqs: openarray[HelperTrieProofRequest]) {. + reqs: openArray[HelperTrieProofRequest]) {. costQuantity(reqs.len, max = maxProofsFetch).} = var nodes, auxData: seq[Blob] @@ -387,7 +387,7 @@ p2pProtocol les(version = lesVersion, requestResponse: proc sendTxV2( peer: Peer, - transactions: openarray[Transaction]) {. + transactions: openArray[Transaction]) {. costQuantity(transactions.len, max = maxTransactionsFetch).} = let chain = peer.network.chain @@ -409,7 +409,7 @@ p2pProtocol les(version = lesVersion, proc getTxStatus( peer: Peer, - transactions: openarray[Transaction]) {. + transactions: openArray[Transaction]) {. 
costQuantity(transactions.len, max = maxTransactionsFetch).} = let chain = peer.network.chain @@ -422,7 +422,7 @@ p2pProtocol les(version = lesVersion, proc txStatus( peer: Peer, bufValue: BufValueInt, - transactions: openarray[TransactionStatusMsg]) + transactions: openArray[TransactionStatusMsg]) proc configureLes*(node: EthereumNode, # Client options: diff --git a/eth/p2p/rlpx_protocols/whisper/whisper_types.nim b/eth/p2p/rlpx_protocols/whisper/whisper_types.nim index 4111c74..b14fadb 100644 --- a/eth/p2p/rlpx_protocols/whisper/whisper_types.nim +++ b/eth/p2p/rlpx_protocols/whisper/whisper_types.nim @@ -189,7 +189,7 @@ proc fullBloom*(): Bloom = for i in 0.. maxUInt24: @@ -163,8 +163,8 @@ proc encryptMsg*(msg: openarray[byte], secrets: var SecretState): seq[byte] = proc getBodySize*(a: RlpxHeader): int = (int(a[0]) shl 16) or (int(a[1]) shl 8) or int(a[2]) -proc decryptHeader*(c: var SecretState, data: openarray[byte], - output: var openarray[byte]): RlpxResult[void] = +proc decryptHeader*(c: var SecretState, data: openArray[byte], + output: var openArray[byte]): RlpxResult[void] = ## Decrypts header `data` using SecretState `c` context and store ## result into `output`. ## @@ -201,15 +201,15 @@ proc decryptHeader*(c: var SecretState, data: openarray[byte], result = ok() proc decryptHeaderAndGetMsgSize*(c: var SecretState, - encryptedHeader: openarray[byte], + encryptedHeader: openArray[byte], outSize: var int): RlpxResult[void] = var decryptedHeader: RlpxHeader result = decryptHeader(c, encryptedHeader, decryptedHeader) if result.isOk(): outSize = decryptedHeader.getBodySize -proc decryptBody*(c: var SecretState, data: openarray[byte], bodysize: int, - output: var openarray[byte], outlen: var int): RlpxResult[void] = +proc decryptBody*(c: var SecretState, data: openArray[byte], bodysize: int, + output: var openArray[byte], outlen: var int): RlpxResult[void] = ## Decrypts body `data` using SecretState `c` context and store ## result into `output`. 
## diff --git a/eth/rlp.nim b/eth/rlp.nim index a77475b..4c9abfc 100644 --- a/eth/rlp.nim +++ b/eth/rlp.nim @@ -353,7 +353,7 @@ proc readImpl[E](rlp: var Rlp, T: type seq[E]): T = for elem in rlp: result.add rlp.read(E) -proc readImpl[E](rlp: var Rlp, T: type openarray[E]): seq[E] = +proc readImpl[E](rlp: var Rlp, T: type openArray[E]): seq[E] = result = readImpl(rlp, seq[E]) proc readImpl(rlp: var Rlp, T: type[object|tuple], @@ -406,7 +406,7 @@ proc `>>`*[T](rlp: var Rlp, location: var T) = template readRecordType*(rlp: var Rlp, T: type, wrappedInList: bool): auto = readImpl(rlp, T, wrappedInList) -proc decode*(bytes: openarray[byte]): RlpNode = +proc decode*(bytes: openArray[byte]): RlpNode = var rlp = rlpFromBytes(bytes) rlp.toNodes diff --git a/eth/rlp/writer.nim b/eth/rlp/writer.nim index 5854cb3..2976e38 100644 --- a/eth/rlp/writer.nim +++ b/eth/rlp/writer.nim @@ -130,10 +130,10 @@ proc appendBlob(self: var RlpWriter, data: openArray[byte], startMarker: byte) = proc appendImpl(self: var RlpWriter, data: string) = appendBlob(self, data.toOpenArrayByte(0, data.high), BLOB_START_MARKER) -proc appendBlob(self: var RlpWriter, data: openarray[byte]) = +proc appendBlob(self: var RlpWriter, data: openArray[byte]) = appendBlob(self, data, BLOB_START_MARKER) -proc appendBlob(self: var RlpWriter, data: openarray[char]) = +proc appendBlob(self: var RlpWriter, data: openArray[char]) = appendBlob(self, data.toOpenArrayByte(0, data.high), BLOB_START_MARKER) proc appendInt(self: var RlpWriter, i: Integer) = @@ -169,10 +169,10 @@ template appendImpl(self: var RlpWriter, e: enum) = template appendImpl(self: var RlpWriter, b: bool) = appendImpl(self, int(b)) -proc appendImpl[T](self: var RlpWriter, listOrBlob: openarray[T]) = +proc appendImpl[T](self: var RlpWriter, listOrBlob: openArray[T]) = mixin append - # TODO: This append proc should be overloaded by `openarray[byte]` after + # TODO: This append proc should be overloaded by `openArray[byte]` after # nim bug #7416 is fixed. when T is (byte or char): self.appendBlob(listOrBlob) diff --git a/eth/trie/binary.nim b/eth/trie/binary.nim index 749021b..51e6007 100644 --- a/eth/trie/binary.nim +++ b/eth/trie/binary.nim @@ -74,13 +74,13 @@ proc hashAndSave*(self: BinaryTrie, node: openArray[byte]): TrieNodeKey = self.db.put(result, node) template saveKV(self: BinaryTrie, keyPath: TrieBitSeq | bool, child: openArray[byte]): untyped = - self.hashAndsave(encodeKVNode(keyPath, child)) + self.hashAndSave(encodeKVNode(keyPath, child)) template saveLeaf(self: BinaryTrie, value: openArray[byte]): untyped = - self.hashAndsave(encodeLeafNode(value)) + self.hashAndSave(encodeLeafNode(value)) template saveBranch(self: BinaryTrie, L, R: openArray[byte]): untyped = - self.hashAndsave(encodeBranchNode(L, R)) + self.hashAndSave(encodeBranchNode(L, R)) proc setBranchNode(self: BinaryTrie, keyPath: TrieBitSeq, node: TrieNode, value: openArray[byte], deleteSubtrie = false): TrieNodeKey diff --git a/eth/trie/branches.nim b/eth/trie/branches.nim index e3a9cde..84a02e3 100644 --- a/eth/trie/branches.nim +++ b/eth/trie/branches.nim @@ -145,7 +145,7 @@ proc getWitnessImpl*(db: DB; nodeHash: TrieNodeKey; keyPath: TrieBitSeq; output: raise newException(InvalidKeyError, "Key too long") of KV_TYPE: output.add nodeVal - if keyPath.len < node.keyPath.len and node.keyPath[0.. 
distUp): # averageDelayBase is smaller that actualDelay, sample should be positive int64(distUp) diff --git a/eth/utp/delay_histogram.nim b/eth/utp/delay_histogram.nim index 7a4875d..7b324da 100644 --- a/eth/utp/delay_histogram.nim +++ b/eth/utp/delay_histogram.nim @@ -6,7 +6,7 @@ {.push raises: [Defect].} -import +import chronos, ./utp_utils @@ -45,8 +45,8 @@ proc addSample*(h: var DelayHistogram, sample: uint32, currentTime: Moment) = h.delayBaseHistory[h.delayBaseIdx] = sample if wrapCompareLess(sample, h.delayBase): - h.delay_base = sample - + h.delayBase = sample + let delay = sample - h.delayBase h.currentDelayHistory[h.currentDelyIdx] = delay diff --git a/eth/utp/growable_buffer.nim b/eth/utp/growable_buffer.nim index 796ffda..0babfaf 100644 --- a/eth/utp/growable_buffer.nim +++ b/eth/utp/growable_buffer.nim @@ -16,19 +16,19 @@ export options # utp implementation. # Another alternative would be to use standard deque from deques module, and caluclate # item indexes from their sequence numbers. -type +type GrowableCircularBuffer*[A] = object items: seq[Option[A]] mask: int # provided size will always be adjusted to next power of two -proc init*[A](T: type GrowableCircularBuffer[A], size: Natural = 16): T = +proc init*[A](T: type GrowableCircularBuffer[A], size: Natural = 16): T = let powOfTwoSize = nextPowerOfTwo(size) T( items: newSeq[Option[A]](size), mask: powOfTwoSize - 1 ) - + proc get*[A](buff: GrowableCircularBuffer[A], i: Natural): Option[A] = buff.items[i and buff.mask] @@ -52,7 +52,7 @@ proc exists*[A](buff: GrowableCircularBuffer[A], i: Natural, check: proc (x: A): else: false -proc `[]`*[A](buff: var GrowableCircularBuffer[A], i: Natural): var A = +proc `[]`*[A](buff: var GrowableCircularBuffer[A], i: Natural): var A = ## Returns contents of the `var GrowableCircularBuffer`. If it is not set, then an exception ## is thrown. 
   buff.items[i and buff.mask].get()
diff --git a/eth/utp/ledbat_congestion_control.nim b/eth/utp/ledbat_congestion_control.nim
index 41b0c01..85ef201 100644
--- a/eth/utp/ledbat_congestion_control.nim
+++ b/eth/utp/ledbat_congestion_control.nim
@@ -32,10 +32,10 @@ proc applyCongestionControl*(
     minRtt: Duration,
     calculatedDelay: Duration,
     clockDrift: int32
-): (uint32, uint32, bool) = 
+): (uint32, uint32, bool) =
   if (actualDelay.isZero() or minRtt.isZero() or numOfAckedBytes == 0):
     return (currentMaxWindowSize, currentSlowStartTreshold, currentSlowStart)
- 
+
   let ourDelay = min(minRtt, calculatedDelay)

   let target = targetDelay
@@ -64,7 +64,7 @@ proc applyCongestionControl*(
   # double window_factor = (double)min(bytes_acked, max_window) / (double)max(max_window, bytes_acked);
   # double delay_factor = off_target / target;
   # double scaled_gain = MAX_CWND_INCREASE_BYTES_PER_RTT * window_factor * delay_factor;
- 
+
   let windowFactor = float64(min(numOfAckedBytes, currentMaxWindowSize)) / float64(max(currentMaxWindowSize, numOfAckedBytes))

   let delayFactor = float64(offTarget) / float64(target.microseconds())
@@ -73,7 +73,7 @@ proc applyCongestionControl*(

   let scaledWindow = float64(currentMaxWindowSize) + scaledGain

-  let ledbatCwnd: uint32 = 
+  let ledbatCwnd: uint32 =
     if scaledWindow < minWindowSize:
       uint32(minWindowSize)
     else:
diff --git a/eth/utp/packets.nim b/eth/utp/packets.nim
index c8bcd07..ef3045a 100644
--- a/eth/utp/packets.nim
+++ b/eth/utp/packets.nim
@@ -15,14 +15,14 @@ import

 export results

-const 
+const
   minimalHeaderSize = 20
   minimalHeaderSizeWithSelectiveAck = 26
   protocolVersion = 1
   zeroMoment = Moment.init(0, Nanosecond)
   acksArrayLength: uint8 = 4

-type 
+type
   PacketType* = enum
     ST_DATA = 0,
     ST_FIN = 1,
@@ -65,7 +65,7 @@ type
 # 2. Monotonicity
 #    Reference lib have a lot of checks to assume that this is monotonic on
 #    every system, and warnings when monotonic clock is not avaialable.
-proc getMonoTimestamp*(): TimeStampInfo = 
+proc getMonoTimestamp*(): TimeStampInfo =
   let currentMoment = Moment.now()

   # Casting this value from int64 to uin32, my lead to some sudden spikes in
@@ -75,7 +75,7 @@ proc getMonoTimestamp*(): TimeStampInfo =
   # uTP implementation is resistant to those spikes are as it keeps history of
   # few last delays on uses smallest one for calculating ledbat window.
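Note (illustration only, not part of the patch): the C-style comments quoted in `applyCongestionControl` above spell out the LEDBAT gain step. A minimal standalone sketch of that calculation, with assumed constants rather than the module's actual `targetDelay` and window limits:

``` nim
# Illustrative only; constant values are assumptions, not the library's.
const
  maxCwndIncreaseBytesPerRtt = 3000.0   # assumed growth cap per round trip
  targetDelayMicros = 100_000.0         # assumed 100 ms LEDBAT target delay

proc ledbatGain(ackedBytes, maxWindow: uint32, ourDelayMicros: float64): float64 =
  ## off_target > 0 (delay below target) grows the window, < 0 shrinks it.
  let offTarget = targetDelayMicros - ourDelayMicros
  let windowFactor = float64(min(ackedBytes, maxWindow)) /
                     float64(max(maxWindow, ackedBytes))
  let delayFactor = offTarget / targetDelayMicros
  maxCwndIncreaseBytesPerRtt * windowFactor * delayFactor

when isMainModule:
  echo ledbatGain(1500, 10_000, 40_000.0)   # below target: positive gain
  echo ledbatGain(1500, 10_000, 150_000.0)  # above target: negative gain
```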
# so any outlier huge value will be ignored - # + # let timestamp = uint32((currentMoment - zeroMoment).microseconds()) TimeStampInfo(moment: currentMoment, timestamp: timestamp) @@ -130,7 +130,7 @@ proc encodePacket*(p: Packet): seq[byte] = except IOError as e: # This should not happen in case of in-memory streams raiseAssert e.msg - + proc decodePacket*(bytes: openArray[byte]): Result[Packet, string] = let receivedBytesLength = len(bytes) if receivedBytesLength < minimalHeaderSize: @@ -139,11 +139,11 @@ proc decodePacket*(bytes: openArray[byte]): Result[Packet, string] = let version = bytes[0] and 0xf if version != protocolVersion: return err("invalid packet version") - + var kind: PacketType if not checkedEnumAssign(kind, (bytes[0] shr 4)): return err("Invalid message type") - + let extensionByte = bytes[1] if (not (extensionByte == 0 or extensionByte == 1)): @@ -161,7 +161,7 @@ proc decodePacket*(bytes: openArray[byte]): Result[Packet, string] = seq_nr: fromBytesBE(uint16, bytes.toOpenArray(16, 17)), ack_nr: fromBytesBE(uint16, bytes.toOpenArray(18, 19)), ) - + if extensionByte == 0: # packet without any extensions let payload = @@ -175,7 +175,7 @@ proc decodePacket*(bytes: openArray[byte]): Result[Packet, string] = # packet with selective ack extension if (receivedBytesLength < minimalHeaderSizeWithSelectiveAck): return err("Packet too short for selective ack extension") - + let nextExtension = bytes[20] let extLength = bytes[21] @@ -186,11 +186,11 @@ proc decodePacket*(bytes: openArray[byte]): Result[Packet, string] = # as 4byte bit mask is able to ack 32 packets in the future which is more than enough if (nextExtension != 0 or extLength != 4): return err("Bad format of selective ack extension") - + let extension = SelectiveAckExtension( acks: [bytes[22], bytes[23], bytes[24], bytes[25]] - ) + ) let payload = if (receivedBytesLength == minimalHeaderSizeWithSelectiveAck): @@ -235,7 +235,7 @@ proc ackPacket*( bufferSize: uint32, timestampDiff: uint32, acksBitmask: Option[array[4, byte]] = none[array[4, byte]]() - ): Packet = + ): Packet = let (extensionByte, extensionData) = if acksBitmask.isSome(): @@ -254,7 +254,6 @@ proc ackPacket*( seqNr: seqNr, ackNr: ackNr ) - Packet(header: h, eack: extensionData, payload: @[]) @@ -265,7 +264,7 @@ proc dataPacket*( bufferSize: uint32, payload: seq[byte], timestampDiff: uint32 -): Packet = +): Packet = let h = PacketHeaderV1( pType: ST_DATA, version: protocolVersion, @@ -278,10 +277,10 @@ proc dataPacket*( seqNr: seqNr, ackNr: ackNr ) - + Packet(header: h, eack: none[SelectiveAckExtension](), payload: payload) -proc resetPacket*(seqNr: uint16, sndConnectionId: uint16, ackNr: uint16): Packet = +proc resetPacket*(seqNr: uint16, sndConnectionId: uint16, ackNr: uint16): Packet = let h = PacketHeaderV1( pType: ST_RESET, version: protocolVersion, @@ -296,7 +295,7 @@ proc resetPacket*(seqNr: uint16, sndConnectionId: uint16, ackNr: uint16): Packet seqNr: seqNr, ackNr: ackNr ) - + Packet(header: h, eack: none[SelectiveAckExtension](), payload: @[]) proc finPacket*( @@ -305,7 +304,7 @@ proc finPacket*( ackNr: uint16, bufferSize: uint32, timestampDiff: uint32 -): Packet = +): Packet = let h = PacketHeaderV1( pType: ST_FIN, version: protocolVersion, @@ -318,5 +317,5 @@ proc finPacket*( seqNr: seqNr, ackNr: ackNr ) - + Packet(header: h, eack: none[SelectiveAckExtension](), payload: @[]) diff --git a/eth/utp/send_buffer_tracker.nim b/eth/utp/send_buffer_tracker.nim index ce8268f..ed480ed 100644 --- a/eth/utp/send_buffer_tracker.nim +++ 
b/eth/utp/send_buffer_tracker.nim @@ -31,7 +31,7 @@ type SendBufferTracker* = ref object proc new*( T: type SendBufferTracker, currentWindow: uint32, - maxRemoteWindow: uint32, + maxRemoteWindow: uint32, maxSndBufferSize: uint32, maxWindow: uint32): T = return ( @@ -70,7 +70,7 @@ proc updateMaxRemote*(t: SendBufferTracker, newRemoteWindow: uint32) = t.maxRemoteWindow = newRemoteWindow t.checkWaiters() -proc updateMaxWindowSize*(t: SendBufferTracker, newRemoteWindow: uint32, maxWindow: uint32) = +proc updateMaxWindowSize*(t: SendBufferTracker, newRemoteWindow: uint32, maxWindow: uint32) = t.maxRemoteWindow = newRemoteWindow t.maxWindow = maxWindow t.checkWaiters() diff --git a/eth/utp/utp_discv5_protocol.nim b/eth/utp/utp_discv5_protocol.nim index a990e16..4cd26f8 100644 --- a/eth/utp/utp_discv5_protocol.nim +++ b/eth/utp/utp_discv5_protocol.nim @@ -28,7 +28,7 @@ proc hash(x: UtpSocketKey[Node]): Hash = proc initSendCallback( t: protocol.Protocol, subProtocolName: seq[byte]): SendCallback[Node] = return ( - proc (to: Node, data: seq[byte]): Future[void] = + proc (to: Node, data: seq[byte]): Future[void] = let fut = newFuture[void]() # TODO: In discovery v5 each talkreq waits for a talkresp, but here we # would really like the fire and forget semantics (similar to udp). @@ -54,7 +54,7 @@ proc messageHandler(protocol: TalkProtocol, request: seq[byte], @[] else: @[] - + proc new*( T: type UtpDiscv5Protocol, p: protocol.Protocol, diff --git a/eth/utp/utp_protocol.nim b/eth/utp/utp_protocol.nim index 70e1caf..38ddd93 100644 --- a/eth/utp/utp_protocol.nim +++ b/eth/utp/utp_protocol.nim @@ -68,18 +68,18 @@ proc processDatagram(transp: DatagramTransport, raddr: TransportAddress): proc initSendCallback(t: DatagramTransport): SendCallback[TransportAddress] = return ( - proc (to: TransportAddress, data: seq[byte]): Future[void] = + proc (to: TransportAddress, data: seq[byte]): Future[void] = t.sendTo(to, data) ) proc new*( - T: type UtpProtocol, - acceptConnectionCb: AcceptConnectionCallback[TransportAddress], + T: type UtpProtocol, + acceptConnectionCb: AcceptConnectionCallback[TransportAddress], address: TransportAddress, socketConfig: SocketConfig = SocketConfig.init(), allowConnectionCb: AllowConnectionCallback[TransportAddress] = nil, rng = newRng()): UtpProtocol {.raises: [Defect, CatchableError].} = - + doAssert(not(isNil(acceptConnectionCb))) let router = UtpRouter[TransportAddress].new( @@ -97,7 +97,7 @@ proc shutdownWait*(p: UtpProtocol): Future[void] {.async.} = ## closes all managed utp sockets and then underlying transport await p.utpRouter.shutdownWait() await p.transport.closeWait() - + proc connectTo*(r: UtpProtocol, address: TransportAddress): Future[ConnectionResult[TransportAddress]] = return r.utpRouter.connectTo(address) diff --git a/eth/utp/utp_router.nim b/eth/utp/utp_router.nim index e9b107c..aefb1be 100644 --- a/eth/utp/utp_router.nim +++ b/eth/utp/utp_router.nim @@ -184,7 +184,7 @@ proc processPacket[A](r: UtpRouter[A], p: Packet, sender: A) {.async.}= let rstPacket = resetPacket(randUint16(r.rng[]), p.header.connectionId, p.header.seqNr) await r.sendCb(sender, encodePacket(rstPacket)) -proc processIncomingBytes*[A](r: UtpRouter[A], bytes: seq[byte], sender: A) {.async.} = +proc processIncomingBytes*[A](r: UtpRouter[A], bytes: seq[byte], sender: A) {.async.} = if (not r.closed): let dec = decodePacket(bytes) if (dec.isOk()): @@ -202,11 +202,11 @@ proc generateNewUniqueSocket[A](r: UtpRouter[A], address: A): Option[UtpSocket[A if r.registerIfAbsent(socket): return some(socket) 
- + inc tryCount return none[UtpSocket[A]]() - + proc connect[A](s: UtpSocket[A]): Future[ConnectionResult[A]] {.async.}= let startFut = s.startOutgoingSocket() @@ -243,7 +243,7 @@ proc connectTo*[A](r: UtpRouter[A], address: A, connectionId: uint16): Future[Co let socket = newOutgoingSocket[A](address, r.sendCb, r.socketConfig, connectionId, r.rng[]) if (r.registerIfAbsent(socket)): - return await socket.connect() + return await socket.connect() else: return err(OutgoingConnectionError(kind: SocketAlreadyExists)) diff --git a/eth/utp/utp_socket.nim b/eth/utp/utp_socket.nim index 20f48c3..0778ed1 100644 --- a/eth/utp/utp_socket.nim +++ b/eth/utp/utp_socket.nim @@ -35,7 +35,7 @@ type UtpSocketKey*[A] = object remoteAddress*: A rcvId*: uint16 - + OutgoingPacket = object packetBytes: seq[byte] transmissions: uint16 @@ -50,7 +50,7 @@ type SendCallback*[A] = proc (to: A, data: seq[byte]): Future[void] {.gcsafe, raises: [Defect]} SocketConfig* = object - # This is configurable (in contrast to reference impl), as with standard 2 syn resends + # This is configurable (in contrast to reference impl), as with standard 2 syn resends # default timeout set to 3seconds and doubling of timeout with each re-send, it # means that initial connection would timeout after 21s, which seems rather long initialSynTimeout*: Duration @@ -77,7 +77,7 @@ type remoteWindowResetTimeout*: Duration WriteErrorType* = enum - SocketNotWriteable, + SocketNotWriteable, FinSent WriteError* = object @@ -114,11 +114,11 @@ type seqNr: uint16 # All seq number up to this havve been correctly acked by us ackNr: uint16 - + # Should be completed after succesful connection to remote host or after timeout # for the first syn packet connectionFuture: Future[void] - + # the number of packets in the send queue. Packets that haven't # yet been sent count as well as packets marked as needing resend # the oldest un-acked packet in the send queue is seq_nr - cur_window_packets @@ -135,7 +135,7 @@ type # current retransmit Timeout used to calculate rtoTimeout retransmitTimeout: Duration - + # calculated round trip time during communication with remote peer rtt: Duration # calculated round trip time variance @@ -147,7 +147,7 @@ type # RTO timeout will happen when currenTime > rtoTimeout rtoTimeout: Moment - # rcvBuffer + # rcvBuffer buffer: AsyncBuffer # loop called every 500ms to check for on going timeout status @@ -223,7 +223,7 @@ type OutgoingConnectionErrorType* = enum SocketAlreadyExists, ConnectionTimedOut, ErrorWhileSendingSyn - + OutgoingConnectionError* = object case kind*: OutgoingConnectionErrorType of ErrorWhileSendingSyn: @@ -243,7 +243,7 @@ const # How often each socket check its different on going timers checkTimeoutsLoopInterval = milliseconds(500) - # Defualt initial timeout for first Syn packet + # Defualt initial timeout for first Syn packet defaultInitialSynTimeout = milliseconds(3000) # Initial timeout to receive first Data data packet after receiving initial Syn @@ -274,7 +274,7 @@ const # to zero. i.e when we received a packet from remote peer with `wndSize` set to 0. defaultResetWindowTimeout = seconds(15) - # If remote peer window drops to zero, then after some time we will reset it + # If remote peer window drops to zero, then after some time we will reset it # to this value even if we do not receive any more messages from remote peers. 
# Reset period is configured in `SocketConfig` minimalRemoteWindow: uint32 = 1500 @@ -307,7 +307,7 @@ proc init( ) proc init*( - T: type SocketConfig, + T: type SocketConfig, initialSynTimeout: Duration = defaultInitialSynTimeout, dataResendsBeforeFailure: uint16 = defaultDataResendsBeforeFailure, optRcvBuffer: uint32 = defaultOptRcvBuffer, @@ -345,11 +345,11 @@ proc sendAck(socket: UtpSocket): Future[void] = ## Creates and sends ack, based on current socket state. Acks are different from ## other packets as we do not track them in outgoing buffet - let ackPacket = + let ackPacket = ackPacket( socket.seqNr, socket.connectionIdSnd, - socket.ackNr, + socket.ackNr, socket.getRcvWindowSize(), socket.replayMicro ) @@ -399,13 +399,13 @@ proc markAllPacketAsLost(s: UtpSocket) = proc isOpened(socket:UtpSocket): bool = return ( - socket.state == SynRecv or - socket.state == SynSent or + socket.state == SynRecv or + socket.state == SynSent or socket.state == Connected ) -proc shouldDisconnectFromFailedRemote(socket: UtpSocket): bool = - (socket.state == SynSent and socket.retransmitCount >= 2) or +proc shouldDisconnectFromFailedRemote(socket: UtpSocket): bool = + (socket.state == SynSent and socket.retransmitCount >= 2) or (socket.retransmitCount >= socket.socketConfig.dataResendsBeforeFailure) proc checkTimeouts(socket: UtpSocket) {.async.} = @@ -415,13 +415,13 @@ proc checkTimeouts(socket: UtpSocket) {.async.} = await socket.flushPackets() if socket.isOpened(): - + if (socket.sendBufferTracker.maxRemoteWindow == 0 and currentTime > socket.zeroWindowTimer): debug "Reset remote window to minimal value" socket.sendBufferTracker.updateMaxRemote(minimalRemoteWindow) if (currentTime > socket.rtoTimeout): - + # TODO add handling of probe time outs. Reference implemenation has mechanism # of sending probes to determine mtu size. Probe timeouts do not count to standard # timeouts calculations @@ -431,7 +431,7 @@ proc checkTimeouts(socket: UtpSocket) {.async.} = if (socket.state == SynRecv): socket.destroy() return - + if socket.shouldDisconnectFromFailedRemote(): if socket.state == SynSent and (not socket.connectionFuture.finished()): socket.connectionFuture.fail(newException(ConnectionError, "Connection to peer timed out")) @@ -442,7 +442,7 @@ proc checkTimeouts(socket: UtpSocket) {.async.} = let newTimeout = socket.retransmitTimeout * 2 socket.retransmitTimeout = newTimeout socket.rtoTimeout = currentTime + newTimeout - + let currentPacketSize = uint32(socket.getPacketSize()) if (socket.curWindowPackets == 0 and socket.sendBufferTracker.maxWindow > currentPacketSize): @@ -470,7 +470,7 @@ proc checkTimeouts(socket: UtpSocket) {.async.} = # as then every selecivly acked packet restes timeout timer and removes packet # from out buffer. 
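Note (illustration only, not part of the patch): `checkTimeouts` above doubles `retransmitTimeout` and re-arms `rtoTimeout` on every RTO hit. A minimal sketch of that backoff using plain millisecond integers instead of chronos `Duration`/`Moment`, with field names assumed:

``` nim
# Illustrative only; the real socket stores chronos Durations and Moments.
type RetransmitTimer = object
  timeoutMs: int    # current retransmit timeout
  deadlineMs: int   # absolute time at which the next RTO fires

proc onRtoExpired(t: var RetransmitTimer, nowMs: int) =
  # same shape as checkTimeouts: double the timeout, push the deadline out
  t.timeoutMs = t.timeoutMs * 2
  t.deadlineMs = nowMs + t.timeoutMs

when isMainModule:
  # with the default 3 s initial SYN timeout this gives deadlines at 3 s, 9 s and 21 s,
  # matching the "initial connection would timeout after 21s" note in SocketConfig
  var t = RetransmitTimer(timeoutMs: 3000, deadlineMs: 3000)
  t.onRtoExpired(3000)
  t.onRtoExpired(9000)
  echo t.timeoutMs, " ", t.deadlineMs   # 12000 21000
```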
markAllPacketAsLost(socket) - + # resend oldest packet if there are some packets in flight if (socket.curWindowPackets > 0): notice "resending oldest packet in outBuffer" @@ -531,7 +531,7 @@ proc handleDataWrite(socket: UtpSocket, data: seq[byte], writeFut: Future[WriteR if socket.curWindowPackets == 0: socket.resetSendTimeout() - let dataPacket = + let dataPacket = dataPacket( socket.seqNr, socket.connectionIdSnd, @@ -544,7 +544,7 @@ proc handleDataWrite(socket: UtpSocket, data: seq[byte], writeFut: Future[WriteR socket.registerOutgoingPacket(outgoingPacket) await socket.sendData(outgoingPacket.packetBytes) except CancelledError as exc: - # write loop has been cancelled in the middle of processing due to the + # write loop has been cancelled in the middle of processing due to the # socket closing # this approach can create partial write in case destroyin socket in the # the middle of the write @@ -566,8 +566,8 @@ proc handleClose(socket: UtpSocket): Future[void] {.async.} = try: if socket.curWindowPackets == 0: socket.resetSendTimeout() - - let finEncoded = + + let finEncoded = encodePacket( finPacket( socket.seqNr, @@ -577,13 +577,13 @@ proc handleClose(socket: UtpSocket): Future[void] {.async.} = socket.replayMicro ) ) - socket.registerOutgoingPacket(OutgoingPacket.init(finEncoded, 1, true, 0)) + socket.registerOutgoingPacket(OutgoingPacket.init(finEncoded, 1, true, 0)) await socket.sendData(finEncoded) socket.finSent = true except CancelledError as exc: raise exc -proc writeLoop(socket: UtpSocket): Future[void] {.async.} = +proc writeLoop(socket: UtpSocket): Future[void] {.async.} = ## Loop that processes writes on socket try: while true: @@ -603,7 +603,7 @@ proc writeLoop(socket: UtpSocket): Future[void] {.async.} = socket.writeQueue.clear() trace "writeLoop canceled" -proc startWriteLoop(s: UtpSocket) = +proc startWriteLoop(s: UtpSocket) = s.writeLoop = writeLoop(s) proc new[A]( @@ -689,7 +689,7 @@ proc newIncomingSocket*[A]( ): UtpSocket[A] = let initialSeqNr = randUint16(rng) - let (initialState, initialTimeout) = + let (initialState, initialTimeout) = if (cfg.incomingSocketReceiveTimeout.isNone()): # it does not matter what timeout value we put here, as socket will be in # connected state without outgoing packets in buffer so any timeout hit will @@ -724,7 +724,7 @@ proc startOutgoingSocket*(socket: UtpSocket): Future[void] {.async.} = socket.startTimeoutLoop() await socket.sendData(outgoingPacket.packetBytes) await socket.connectionFuture - + proc startIncomingSocket*(socket: UtpSocket) {.async.} = # Make sure ack was flushed before moving forward await socket.sendAck() @@ -769,7 +769,7 @@ proc updateTimeouts(socket: UtpSocket, timeSent: Moment, currentTime: Moment) = ## delta = rtt - packet_rtt ## rtt_var += (abs(delta) - rtt_var) / 4; ## rtt += (packet_rtt - rtt) / 8; - + let packetRtt = currentTime - timeSent if (socket.rtt.isZero): @@ -787,7 +787,7 @@ proc updateTimeouts(socket: UtpSocket, timeSent: Moment, currentTime: Moment) = socket.rttVar = newVar socket.rtt = newRtt - + # according to spec it should be: timeout = max(rtt + rtt_var * 4, 500) # but usually spec lags after implementation so milliseconds(1000) is used socket.rto = max(socket.rtt + (socket.rttVar * 4), milliseconds(1000)) @@ -798,15 +798,15 @@ proc ackPacket(socket: UtpSocket, seqNr: uint16, currentTime: Moment): AckResult let packet = packetOpt.get() if packet.transmissions == 0: - # according to reference impl it can happen when we get an ack_nr that - # does not exceed what we have stuffed into the 
outgoing buffer, + # according to reference impl it can happen when we get an ack_nr that + # does not exceed what we have stuffed into the outgoing buffer, # but does exceed what we have sent # TODO analyze if this case can happen with our impl return PacketNotSentYet - + socket.outBuffer.delete(seqNr) - # from spec: The rtt and rtt_var is only updated for packets that were sent only once. + # from spec: The rtt and rtt_var is only updated for packets that were sent only once. # This avoids problems with figuring out which packet was acked, the first or the second one. # it is standard solution to retransmission ambiguity problem if packet.transmissions == 1: @@ -830,7 +830,7 @@ proc ackPacket(socket: UtpSocket, seqNr: uint16, currentTime: Moment): AckResult proc ackPackets(socket: UtpSocket, nrPacketsToAck: uint16, currentTime: Moment) = ## Ack packets in outgoing buffer based on ack number in the received packet var i = 0 - while i < int(nrPacketsToack): + while i < int(nrPacketsToAck): let result = socket.ackPacket(socket.seqNr - socket.curWindowPackets, currentTime) case result of PacketAcked: @@ -847,7 +847,7 @@ proc calculateAckedbytes(socket: UtpSocket, nrPacketsToAck: uint16, now: Moment) var i: uint16 = 0 var ackedBytes: uint32 = 0 var minRtt: Duration = InfiniteDuration - while i < nrPacketsToack: + while i < nrPacketsToAck: let seqNr = socket.seqNr - socket.curWindowPackets + i let packetOpt = socket.outBuffer.get(seqNr) if (packetOpt.isSome() and packetOpt.unsafeGet().transmissions > 0): @@ -914,7 +914,7 @@ proc processPacket*(socket: UtpSocket, p: Packet) {.async.} = if acks > socket.curWindowPackets: # this case happens if the we already received this ack nr acks = 0 - + # If packet is totally of the mark short circout the processing if pastExpected >= reorderBufferMaxSize: notice "Received packet is totally of the mark" @@ -924,13 +924,13 @@ proc processPacket*(socket: UtpSocket, p: Packet) {.async.} = # TODO caluclate bytes acked by selective acks here (if thats the case) let sentTimeRemote = p.header.timestamp - - # we are using uint32 not a Duration, to wrap a round in case of + + # we are using uint32 not a Duration, to wrap a round in case of # sentTimeRemote > receipTimestamp. This can happen as local and remote # clock can be not synchornized or even using different system clock. 
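Note (illustration only, not part of the patch): the spec formulas quoted in the `updateTimeouts` hunk further up (`rtt_var += (abs(delta) - rtt_var) / 4`, `rtt += (packet_rtt - rtt) / 8`, RTO clamped to at least one second) can be sketched with plain integers. The first-sample initialisation below is the conventional one and is an assumption, since that branch is elided in the hunk:

``` nim
# Illustrative only; the real code works on chronos Durations.
type RttEstimator = object
  rtt, rttVar, rto: int   # milliseconds

proc addSample(e: var RttEstimator, packetRttMs: int) =
  if e.rtt == 0:
    # assumed first-sample init (rtt = sample, rtt_var = sample/2); not shown in the hunk
    e.rtt = packetRttMs
    e.rttVar = packetRttMs div 2
  else:
    let delta = e.rtt - packetRttMs
    e.rttVar = e.rttVar + (abs(delta) - e.rttVar) div 4
    e.rtt = e.rtt + (packetRttMs - e.rtt) div 8
  # same clamp as above: never retransmit faster than one second
  e.rto = max(e.rtt + 4 * e.rttVar, 1000)

when isMainModule:
  var e = RttEstimator()
  for sample in [100, 120, 90, 300]:
    e.addSample(sample)
  echo e.rtt, " ", e.rttVar, " ", e.rto
```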
# i.e this number itself does not tell anything and is only used to feedback it # to remote peer with each sent packet - let remoteDelay = + let remoteDelay = if (sentTimeRemote == 0): 0'u32 else: @@ -946,8 +946,8 @@ proc processPacket*(socket: UtpSocket, p: Packet) {.async.} = # remote new delay base is less than previous # shift our delay base in other direction to take clock skew into account # but no more than 10ms - if (prevRemoteDelayBase != 0 and - wrapCompareLess(socket.remoteHistogram.delayBase, prevRemoteDelayBase) and + if (prevRemoteDelayBase != 0 and + wrapCompareLess(socket.remoteHistogram.delayBase, prevRemoteDelayBase) and prevRemoteDelayBase - socket.remoteHistogram.delayBase <= 10000'u32): socket.ourHistogram.shift(prevRemoteDelayBase - socket.remoteHistogram.delayBase) @@ -960,7 +960,7 @@ proc processPacket*(socket: UtpSocket, p: Packet) {.async.} = # adjust base delay if delay estimates exceeds rtt if (socket.ourHistogram.getValue() > minRtt): let diff = uint32((socket.ourHistogram.getValue() - minRtt).microseconds()) - socket.ourHistogram.shift(diff) + socket.ourHistogram.shift(diff) let (newMaxWindow, newSlowStartTreshold, newSlowStart) = applyCongestionControl( @@ -985,7 +985,7 @@ proc processPacket*(socket: UtpSocket, p: Packet) {.async.} = # when zeroWindowTimer will be hit and maxRemoteWindow still will be equal to 0 # then it will be reset to minimal value socket.zeroWindowTimer = timestampInfo.moment + socket.socketConfig.remoteWindowResetTimeout - + # socket.curWindowPackets == acks means that this packet acked all remaining packets # including the sent fin packets if (socket.finSent and socket.curWindowPackets == acks): @@ -998,7 +998,7 @@ proc processPacket*(socket: UtpSocket, p: Packet) {.async.} = socket.destroy() socket.ackPackets(acks, timestampInfo.moment) - + case p.header.pType of ST_DATA, ST_FIN: # To avoid amplification attacks, server socket is in SynRecv state until @@ -1017,7 +1017,7 @@ proc processPacket*(socket: UtpSocket, p: Packet) {.async.} = if (len(p.payload) > 0 and (not socket.readShutdown)): # we are getting in order data packet, we can flush data directly to the incoming buffer await upload(addr socket.buffer, unsafeAddr p.payload[0], p.payload.len()) - # Bytes have been passed to upper layer, we can increase number of last + # Bytes have been passed to upper layer, we can increase number of last # acked packet inc socket.ackNr @@ -1029,7 +1029,7 @@ proc processPacket*(socket: UtpSocket, p: Packet) {.async.} = if ((not socket.reachedFin) and socket.gotFin and socket.eofPktNr == socket.ackNr): notice "Reached socket EOF" # In case of reaching eof, it is up to user of library what to to with - # it. With the current implementation, the most apropriate way would be to + # it. 
With the current implementation, the most apropriate way would be to # destory it (as with our implementation we know that remote is destroying its acked fin) # as any other send will either generate timeout, or socket will be forcefully # closed by reset @@ -1043,14 +1043,14 @@ proc processPacket*(socket: UtpSocket, p: Packet) {.async.} = if socket.reorderCount == 0: break - + let nextPacketNum = socket.ackNr + 1 let maybePacket = socket.inBuffer.get(nextPacketNum) - + if maybePacket.isNone(): break - + let packet = maybePacket.unsafeGet() if (len(packet.payload) > 0 and (not socket.readShutdown)): @@ -1066,7 +1066,7 @@ proc processPacket*(socket: UtpSocket, p: Packet) {.async.} = # how many concurrent tasks there are and how to cancel them when socket # is closed asyncSpawn socket.sendAck() - + # we got packet out of order else: notice "Got out of order packet" @@ -1075,7 +1075,7 @@ proc processPacket*(socket: UtpSocket, p: Packet) {.async.} = notice "Got packet past eof" return - # growing buffer before checking the packet is already there to avoid + # growing buffer before checking the packet is already there to avoid # looking at older packet due to indices wrap aroud socket.inBuffer.ensureSize(pkSeqNr + 1, pastExpected + 1) @@ -1143,14 +1143,14 @@ proc closeWait*(socket: UtpSocket) {.async.} = socket.close() await socket.closeEvent.wait() -proc write*(socket: UtpSocket, data: seq[byte]): Future[WriteResult] = +proc write*(socket: UtpSocket, data: seq[byte]): Future[WriteResult] = let retFuture = newFuture[WriteResult]("UtpSocket.write") if (socket.state != Connected): let res = Result[int, WriteError].err(WriteError(kind: SocketNotWriteable, currentState: socket.state)) retFuture.complete(res) return retFuture - + # fin should be last packet received by remote side, therefore trying to write # after sending fin is considered error if socket.sendFinRequested or socket.finSent: @@ -1159,12 +1159,12 @@ proc write*(socket: UtpSocket, data: seq[byte]): Future[WriteResult] = return retFuture var bytesWritten = 0 - + if len(data) == 0: let res = Result[int, WriteError].ok(bytesWritten) retFuture.complete(res) return retFuture - + try: socket.writeQueue.putNoWait(WriteRequest(kind: Data, data: data, writer: retFuture)) except AsyncQueueFullError as e: @@ -1209,7 +1209,7 @@ proc read*(socket: UtpSocket): Future[seq[byte]] {.async.}= var bytes = newSeq[byte]() readLoop(): - if socket.readingClosed(): + if socket.readingClosed(): (0, true) else: let count = socket.buffer.dataLen() @@ -1237,7 +1237,7 @@ proc numOfBytesInFlight*(socket: UtpSocket): uint32 = socket.sendBufferTracker.c # It throws assertion error when number of elements in buffer do not equal kept counter proc numPacketsInReordedBuffer*(socket: UtpSocket): int = var num = 0 - for e in socket.inBUffer.items(): + for e in socket.inBuffer.items(): if e.isSome(): inc num doAssert(num == int(socket.reorderCount)) diff --git a/tests/db/test_kvstore_sqlite3.nim b/tests/db/test_kvstore_sqlite3.nim index 0f49ff3..952de8b 100644 --- a/tests/db/test_kvstore_sqlite3.nim +++ b/tests/db/test_kvstore_sqlite3.nim @@ -29,7 +29,7 @@ procSuite "SqStoreRef": let insertStmt = db.prepareStmt( "INSERT INTO records(value) VALUES (?);", - openarray[byte], void).get + openArray[byte], void).get let insert1Res = insertStmt.exec [byte 1, 2, 3, 4] let insert2Res = insertStmt.exec @[] @@ -63,11 +63,11 @@ procSuite "SqStoreRef": let selectRangeStmt = db.prepareStmt( "SELECT value FROM records WHERE key >= ? 
and key < ?;", - (int64, int64), openarray[byte]).get + (int64, int64), openArray[byte]).get block: var allBytes = newSeq[byte]() - let selectRangeRes = selectRangeStmt.exec((0'i64, 5'i64)) do (bytes: openarray[byte]) {.gcsafe.}: + let selectRangeRes = selectRangeStmt.exec((0'i64, 5'i64)) do (bytes: openArray[byte]) {.gcsafe.}: allBytes.add byte(bytes.len) allBytes.add bytes @@ -80,7 +80,7 @@ procSuite "SqStoreRef": 0, 1, 5] block: - let selectRangeRes = selectRangeStmt.exec((10'i64, 20'i64)) do (bytes: openarray[byte]): + let selectRangeRes = selectRangeStmt.exec((10'i64, 20'i64)) do (bytes: openArray[byte]): echo "Got unexpected bytes: ", bytes check: @@ -106,7 +106,7 @@ procSuite "SqStoreRef": ] test "Tuple with byte arrays support": - # openarray[byte] requires either Nim 1.4 + # openArray[byte] requires either Nim 1.4 # or hardcoding the seq[byte] and array[N, byte] paths let db = SqStoreRef.init("", "test", inMemory = true)[] defer: db.close() diff --git a/tests/fuzzing/discoveryv5/fuzz_decode_message.nim b/tests/fuzzing/discoveryv5/fuzz_decode_message.nim index e9ca520..4d8121f 100644 --- a/tests/fuzzing/discoveryv5/fuzz_decode_message.nim +++ b/tests/fuzzing/discoveryv5/fuzz_decode_message.nim @@ -6,14 +6,14 @@ test: block: let decoded = decodeMessage(payload) - if decoded.isOK(): + if decoded.isOk(): let message = decoded.get() var encoded: seq[byte] case message.kind of unused: break of ping: encoded = encodeMessage(message.ping, message.reqId) of pong: encoded = encodeMessage(message.pong, message.reqId) - of findNode: encoded = encodeMessage(message.findNode, message.reqId) + of findnode: encoded = encodeMessage(message.findNode, message.reqId) of nodes: encoded = encodeMessage(message.nodes, message.reqId) of talkreq: encoded = encodeMessage(message.talkreq, message.reqId) of talkresp: encoded = encodeMessage(message.talkresp, message.reqId) diff --git a/tests/fuzzing/rlp/rlp_decode.nim b/tests/fuzzing/rlp/rlp_decode.nim index 83561b6..8b2ab80 100644 --- a/tests/fuzzing/rlp/rlp_decode.nim +++ b/tests/fuzzing/rlp/rlp_decode.nim @@ -10,7 +10,7 @@ type test1: uint32 test2: string -template testDecode(payload: openarray, T: type) = +template testDecode(payload: openArray, T: type) = try: discard rlp.decode(payload, T) except RlpError as e: diff --git a/tests/keyfile/test_keyfile.nim b/tests/keyfile/test_keyfile.nim index 26c6b8f..2ae90bc 100644 --- a/tests/keyfile/test_keyfile.nim +++ b/tests/keyfile/test_keyfile.nim @@ -245,7 +245,7 @@ suite "KeyFile test suite": jobject = createKeyFileJson(seckey1, "miawmiawcat", 3, AES128CTR, SCRYPT)[] privKey = decodeKeyFileJson(jobject, "miawmiawcat")[] - check privKey.toRaw == secKey1.toRaw + check privKey.toRaw == seckey1.toRaw test "Load non-existent pathname test": check: diff --git a/tests/keys/test_keys.nim b/tests/keys/test_keys.nim index 63a3dd7..600170d 100644 --- a/tests/keys/test_keys.nim +++ b/tests/keys/test_keys.nim @@ -16,7 +16,7 @@ import from strutils import toLowerAscii -proc compare(x: openarray[byte], y: openarray[byte]): bool = +proc compare(x: openArray[byte], y: openArray[byte]): bool = result = len(x) == len(y) if result: for i in 0..(len(x) - 1): diff --git a/tests/p2p/discv5_test_helper.nim b/tests/p2p/discv5_test_helper.nim index d9693fb..3ddad9b 100644 --- a/tests/p2p/discv5_test_helper.nim +++ b/tests/p2p/discv5_test_helper.nim @@ -11,8 +11,8 @@ proc localAddress*(port: int): Address = proc initDiscoveryNode*(rng: ref BrHmacDrbgContext, privKey: PrivateKey, address: Address, - bootstrapRecords: openarray[Record] = 
[], - localEnrFields: openarray[(string, seq[byte])] = [], + bootstrapRecords: openArray[Record] = [], + localEnrFields: openArray[(string, seq[byte])] = [], previousRecord = none[enr.Record]()): discv5_protocol.Protocol = # set bucketIpLimit to allow bucket split @@ -30,13 +30,13 @@ proc initDiscoveryNode*(rng: ref BrHmacDrbgContext, privKey: PrivateKey, result.open() -proc nodeIdInNodes*(id: NodeId, nodes: openarray[Node]): bool = +proc nodeIdInNodes*(id: NodeId, nodes: openArray[Node]): bool = for n in nodes: if id == n.id: return true proc generateNode*(privKey: PrivateKey, port: int = 20302, ip: ValidIpAddress = ValidIpAddress.init("127.0.0.1"), - localEnrFields: openarray[FieldPair] = []): Node = + localEnrFields: openArray[FieldPair] = []): Node = let port = Port(port) let enr = enr.Record.init(1, privKey, some(ip), some(port), some(port), localEnrFields).expect("Properly intialized private key") diff --git a/tests/p2p/test_discovery.nim b/tests/p2p/test_discovery.nim index a3756b9..ff1a1b0 100644 --- a/tests/p2p/test_discovery.nim +++ b/tests/p2p/test_discovery.nim @@ -33,7 +33,7 @@ proc packData(payload: openArray[byte], pk: PrivateKey): seq[byte] = msgHash = keccak256.digest(signature & payloadSeq) result = @(msgHash.data) & signature & payloadSeq -proc nodeIdInNodes(id: NodeId, nodes: openarray[Node]): bool = +proc nodeIdInNodes(id: NodeId, nodes: openArray[Node]): bool = for n in nodes: if id == n.id: return true diff --git a/tests/p2p/test_discoveryv5.nim b/tests/p2p/test_discoveryv5.nim index 2c9afce..9ef308c 100644 --- a/tests/p2p/test_discoveryv5.nim +++ b/tests/p2p/test_discoveryv5.nim @@ -676,7 +676,7 @@ suite "Discovery v5 Tests": # Check handshake duplicates check receiveNode.codec.handshakes.len == 1 # Check if it is for the first packet that a handshake is stored - let key = HandShakeKey(nodeId: sendNode.id, address: a) + let key = HandshakeKey(nodeId: sendNode.id, address: a) check receiveNode.codec.handshakes[key].whoareyouData.requestNonce == firstRequestNonce diff --git a/tests/p2p/test_discoveryv5_encoding.nim b/tests/p2p/test_discoveryv5_encoding.nim index 23bbaef..ffcda9c 100644 --- a/tests/p2p/test_discoveryv5_encoding.nim +++ b/tests/p2p/test_discoveryv5_encoding.nim @@ -292,7 +292,7 @@ suite "Discovery v5.1 Packet Encodings Test Vectors": let decoded = codecB.decodePacket(nodeA.address.get(), hexToSeqByte(encodedPacket)) check: - decoded.isOK() + decoded.isOk() decoded.get().messageOpt.isSome() decoded.get().messageOpt.get().reqId.id == hexToSeqByte(pingReqId) decoded.get().messageOpt.get().kind == ping @@ -313,7 +313,7 @@ suite "Discovery v5.1 Packet Encodings Test Vectors": hexToSeqByte(encodedPacket)) check: - decoded.isOK() + decoded.isOk() decoded.get().flag == Flag.Whoareyou decoded.get().whoareyou.requestNonce == hexToByteArray[gcmNonceSize](whoareyouRequestNonce) decoded.get().whoareyou.idNonce == hexToByteArray[idNonceSize](whoareyouIdNonce) @@ -352,7 +352,7 @@ suite "Discovery v5.1 Packet Encodings Test Vectors": challengeData: hexToSeqByte(whoareyouChallengeData)) pubkey = some(privKeyA.toPublicKey()) challenge = Challenge(whoareyouData: whoareyouData, pubkey: pubkey) - key = HandShakeKey(nodeId: nodeA.id, address: nodeA.address.get()) + key = HandshakeKey(nodeId: nodeA.id, address: nodeA.address.get()) check: not codecB.handshakes.hasKeyOrPut(key, challenge) @@ -402,7 +402,7 @@ suite "Discovery v5.1 Packet Encodings Test Vectors": challengeData: hexToSeqByte(whoareyouChallengeData)) pubkey = none(PublicKey) challenge = Challenge(whoareyouData: 
whoareyouData, pubkey: pubkey) - key = HandShakeKey(nodeId: nodeA.id, address: nodeA.address.get()) + key = HandshakeKey(nodeId: nodeA.id, address: nodeA.address.get()) check: not codecB.handshakes.hasKeyOrPut(key, challenge) @@ -520,7 +520,7 @@ suite "Discovery v5.1 Additional Encode/Decode": let decoded = codecB.decodePacket(nodeA.address.get(), data) - let key = HandShakeKey(nodeId: nodeB.id, address: nodeB.address.get()) + let key = HandshakeKey(nodeId: nodeB.id, address: nodeB.address.get()) var challenge: Challenge check: diff --git a/tests/p2p/test_ecies.nim b/tests/p2p/test_ecies.nim index f062b47..2b87146 100644 --- a/tests/p2p/test_ecies.nim +++ b/tests/p2p/test_ecies.nim @@ -14,7 +14,7 @@ import nimcrypto/[utils, sha2, hmac, rijndael], ../../eth/keys, ../../eth/p2p/ecies -proc compare[A, B](x: openarray[A], y: openarray[B], s: int = 0): bool = +proc compare[A, B](x: openArray[A], y: openArray[B], s: int = 0): bool = result = true doAssert(s >= 0) var size = if s == 0: min(len(x), len(y)) else: min(s, min(len(x), len(y))) diff --git a/tests/p2p/test_routing_table.nim b/tests/p2p/test_routing_table.nim index 4a04356..da35e74 100644 --- a/tests/p2p/test_routing_table.nim +++ b/tests/p2p/test_routing_table.nim @@ -6,7 +6,7 @@ import ../../eth/keys, ../../eth/p2p/discoveryv5/[routing_table, node, enr], ./discv5_test_helper -func customDistance*(a, b: NodeId): Uint256 = +func customDistance*(a, b: NodeId): UInt256 = if a >= b: a - b else: @@ -31,8 +31,8 @@ suite "Routing Table Tests": bucketIpLimit: BUCKET_SIZE + REPLACEMENT_CACHE_SIZE + 1) let customDistanceCalculator = DistanceCalculator( - calculateDistance: customDistance, - calculateLogDistance: customLogDistance, + calculateDistance: customDistance, + calculateLogDistance: customLogDistance, calculateIdAtDistance: customIdAdDist) test "Add local node": diff --git a/tests/p2p/test_shh.nim b/tests/p2p/test_shh.nim index 6c80199..f047771 100644 --- a/tests/p2p/test_shh.nim +++ b/tests/p2p/test_shh.nim @@ -226,11 +226,11 @@ suite "Whisper envelope": var env = Envelope(ttl: 1, data: @[byte 0xde, 0xad, 0xbe, 0xef]) # PoW calculation with no leading zeroes env.nonce = 100000 - check hashAndPoW(env) == ("A788E02A95BFC673709E97CA81E39CA903BAD5638D3388964C51EB64952172D6", + check hashAndPow(env) == ("A788E02A95BFC673709E97CA81E39CA903BAD5638D3388964C51EB64952172D6", 0.07692307692307693) # PoW calculation with 8 leading zeroes env.nonce = 276 - check hashAndPoW(env) == ("00E2374C6353C243E4073E209A7F2ACB2506522AF318B3B78CF9A88310A2A11C", + check hashAndPow(env) == ("00E2374C6353C243E4073E209A7F2ACB2506522AF318B3B78CF9A88310A2A11C", 19.692307692307693) suite "Whisper queue": diff --git a/tests/rlp/test_api_usage.nim b/tests/rlp/test_api_usage.nim index 727bb6a..57132fe 100644 --- a/tests/rlp/test_api_usage.nim +++ b/tests/rlp/test_api_usage.nim @@ -105,7 +105,7 @@ suite "test api usage": rlp.listElem(2).toString == "Donec ligula tortor, egestas eu est vitae" # test creating RLPs from other RLPs - var list = rlpFromBytes encodeList(rlp.listELem(1), rlp.listELem(0)) + var list = rlpFromBytes encodeList(rlp.listElem(1), rlp.listElem(0)) # test that iteration with enterList/skipElem works as expected doAssert list.enterList # We already know that we are working with a list @@ -191,7 +191,7 @@ suite "test api usage": Inf, NegInf, NaN]: template isNaN(n): bool = - classify(n) == fcNaN + classify(n) == fcNan template chk(input) = let restored = decode(encode(input), float64) diff --git a/tests/trie/test_json_suite.nim 
b/tests/trie/test_json_suite.nim index 5af9c4f..e204092 100644 --- a/tests/trie/test_json_suite.nim +++ b/tests/trie/test_json_suite.nim @@ -14,7 +14,7 @@ type proc cmp(lhs, rhs: TestOp): int = cmp(lhs.idx, rhs.idx) proc `<=`(lhs, rhs: TestOp): bool = lhs.idx <= rhs.idx -proc runSingleTest(testSequence: openarray[TestOp], +proc runSingleTest(testSequence: openArray[TestOp], secureMode: bool, expectedRootHash: string): bool = var diff --git a/tests/utp/test_discv5_protocol.nim b/tests/utp/test_discv5_protocol.nim index ab675d8..73e92a2 100644 --- a/tests/utp/test_discv5_protocol.nim +++ b/tests/utp/test_discv5_protocol.nim @@ -21,8 +21,8 @@ proc localAddress*(port: int): Address = proc initDiscoveryNode*(rng: ref BrHmacDrbgContext, privKey: PrivateKey, address: Address, - bootstrapRecords: openarray[Record] = [], - localEnrFields: openarray[(string, seq[byte])] = [], + bootstrapRecords: openArray[Record] = [], + localEnrFields: openArray[(string, seq[byte])] = [], previousRecord = none[enr.Record]()): discv5_protocol.Protocol = # set bucketIpLimit to allow bucket split let tableIpLimits = TableIpLimits(tableIpLimit: 1000, bucketIpLimit: 24) @@ -213,7 +213,7 @@ procSuite "Utp protocol over discovery v5 tests": check: wResult.isOk() - + let readData = await clientSocket.read(len(serverData)) check: diff --git a/tests/utp/test_packets.nim b/tests/utp/test_packets.nim index 6f4debc..e1368e1 100644 --- a/tests/utp/test_packets.nim +++ b/tests/utp/test_packets.nim @@ -24,9 +24,9 @@ suite "Utp packets encoding/decoding": check: len(encoded) == 20 decoded.isOk() - + let synPacketDec = decoded.get() - + check: synPacketDec == synPacket @@ -38,9 +38,9 @@ suite "Utp packets encoding/decoding": check: len(encoded) == 20 decoded.isOk() - + let finPacketDec = decoded.get() - + check: finPacketDec == finPacket @@ -52,9 +52,9 @@ suite "Utp packets encoding/decoding": check: len(encoded) == 20 decoded.isOk() - + let resetPacketDec = decoded.get() - + check: resetPacketDec == resetPacket @@ -66,9 +66,9 @@ suite "Utp packets encoding/decoding": check: len(encoded) == 20 decoded.isOk() - + let ackPacketDec = decoded.get() - + check: ackPacketDec == ackPacket @@ -81,9 +81,9 @@ suite "Utp packets encoding/decoding": check: len(encoded) == 26 decoded.isOk() - + let ackPacketDec = decoded.get() - + check: ackPacketDec == ackPacket ackPacketDec.eack.isSome() @@ -112,7 +112,7 @@ suite "Utp packets encoding/decoding": # delete last byte, now packet is to short encoded3.del(encoded3.high) let err3 = decodePacket(encoded3) - + check: err3.isErr() err3.error() == "Packet too short for selective ack extension" diff --git a/tests/utp/test_protocol.nim b/tests/utp/test_protocol.nim index 7df752e..d39207f 100644 --- a/tests/utp/test_protocol.nim +++ b/tests/utp/test_protocol.nim @@ -36,13 +36,13 @@ proc allowOneIdCallback(allowedId: uint16): AllowConnectionCallback[TransportAdd connectionId == allowedId ) -proc transferData(sender: UtpSocket[TransportAddress], receiver: UtpSocket[TransportAddress], data: seq[byte]): Future[seq[byte]] {.async.}= +proc transferData(sender: UtpSocket[TransportAddress], receiver: UtpSocket[TransportAddress], data: seq[byte]): Future[seq[byte]] {.async.} = let bytesWritten = await sender.write(data) doAssert bytesWritten.get() == len(data) let received = await receiver.read(len(data)) return received -type +type ClientServerScenario = object utp1: UtpProtocol utp2: UtpProtocol @@ -94,7 +94,7 @@ proc init2ClientsServerScenario(): Future[TwoClientsServerScenario] {.async.} = let address3 = 
initTAddress("127.0.0.1", 9081) let utpProt3 = UtpProtocol.new(registerIncomingSocketCallback(serverSockets), address3) - + let clientSocket1 = await utpProt1.connectTo(address2) let clientSocket2 = await utpProt1.connectTo(address3) @@ -135,13 +135,13 @@ procSuite "Utp protocol over udp tests": let sock = sockResult.get() # this future will be completed when we called accepted connection callback await server2Called.wait() - + check: sock.isConnected() # after successful connection outgoing buffer should be empty as syn packet # should be correctly acked sock.numPacketsInOutGoingBuffer() == 0 - + server2Called.isSet() await utpProt1.shutdownWait() @@ -155,7 +155,7 @@ procSuite "Utp protocol over udp tests": let address1 = initTAddress("127.0.0.1", 9080) let connectionResult = await utpProt1.connectTo(address1) - + check: connectionResult.isErr() @@ -165,7 +165,7 @@ procSuite "Utp protocol over udp tests": connectionError.kind == ConnectionTimedOut await waitUntil(proc (): bool = utpProt1.openSockets() == 0) - + check: utpProt1.openSockets() == 0 @@ -177,7 +177,7 @@ procSuite "Utp protocol over udp tests": let utpProt1 = UtpProtocol.new(setAcceptedCallback(server1Called), address, SocketConfig.init(milliseconds(500))) let address1 = initTAddress("127.0.0.1", 9080) - + let futSock = utpProt1.connectTo(address1) # waiting 400 milisecond will trigger at least one re-send @@ -188,11 +188,11 @@ procSuite "Utp protocol over udp tests": # this future will be completed when we called accepted connection callback await server2Called.wait() - + yield futSock check: - futSock.finished() and (not futsock.failed()) and (not futsock.cancelled()) + futSock.finished() and (not futSock.failed()) and (not futSock.cancelled()) server2Called.isSet() await utpProt1.shutdownWait() @@ -200,7 +200,7 @@ procSuite "Utp protocol over udp tests": asyncTest "Success data transfer when data fits into one packet": let s = await initClientServerScenario() - + check: s.clientSocket.isConnected() # after successful connection outgoing buffer should be empty as syn packet @@ -227,7 +227,7 @@ procSuite "Utp protocol over udp tests": asyncTest "Success data transfer when data need to be sliced into multiple packets": let s = await initClientServerScenario() - + check: s.clientSocket.isConnected() # after successful connection outgoing buffer should be empty as syn packet @@ -238,7 +238,7 @@ procSuite "Utp protocol over udp tests": # 5000 bytes is over maximal packet size let bytesToTransfer = generateByteArray(rng[], 5000) - + let bytesReceivedFromClient = await transferData(s.clientSocket, s.serverSocket, bytesToTransfer) let bytesReceivedFromServer = await transferData(s.serverSocket, s.clientSocket, bytesToTransfer) @@ -267,7 +267,7 @@ procSuite "Utp protocol over udp tests": # 5000 bytes is over maximal packet size let bytesToTransfer = generateByteArray(rng[], 5000) - + let written = await s.clientSocket.write(bytesToTransfer) check: @@ -281,7 +281,7 @@ procSuite "Utp protocol over udp tests": written1.get() == len(bytesToTransfer) let bytesReceived = await s.serverSocket.read(len(bytesToTransfer) + len(bytesToTransfer1)) - + # ultimatly all send packets will acked, and outgoing buffer will be empty await waitUntil(proc (): bool = s.clientSocket.numPacketsInOutGoingBuffer() == 0) @@ -293,20 +293,20 @@ procSuite "Utp protocol over udp tests": asyncTest "Success data transfers from multiple clients": let s = await init2ClientsServerScenario() - + check: s.clientSocket1.isConnected() s.clientSocket2.isConnected() 
s.clientSocket1.numPacketsInOutGoingBuffer() == 0 s.clientSocket2.numPacketsInOutGoingBuffer() == 0 - + let numBytesToTransfer = 5000 let client1Data = generateByteArray(rng[], numBytesToTransfer) let client2Data = generateByteArray(rng[], numBytesToTransfer) discard s.clientSocket1.write(client1Data) discard s.clientSocket2.write(client2Data) - + let server1ReadBytes = await s.serverSocket1.read(numBytesToTransfer) let server2ReadBytes = await s.serverSocket2.read(numBytesToTransfer) @@ -386,7 +386,7 @@ procSuite "Utp protocol over udp tests": var serverSockets = newAsyncQueue[UtpSocket[TransportAddress]]() var server1Called = newAsyncEvent() let address1 = initTAddress("127.0.0.1", 9079) - let utpProt1 = + let utpProt1 = UtpProtocol.new(setAcceptedCallback(server1Called), address1, SocketConfig.init(lowSynTimeout)) let address2 = initTAddress("127.0.0.1", 9080) @@ -401,7 +401,7 @@ procSuite "Utp protocol over udp tests": SocketConfig.init(), allowOneIdCallback(allowedId) ) - + let allowedSocketRes = await utpProt1.connectTo(address3, allowedId) let notAllowedSocketRes = await utpProt2.connectTo(address3, allowedId + 1) @@ -424,7 +424,7 @@ procSuite "Utp protocol over udp tests": asyncTest "Success data transfer of a lot of data should increase available window on sender side": let s = await initClientServerScenario() - + check: s.clientSocket.isConnected() # initially window has value equal to some pre configured constant @@ -437,7 +437,7 @@ procSuite "Utp protocol over udp tests": # big transfer of 50kb let bytesToTransfer = generateByteArray(rng[], 50000) - + let bytesReceivedFromClient = await transferData(s.clientSocket, s.serverSocket, bytesToTransfer) # ultimatly all send packets will acked, and outgoing buffer will be empty @@ -455,7 +455,7 @@ procSuite "Utp protocol over udp tests": asyncTest "Not used socket should decay its max send window": let s = await initClientServerScenario() - + check: s.clientSocket.isConnected() # initially window has value equal to some pre configured constant @@ -468,7 +468,7 @@ procSuite "Utp protocol over udp tests": # big transfer of 50kb let bytesToTransfer = generateByteArray(rng[], 50000) - + let bytesReceivedFromClient = await transferData(s.clientSocket, s.serverSocket, bytesToTransfer) # ultimatly all send packets will acked, and outgoing buffer will be empty diff --git a/tests/utp/test_utp_router.nim b/tests/utp/test_utp_router.nim index 5371bfa..11b8cc2 100644 --- a/tests/utp/test_utp_router.nim +++ b/tests/utp/test_utp_router.nim @@ -52,9 +52,9 @@ procSuite "Utp router unit tests": r: UtpRouter[int], remote: int, pq: AsyncQueue[(Packet, int)], - initialRemoteSeq: uint16): (UtpSocket[int], Packet)= + initialRemoteSeq: uint16): (UtpSocket[int], Packet)= let connectFuture = router.connectTo(remote) - + let (initialPacket, sender) = await pq.get() check: @@ -62,10 +62,10 @@ procSuite "Utp router unit tests": let responseAck = ackPacket( - initialRemoteSeq, - initialPacket.header.connectionId, - initialPacket.header.seqNr, - testBufferSize, + initialRemoteSeq, + initialPacket.header.connectionId, + initialPacket.header.seqNr, + testBufferSize, 0 ) @@ -98,7 +98,7 @@ procSuite "Utp router unit tests": asyncTest "Incoming connection should be closed when not receving data for period of time when configured": let q = newAsyncQueue[UtpSocket[int]]() - let router = + let router = UtpRouter[int].new( registerIncomingSocketCallback(q), SocketConfig.init(incomingSocketReceiveTimeout = some(seconds(2))), @@ -123,7 +123,7 @@ procSuite "Utp router unit 
tests": asyncTest "Incoming connection should be in connected state when configured": let q = newAsyncQueue[UtpSocket[int]]() - let router = + let router = UtpRouter[int].new( registerIncomingSocketCallback(q), SocketConfig.init(incomingSocketReceiveTimeout = none[Duration]()), @@ -150,7 +150,7 @@ procSuite "Utp router unit tests": asyncTest "Incoming connection should change state to connected when receiving data packet": let q = newAsyncQueue[UtpSocket[int]]() let pq = newAsyncQueue[(Packet, int)]() - let router = + let router = UtpRouter[int].new( registerIncomingSocketCallback(q), SocketConfig.init(incomingSocketReceiveTimeout = some(seconds(3))), @@ -168,20 +168,20 @@ procSuite "Utp router unit tests": let (initialPacket, _) = await pq.get() let socket = await q.get() - + check: router.len() == 1 # socket is not configured to be connected until receiving data not socket.isConnected() - let encodedData = + let encodedData = encodePacket( dataPacket( initSeq + 1, - initConnId + 1, - initialPacket.header.seqNr - 1, - 10, - dataToSend, + initConnId + 1, + initialPacket.header.seqNr - 1, + 10, + dataToSend, 0 ) ) @@ -264,7 +264,7 @@ procSuite "Utp router unit tests": let requestedConnectionId = 1'u16 let connectFuture = router.connectTo(testSender2, requestedConnectionId) - + let (initialPacket, sender) = await pq.get() check: @@ -274,17 +274,17 @@ procSuite "Utp router unit tests": let responseAck = ackPacket( - initialRemoteSeq, - initialPacket.header.connectionId, - initialPacket.header.seqNr, - testBufferSize, + initialRemoteSeq, + initialPacket.header.connectionId, + initialPacket.header.seqNr, + testBufferSize, 0 ) await router.processIncomingBytes(encodePacket(responseAck), testSender2) let outgoingSocket = await connectFuture - + check: outgoingSocket.get().isConnected() router.len() == 1 @@ -302,7 +302,7 @@ procSuite "Utp router unit tests": router.sendCb = initTestSnd(pq) let connectFuture = router.connectTo(testSender2) - + let (initialPacket, sender) = await pq.get() check: @@ -322,14 +322,14 @@ procSuite "Utp router unit tests": router.sendCb = initTestSnd(pq) let connectFuture = router.connectTo(testSender2) - + let (initialPacket, sender) = await pq.get() check: initialPacket.header.pType == ST_SYN router.len() == 1 - await connectFuture.cancelAndWait() + await connectFuture.cancelAndWait() check: router.len() == 0 @@ -353,7 +353,7 @@ procSuite "Utp router unit tests": connectResult.error().kind == ErrorWhileSendingSyn cast[TestError](connectResult.error().error) is TestError router.len() == 0 - + asyncTest "Router should clear closed outgoing connections": let q = newAsyncQueue[UtpSocket[int]]() let pq = newAsyncQueue[(Packet, int)]() @@ -409,7 +409,7 @@ procSuite "Utp router unit tests": check: router.len() == 0 - + asyncTest "Router close outgoing connection which receives reset": let q = newAsyncQueue[UtpSocket[int]]() let pq = newAsyncQueue[(Packet, int)]() diff --git a/tests/utp/test_utp_socket.nim b/tests/utp/test_utp_socket.nim index 9ebc9bf..dae7eb0 100644 --- a/tests/utp/test_utp_socket.nim +++ b/tests/utp/test_utp_socket.nim @@ -71,7 +71,7 @@ procSuite "Utp socket unit test": check: initialPacket.header.pType == ST_SYN - let responseAck = + let responseAck = ackPacket( initialRemoteSeq, initialPacket.header.connectionId, @@ -159,7 +159,7 @@ procSuite "Utp socket unit test": let (outgoingSocket, initialPacket) = connectOutGoingSocket(initalRemoteSeqNr, q) - let dataP1 = + let dataP1 = dataPacket( initalRemoteSeqNr, initialPacket.header.connectionId, @@ -168,7 
+168,7 @@ procSuite "Utp socket unit test": data, 0 ) - + await outgoingSocket.processPacket(dataP1) let ack1 = await q.get() @@ -205,7 +205,7 @@ procSuite "Utp socket unit test": check: ack2.header.pType == ST_STATE # we are acking in one shot whole 10 packets - ack2.header.ackNr == initalRemoteSeqNr + uint16(len(packets) - 1) + ack2.header.ackNr == initalRemoteSeqNr + uint16(len(packets) - 1) let receivedData = await outgoingSocket.read(len(data)) @@ -213,7 +213,7 @@ procSuite "Utp socket unit test": receivedData == data await outgoingSocket.destroyWait() - + asyncTest "Processing out of order data packet should ignore duplicated not ordered packets": # TODO test is valid until implementing selective acks let q = newAsyncQueue[Packet]() @@ -240,15 +240,15 @@ procSuite "Utp socket unit test": check: ack2.header.pType == ST_STATE # we are acking in one shot whole 10 packets - ack2.header.ackNr == initalRemoteSeqNr + uint16(len(packets) - 1) + ack2.header.ackNr == initalRemoteSeqNr + uint16(len(packets) - 1) let receivedData = await outgoingSocket.read(len(data)) check: receivedData == data - + await outgoingSocket.destroyWait() - + asyncTest "Processing packets in random order": # TODO test is valid until implementing selective acks let q = newAsyncQueue[Packet]() @@ -274,7 +274,7 @@ procSuite "Utp socket unit test": # as they can be fired at any point. What matters is that data is passed # in same order as received. receivedData == data - + await outgoingSocket.destroyWait() asyncTest "Ignoring totally out of order packet": @@ -332,7 +332,7 @@ procSuite "Utp socket unit test": await outgoingSocket.processPacket(responseAck) - check: + check: outgoingSocket.numPacketsInOutGoingBuffer() == 0 await outgoingSocket.destroyWait() @@ -356,7 +356,7 @@ procSuite "Utp socket unit test": not writeFut1.finished() not writeFut2.finished() - outgoingSocket.destroy() + outgoingSocket.destroy() yield writeFut1 yield writeFut2 @@ -367,7 +367,7 @@ procSuite "Utp socket unit test": writeFut1.read().isErr() writeFut2.read().isErr() - await outgoingSocket.destroyWait() + await outgoingSocket.destroyWait() asyncTest "Cancelled write futures should not be processed if cancelled before processing": let q = newAsyncQueue[Packet]() @@ -416,14 +416,14 @@ procSuite "Utp socket unit test": p1.payload == dataToWrite1 p2.payload == dataToWrite3 - await outgoingSocket.destroyWait() + await outgoingSocket.destroyWait() asyncTest "Socket should re-send data packet configurable number of times before declaring failure": - let q = newAsyncQueue[Packet]() + let q = newAsyncQueue[Packet]() let initalRemoteSeqNr = 10'u16 let outgoingSocket = newOutgoingSocket[TransportAddress]( - testAddress, + testAddress, initTestSnd(q), SocketConfig.init(milliseconds(3000), 2), defaultRcvOutgoingId, @@ -437,7 +437,7 @@ procSuite "Utp socket unit test": check: initialPacket.header.pType == ST_SYN - let responseAck = + let responseAck = ackPacket( initalRemoteSeqNr, initialPacket.header.connectionId, @@ -499,13 +499,13 @@ procSuite "Utp socket unit test": let finP = finPacket( - initialRemoteSeq, - initialPacket.header.connectionId, - initialPacket.header.seqNr, + initialRemoteSeq, + initialPacket.header.connectionId, + initialPacket.header.seqNr, testBufferSize, 0 ) - + await outgoingSocket.processPacket(finP) let ack1 = await q.get() @@ -525,35 +525,35 @@ procSuite "Utp socket unit test": let readF = outgoingSocket.read() - let dataP = + let dataP = dataPacket( initialRemoteSeq, - initialPacket.header.connectionId, - 
initialPacket.header.seqNr, - testBufferSize, - data, + initialPacket.header.connectionId, + initialPacket.header.seqNr, + testBufferSize, + data, 0 ) - let dataP1 = + let dataP1 = dataPacket( - initialRemoteSeq + 1, - initialPacket.header.connectionId, - initialPacket.header.seqNr, - testBufferSize, - data1, + initialRemoteSeq + 1, + initialPacket.header.connectionId, + initialPacket.header.seqNr, + testBufferSize, + data1, 0 ) - let finP = + let finP = finPacket( - initialRemoteSeq + 2, - initialPacket.header.connectionId, - initialPacket.header.seqNr, - testBufferSize, + initialRemoteSeq + 2, + initialPacket.header.connectionId, + initialPacket.header.seqNr, + testBufferSize, 0 ) - + await outgoingSocket.processPacket(finP) check: @@ -565,7 +565,7 @@ procSuite "Utp socket unit test": check: not readF.finished() not outgoingSocket.atEof() - + await outgoingSocket.processPacket(dataP) let bytes = await readF @@ -587,34 +587,34 @@ procSuite "Utp socket unit test": let readF = outgoingSocket.read() - let dataP = + let dataP = dataPacket( - initialRemoteSeq, - initialPacket.header.connectionId, - initialPacket.header.seqNr, - testBufferSize, - data, + initialRemoteSeq, + initialPacket.header.connectionId, + initialPacket.header.seqNr, + testBufferSize, + data, 0 ) - let finP = + let finP = finPacket( - initialRemoteSeq + 1, - initialPacket.header.connectionId, - initialPacket.header.seqNr, - testBufferSize, + initialRemoteSeq + 1, + initialPacket.header.connectionId, + initialPacket.header.seqNr, + testBufferSize, 0 ) # dataP1 has seqNr larger than fin, there fore it should be considered past eof and never passed # to user of library - let dataP1 = + let dataP1 = dataPacket( - initialRemoteSeq + 2, - initialPacket.header.connectionId, - initialPacket.header.seqNr, - testBufferSize, - data1, + initialRemoteSeq + 2, + initialPacket.header.connectionId, + initialPacket.header.seqNr, + testBufferSize, + data1, 0 ) @@ -630,7 +630,7 @@ procSuite "Utp socket unit test": check: not readF.finished() not outgoingSocket.atEof() - + await outgoingSocket.processPacket(dataP) # it is in order dataP1, as we have now processed dataP + fin which came before @@ -674,20 +674,20 @@ procSuite "Utp socket unit test": check: sendFin.header.pType == ST_FIN - let responseAck = + let responseAck = ackPacket( - initialRemoteSeq, - initialPacket.header.connectionId, - sendFin.header.seqNr, + initialRemoteSeq, + initialPacket.header.connectionId, + sendFin.header.seqNr, testBufferSize, 0 ) await outgoingSocket.processPacket(responseAck) - + check: not outgoingSocket.isConnected() - + await outgoingSocket.destroyWait() asyncTest "Trying to write data onto closed socket should return error": @@ -737,16 +737,16 @@ procSuite "Utp socket unit test": let sCfg = SocketConfig.init(optRcvBuffer = initialRcvBufferSize) let (outgoingSocket, initialPacket) = connectOutGoingSocket(initialRemoteSeqNr, q, testBufferSize, sCfg) - let dataP1 = + let dataP1 = dataPacket( - initialRemoteSeqNr, - initialPacket.header.connectionId, - initialPacket.header.seqNr, - testBufferSize, + initialRemoteSeqNr, + initialPacket.header.connectionId, + initialPacket.header.seqNr, + testBufferSize, data, 0 ) - + await outgoingSocket.processPacket(dataP1) let ack1 = await q.get() @@ -782,16 +782,16 @@ procSuite "Utp socket unit test": let sCfg = SocketConfig.init(optRcvBuffer = initialRcvBufferSize) let (outgoingSocket, initialPacket) = connectOutGoingSocket(initalRemoteSeqNr, q, testBufferSize, sCfg) - let dataP1 = + let dataP1 = dataPacket( - 
initalRemoteSeqNr, - initialPacket.header.connectionId, - initialPacket.header.seqNr, - testBufferSize, - data, + initalRemoteSeqNr, + initialPacket.header.connectionId, + initialPacket.header.seqNr, + testBufferSize, + data, 0 ) - + await outgoingSocket.processPacket(dataP1) let ack1 = await q.get() @@ -815,7 +815,7 @@ procSuite "Utp socket unit test": # we have read all data from rcv buffer, advertised window should go back to # initial size sentData.header.wndSize == initialRcvBufferSize - + await outgoingSocket.destroyWait() asyncTest "Socket should ignore packets with bad ack number": @@ -828,36 +828,36 @@ procSuite "Utp socket unit test": let (outgoingSocket, initialPacket) = connectOutGoingSocket(initialRemoteSeq, q) # data packet with ack nr set above our seq nr i.e packet from the future - let dataFuture = + let dataFuture = dataPacket( - initialRemoteSeq, - initialPacket.header.connectionId, - initialPacket.header.seqNr + 1, - testBufferSize, - data1, + initialRemoteSeq, + initialPacket.header.connectionId, + initialPacket.header.seqNr + 1, + testBufferSize, + data1, 0 ) # data packet wth ack number set below out ack window i.e packet too old - let dataTooOld = + let dataTooOld = dataPacket( - initialRemoteSeq, - initialPacket.header.connectionId, - initialPacket.header.seqNr - allowedAckWindow - 1, + initialRemoteSeq, + initialPacket.header.connectionId, + initialPacket.header.seqNr - allowedAckWindow - 1, testBufferSize, - data2, + data2, 0 ) - let dataOk = + let dataOk = dataPacket( initialRemoteSeq, initialPacket.header.connectionId, initialPacket.header.seqNr, - testBufferSize, + testBufferSize, data3, 0 ) - + await outgoingSocket.processPacket(dataFuture) await outgoingSocket.processPacket(dataTooOld) await outgoingSocket.processPacket(dataOk) @@ -867,7 +867,7 @@ procSuite "Utp socket unit test": check: # data1 and data2 were sent in bad packets we should only receive data3 receivedBytes == data3 - + await outgoingSocket.destroyWait() asyncTest "Writing data should increase current bytes window": @@ -914,9 +914,9 @@ procSuite "Utp socket unit test": let responseAck = ackPacket( initialRemoteSeq, - initialPacket.header.connectionId, - sentPacket.header.seqNr, - testBufferSize, + initialPacket.header.connectionId, + sentPacket.header.seqNr, + testBufferSize, 0 ) @@ -946,7 +946,7 @@ procSuite "Utp socket unit test": discard await outgoingSocket.write(dataToWrite1) - + let sentPacket1 = await q.get() check: @@ -986,9 +986,9 @@ procSuite "Utp socket unit test": let someAckFromRemote = ackPacket( - initialRemoteSeq, - initialPacket.header.connectionId, - initialPacket.header.seqNr, + initialRemoteSeq, + initialPacket.header.connectionId, + initialPacket.header.seqNr, uint32(len(dataToWrite)), 0 ) @@ -1014,12 +1014,12 @@ procSuite "Utp socket unit test": # remote is initialized with buffer to small to handle whole payload let (outgoingSocket, initialPacket) = connectOutGoingSocket(initialRemoteSeq, q) let remoteRcvWindowSize = uint32(outgoingSocket.getPacketSize()) - let someAckFromRemote = + let someAckFromRemote = ackPacket( - initialRemoteSeq, + initialRemoteSeq, initialPacket.header.connectionId, initialPacket.header.seqNr, - remoteRcvWindowSize, + remoteRcvWindowSize, 0 ) @@ -1027,7 +1027,7 @@ procSuite "Utp socket unit test": await outgoingSocket.processPacket(someAckFromRemote) let twoPacketData = generateByteArray(rng[], int(2 * remoteRcvWindowSize)) - + let writeFut = outgoingSocket.write(twoPacketData) # after this time first packet will be send and will timeout, but the 
write should not @@ -1049,9 +1049,9 @@ procSuite "Utp socket unit test": # remote is initialized with buffer to small to handle whole payload let (outgoingSocket, initialPacket) = connectOutGoingSocket(initialRemoteSeq, q) let remoteRcvWindowSize = uint32(outgoingSocket.getPacketSize()) - let someAckFromRemote = + let someAckFromRemote = ackPacket( - initialRemoteSeq, + initialRemoteSeq, initialPacket.header.connectionId, initialPacket.header.seqNr, remoteRcvWindowSize, @@ -1062,15 +1062,15 @@ procSuite "Utp socket unit test": await outgoingSocket.processPacket(someAckFromRemote) let twoPacketData = generateByteArray(rng[], int(2 * remoteRcvWindowSize)) - + let writeFut = outgoingSocket.write(twoPacketData) - let firstAckFromRemote = + let firstAckFromRemote = ackPacket( initialRemoteSeq, - initialPacket.header.connectionId, - initialPacket.header.seqNr + 1, - remoteRcvWindowSize, + initialPacket.header.connectionId, + initialPacket.header.seqNr + 1, + remoteRcvWindowSize, 0 ) @@ -1096,11 +1096,11 @@ procSuite "Utp socket unit test": let q = newAsyncQueue[Packet]() let initialRemoteSeq = 10'u16 let someData = @[1'u8] - let (outgoingSocket, packet) = + let (outgoingSocket, packet) = connectOutGoingSocket( initialRemoteSeq, q, - remoteReceiveBuffer = 0, + remoteReceiveBuffer = 0, cfg = SocketConfig.init( remoteWindowResetTimeout = seconds(3) ) @@ -1121,7 +1121,7 @@ procSuite "Utp socket unit test": # Ultimately, after 3 second remote rcv window will be reseted to minimal value # and write will be able to progress let writeResult = await writeFut - + let p = await q.get() check: @@ -1134,8 +1134,8 @@ procSuite "Utp socket unit test": let q = newAsyncQueue[Packet]() let initialRemoteSeq = 10'u16 let someData1 = @[1'u8] - let somedata2 = @[2'u8] - let (outgoingSocket, initialPacket) = + let someData2 = @[2'u8] + let (outgoingSocket, initialPacket) = connectOutGoingSocket( initialRemoteSeq, q, @@ -1163,10 +1163,10 @@ procSuite "Utp socket unit test": let someAckFromRemote = ackPacket( - initialRemoteSeq, - initialPacket.header.connectionId, - initialPacket.header.seqNr + 1, - 10, + initialRemoteSeq, + initialPacket.header.connectionId, + initialPacket.header.seqNr + 1, + 10, 0 ) @@ -1181,7 +1181,7 @@ procSuite "Utp socket unit test": check: writeFut2.finished() firstPacket.payload == someData1 - secondPacket.payload == somedata2 + secondPacket.payload == someData2 await outgoingSocket.destroyWait() @@ -1191,7 +1191,7 @@ procSuite "Utp socket unit test": let (outgoingSocket, initialPacket) = connectOutGoingSocket(initialRemoteSeq, q) - let dataP1 = + let dataP1 = dataPacket( initialRemoteSeq, initialPacket.header.connectionId, @@ -1203,7 +1203,7 @@ procSuite "Utp socket unit test": check: outgoingSocket.isConnected() - + # necessary to avoid timestampDiff near 0 and flaky tests await sleepAsync(milliseconds(50)) await outgoingSocket.processPacket(dataP1) @@ -1216,7 +1216,7 @@ procSuite "Utp socket unit test": await outgoingSocket.destroyWait() asyncTest "Re-sent packet should have updated timestamps and ack numbers": - let q = newAsyncQueue[Packet]() + let q = newAsyncQueue[Packet]() let initalRemoteSeqNr = 10'u16 let (outgoingSocket, initialPacket) = connectOutGoingSocket(initalRemoteSeqNr, q) @@ -1236,7 +1236,7 @@ procSuite "Utp socket unit test": secondSend.header.timestamp > firstSend.header.timestamp firstSend.header.ackNr == secondSend.header.ackNr - let dataP1 = + let dataP1 = dataPacket( initalRemoteSeqNr, initialPacket.header.connectionId, @@ -1245,7 +1245,7 @@ procSuite "Utp socket unit 
test": @[1'u8], 0 ) - + await outgoingSocket.processPacket(dataP1) let ack = await q.get()