mirror of https://github.com/status-im/nim-eth.git

Style fixes according to --styleCheck:usages (#452)

Currently this only sets `--styleCheck:hint`, as some dependency fixes are still required and the compiler trips over the `findnode` MessageKind, the `findnode` Message field and the `findNode` proc, as well as over the `protocol.Protocol` usage.

This commit is contained in:
parent 0cfe7df817
commit 2c236f6495

doc/p2p.md | 10
@@ -67,7 +67,7 @@ the network. To start the connection process, call `node.connectToNetwork`:
 ``` nim
 proc connectToNetwork*(node: var EthereumNode,
-bootstrapNodes: openarray[ENode],
+bootstrapNodes: openArray[ENode],
 startListening = true,
 enableDiscovery = true)
 ```
@@ -106,7 +106,7 @@ p2pProtocol DevP2P(version = 0, rlpxName = "p2p"):
 proc hello(peer: Peer,
 version: uint,
 clientId: string,
-capabilities: openarray[Capability],
+capabilities: openArray[Capability],
 listenPort: uint,
 nodeId: P2PNodeId) =
 peer.id = nodeId
@@ -195,7 +195,7 @@ There are few things to note in the above example:

 2. Each message defined in the protocol received a corresponding type name,
 matching the message name (e.g. `p2p.hello`). This type will have fields
-matching the parameter names of the message. If the messages has `openarray`
+matching the parameter names of the message. If the messages has `openArray`
 params, these will be remapped to `seq` types.

 If the designated messages also has an attached handler, the future returned
@@ -219,8 +219,8 @@ p2pProtocol les(version = 2):
 ...

 requestResponse:
-proc getProofs(p: Peer, proofs: openarray[ProofRequest])
-proc proofs(p: Peer, BV: uint, proofs: openarray[Blob])
+proc getProofs(p: Peer, proofs: openArray[ProofRequest])
+proc proofs(p: Peer, BV: uint, proofs: openArray[Blob])

 ...
 ```
@@ -19,8 +19,9 @@ requires "nim >= 1.2.0",
 "testutils",
 "unittest2"

-var commonParams = " --verbosity:0 --hints:off --skipUserCfg:on --warning[ObservableStores]:off " &
-getEnv("NIMFLAGS") & " "
+let commonParams = " --verbosity:0 --hints:off --skipUserCfg:on " &
+"--warning[ObservableStores]:off --styleCheck:usages --styleCheck:hint " &
+getEnv("NIMFLAGS") & " "

 proc runTest(path: string, release: bool = true, chronosStrict = true) =
 echo "\nBuilding and running: ", path
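The new `--styleCheck:usages --styleCheck:hint` flags make the compiler report identifier usages whose spelling differs from the declaration. A minimal sketch (not repository code) of what gets flagged:

```nim
# Nim resolves identifiers case- and underscore-insensitively (except for the
# first character), so both calls below refer to the same proc. With
# --styleCheck:usages the second spelling is reported; with --styleCheck:hint
# it stays a hint rather than an error.
proc greetUser(name: string) =
  echo "hello ", name

greetUser("nim")    # matches the declaration, no report
greet_user("nim")   # same symbol, but flagged by --styleCheck:usages
```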
@@ -28,12 +28,12 @@ proc init*(_: type BloomFilter, h: MDigest[256]): BloomFilter =

 # TODO: The following 2 procs should be one genric, but it doesn't compile. Nim bug?
 proc incl*(f: var BloomFilter, v: string) = f.incl(keccak256.digest(v))
-proc incl*(f: var BloomFilter, v: openarray[byte]) = f.incl(keccak256.digest(v))
+proc incl*(f: var BloomFilter, v: openArray[byte]) = f.incl(keccak256.digest(v))

 proc contains*(f: BloomFilter, h: MDigest[256]): bool =
 for bits in bloomBits(h):
 if (f.value and bits).isZero: return false
 return true

-template contains*[T](f: BloomFilter, v: openarray[T]): bool =
+template contains*[T](f: BloomFilter, v: openArray[T]): bool =
 f.contains(keccak256.digest(v))
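For context, a small usage sketch of the bloom filter API touched here; the import path is an assumption (the module ships as eth/bloom in this repository):

```nim
import eth/bloom

var f: BloomFilter
f.incl(@[byte 1, 2, 3])               # the openArray[byte] overload above
doAssert f.contains(@[byte 1, 2, 3])
# Not guaranteed false (bloom filters can false-positive), but with a single
# inserted element it is effectively always false:
echo f.contains(@[byte 4, 5, 6])
```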
@@ -304,13 +304,13 @@ proc append*(rlpWriter: var RlpWriter, value: StUint) =
 else:
 rlpWriter.append(value.truncate(int))

-proc read*(rlp: var Rlp, T: type Stint): T {.inline.} =
+proc read*(rlp: var Rlp, T: type StInt): T {.inline.} =
 # The Ethereum Yellow Paper defines the RLP serialization only
 # for unsigned integers:
 {.fatal: "RLP serialization of signed integers is not allowed".}
 discard

-proc append*(rlpWriter: var RlpWriter, value: Stint) =
+proc append*(rlpWriter: var RlpWriter, value: StInt) =
 # The Ethereum Yellow Paper defines the RLP serialization only
 # for unsigned integers:
 {.fatal: "RLP serialization of signed integers is not allowed".}
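As the comments say, RLP is only defined for unsigned integers; a small round-trip sketch, assuming the eth/rlp module of this repository:

```nim
import eth/rlp

let encoded = rlp.encode(1234'u64)               # unsigned integers round-trip
doAssert rlp.decode(encoded, uint64) == 1234'u64
# Encoding stint's signed StInt type would instead hit the {.fatal.} overloads
# above at compile time.
```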
@@ -671,7 +671,7 @@ method getCodeByHash*(db: AbstractChainDB, hash: KeccakHash): Blob {.base, gcsaf
 method getSetting*(db: AbstractChainDB, key: string): seq[byte] {.base, gcsafe.} =
 notImplemented()

-method setSetting*(db: AbstractChainDB, key: string, val: openarray[byte]) {.base, gcsafe.} =
+method setSetting*(db: AbstractChainDB, key: string, val: openArray[byte]) {.base, gcsafe.} =
 notImplemented()

 method getHeaderProof*(db: AbstractChainDB, req: ProofRequest): Blob {.base, gcsafe.} =
@@ -686,10 +686,10 @@ method getHelperTrieProof*(db: AbstractChainDB, req: HelperTrieProofRequest): Bl
 method getTransactionStatus*(db: AbstractChainDB, txHash: KeccakHash): TransactionStatusMsg {.base, gcsafe.} =
 notImplemented()

-method addTransactions*(db: AbstractChainDB, transactions: openarray[Transaction]) {.base, gcsafe.} =
+method addTransactions*(db: AbstractChainDB, transactions: openArray[Transaction]) {.base, gcsafe.} =
 notImplemented()

-method persistBlocks*(db: AbstractChainDB, headers: openarray[BlockHeader], bodies: openarray[BlockBody]): ValidationResult {.base, gcsafe.} =
+method persistBlocks*(db: AbstractChainDB, headers: openArray[BlockHeader], bodies: openArray[BlockBody]): ValidationResult {.base, gcsafe.} =
 notImplemented()

 method getForkId*(db: AbstractChainDB, n: BlockNumber): ForkID {.base, gcsafe.} =
@@ -13,19 +13,19 @@ type
 RocksStoreRef* = ref object of RootObj
 store: RocksDBInstance

-proc get*(db: RocksStoreRef, key: openarray[byte], onData: kvstore.DataProc): KvResult[bool] =
+proc get*(db: RocksStoreRef, key: openArray[byte], onData: kvstore.DataProc): KvResult[bool] =
 db.store.get(key, onData)

-proc find*(db: RocksStoreRef, prefix: openarray[byte], onFind: kvstore.KeyValueProc): KvResult[int] =
+proc find*(db: RocksStoreRef, prefix: openArray[byte], onFind: kvstore.KeyValueProc): KvResult[int] =
 raiseAssert "Unimplemented"

-proc put*(db: RocksStoreRef, key, value: openarray[byte]): KvResult[void] =
+proc put*(db: RocksStoreRef, key, value: openArray[byte]): KvResult[void] =
 db.store.put(key, value)

-proc contains*(db: RocksStoreRef, key: openarray[byte]): KvResult[bool] =
+proc contains*(db: RocksStoreRef, key: openArray[byte]): KvResult[bool] =
 db.store.contains(key)

-proc del*(db: RocksStoreRef, key: openarray[byte]): KvResult[void] =
+proc del*(db: RocksStoreRef, key: openArray[byte]): KvResult[void] =
 db.store.del(key)

 proc close*(db: RocksStoreRef) =
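A short usage sketch of the callback-style KvStore API above. To stay self-contained it uses the in-memory store; the module path and `MemStoreRef` are assumptions (eth/db/kvstore in this repository), and a `RocksStoreRef` would be used the same way:

```nim
import eth/db/kvstore

let db = MemStoreRef.init()
let key = @[byte 1, 2, 3]
doAssert db.put(key, @[byte 42]).isOk()

var stored: seq[byte]
# `get` hands the value to the callback and returns whether the key was found.
doAssert db.get(key, proc(data: openArray[byte]) = stored = @data).get()
doAssert stored == @[byte 42]
```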
@@ -461,7 +461,7 @@ proc init*(
 let
 name =
 if inMemory: ":memory:"
-else: basepath / name & ".sqlite3"
+else: basePath / name & ".sqlite3"
 flags =
 # For some reason, opening multiple in-memory databases doesn't work if
 # one of them is read-only - for now, disable read-only mode for them
@@ -221,8 +221,8 @@ proc deriveKey(password: string, salt: string,

 proc encryptKey(seckey: PrivateKey,
 cryptkind: CryptKind,
-key: openarray[byte],
-iv: openarray[byte]): KfResult[array[KeyLength, byte]] =
+key: openArray[byte],
+iv: openArray[byte]): KfResult[array[KeyLength, byte]] =
 if cryptkind == AES128CTR:
 var crypttext: array[KeyLength, byte]
 var ctx: CTR[aes128]
@@ -233,10 +233,10 @@ proc encryptKey(seckey: PrivateKey,
 else:
 err(NotImplemented)

-proc decryptKey(ciphertext: openarray[byte],
+proc decryptKey(ciphertext: openArray[byte],
 cryptkind: CryptKind,
-key: openarray[byte],
-iv: openarray[byte]): KfResult[array[KeyLength, byte]] =
+key: openArray[byte],
+iv: openArray[byte]): KfResult[array[KeyLength, byte]] =
 if cryptkind == AES128CTR:
 if len(iv) != aes128.sizeBlock:
 return err(IncorrectIV)
@@ -295,7 +295,7 @@ proc decodeSalt(m: string): string =
 else:
 result = ""

-proc compareMac(m1: openarray[byte], m2: openarray[byte]): bool =
+proc compareMac(m1: openArray[byte], m2: openArray[byte]): bool =
 if len(m1) == len(m2) and len(m1) > 0:
 result = equalMem(unsafeAddr m1[0], unsafeAddr m2[0], len(m1))

@@ -39,7 +39,7 @@ logScope:
 ## Also does threadvar initialisation.
 ## Must be called before redirectPorts() in each thread.
 proc getExternalIP*(natStrategy: NatStrategy, quiet = false): Option[IpAddress] =
-var externalIP: IPAddress
+var externalIP: IpAddress

 if natStrategy == NatAny or natStrategy == NatUpnp:
 if upnp == nil:
@@ -245,7 +245,7 @@ proc setupNat*(natStrategy: NatStrategy, tcpPort, udpPort: Port,
 ## original ports as best effort.
 ## TODO: Allow for tcp or udp port mapping to be optional.
 let extIp = getExternalIP(natStrategy)
-if extIP.isSome:
+if extIp.isSome:
 let ip = ValidIpAddress.init(extIp.get)
 let extPorts = ({.gcsafe.}:
 redirectPorts(tcpPort = tcpPort,
@@ -308,7 +308,7 @@ proc setupAddress*(natConfig: NatConfig, bindIp: ValidIpAddress,

 case natConfig.nat:
 of NatAny:
-let bindAddress = initTAddress(bindIP, Port(0))
+let bindAddress = initTAddress(bindIp, Port(0))
 if bindAddress.isAnyLocal():
 let ip = getRouteIpv4()
 if ip.isErr():
@@ -326,11 +326,11 @@ proc setupAddress*(natConfig: NatConfig, bindIp: ValidIpAddress,
 return setupNat(natConfig.nat, tcpPort, udpPort, clientId)
 elif bindAddress.isPublic():
 # When a specific public interface is provided, use that one.
-return (some(ValidIpAddress.init(bindIP)), some(tcpPort), some(udpPort))
+return (some(ValidIpAddress.init(bindIp)), some(tcpPort), some(udpPort))
 else:
 return setupNat(natConfig.nat, tcpPort, udpPort, clientId)
 of NatNone:
-let bindAddress = initTAddress(bindIP, Port(0))
+let bindAddress = initTAddress(bindIp, Port(0))
 if bindAddress.isAnyLocal():
 let ip = getRouteIpv4()
 if ip.isErr():
@@ -345,7 +345,7 @@ proc setupAddress*(natConfig: NatConfig, bindIp: ValidIpAddress,
 return (none(ValidIpAddress), some(tcpPort), some(udpPort))
 elif bindAddress.isPublic():
 # When a specific public interface is provided, use that one.
-return (some(ValidIpAddress.init(bindIP)), some(tcpPort), some(udpPort))
+return (some(ValidIpAddress.init(bindIp)), some(tcpPort), some(udpPort))
 else:
 error "Bind IP is not a public IP address. Should not use --nat:none option"
 return (none(ValidIpAddress), some(tcpPort), some(udpPort))
@@ -53,7 +53,7 @@ type
 HandshakeFlag* = enum
 Initiator, ## `Handshake` owner is connection initiator
 Responder, ## `Handshake` owner is connection responder
-Eip8 ## Flag indicates that EIP-8 handshake is used
+EIP8 ## Flag indicates that EIP-8 handshake is used

 AuthError* = enum
 EcdhError = "auth: ECDH shared secret could not be calculated"
@@ -127,7 +127,7 @@ proc tryInit*(
 proc authMessagePreEIP8(h: var Handshake,
 rng: var BrHmacDrbgContext,
 pubkey: PublicKey,
-output: var openarray[byte],
+output: var openArray[byte],
 outlen: var int,
 flag: byte = 0,
 encrypt: bool = true): AuthResult[void] =
@@ -166,7 +166,7 @@ proc authMessagePreEIP8(h: var Handshake,
 proc authMessageEIP8(h: var Handshake,
 rng: var BrHmacDrbgContext,
 pubkey: PublicKey,
-output: var openarray[byte],
+output: var openArray[byte],
 outlen: var int,
 flag: byte = 0,
 encrypt: bool = true): AuthResult[void] =
@@ -225,7 +225,7 @@ proc authMessageEIP8(h: var Handshake,

 proc ackMessagePreEIP8(h: var Handshake,
 rng: var BrHmacDrbgContext,
-output: var openarray[byte],
+output: var openArray[byte],
 outlen: var int,
 flag: byte = 0,
 encrypt: bool = true): AuthResult[void] =
@@ -252,7 +252,7 @@ proc ackMessagePreEIP8(h: var Handshake,

 proc ackMessageEIP8(h: var Handshake,
 rng: var BrHmacDrbgContext,
-output: var openarray[byte],
+output: var openArray[byte],
 outlen: var int,
 flag: byte = 0,
 encrypt: bool = true): AuthResult[void] =
@@ -314,7 +314,7 @@ template ackSize*(h: Handshake, encrypt: bool = true): int =

 proc authMessage*(h: var Handshake, rng: var BrHmacDrbgContext,
 pubkey: PublicKey,
-output: var openarray[byte],
+output: var openArray[byte],
 outlen: var int, flag: byte = 0,
 encrypt: bool = true): AuthResult[void] =
 ## Create new AuthMessage for specified `pubkey` and store it inside
@@ -325,7 +325,7 @@ proc authMessage*(h: var Handshake, rng: var BrHmacDrbgContext,
 authMessagePreEIP8(h, rng, pubkey, output, outlen, flag, encrypt)

 proc ackMessage*(h: var Handshake, rng: var BrHmacDrbgContext,
-output: var openarray[byte],
+output: var openArray[byte],
 outlen: var int, flag: byte = 0,
 encrypt: bool = true): AuthResult[void] =
 ## Create new AckMessage and store it inside of `output`, size of generated
@@ -335,7 +335,7 @@ proc ackMessage*(h: var Handshake, rng: var BrHmacDrbgContext,
 else:
 ackMessagePreEIP8(h, rng, output, outlen, flag, encrypt)

-proc decodeAuthMessageV4(h: var Handshake, m: openarray[byte]): AuthResult[void] =
+proc decodeAuthMessageV4(h: var Handshake, m: openArray[byte]): AuthResult[void] =
 ## Decodes V4 AuthMessage.
 var
 buffer: array[PlainAuthMessageV4Length, byte]
@@ -361,7 +361,7 @@ proc decodeAuthMessageV4(h: var Handshake, m: openarray[byte]): AuthResult[void]

 ok()

-proc decodeAuthMessageEip8(h: var Handshake, m: openarray[byte]): AuthResult[void] =
+proc decodeAuthMessageEIP8(h: var Handshake, m: openArray[byte]): AuthResult[void] =
 ## Decodes EIP-8 AuthMessage.
 let size = uint16.fromBytesBE(m)
 h.expectedLength = int(size) + 2
@@ -408,7 +408,7 @@ proc decodeAuthMessageEip8(h: var Handshake, m: openarray[byte]): AuthResult[voi
 except CatchableError:
 err(RlpError)

-proc decodeAckMessageEip8*(h: var Handshake, m: openarray[byte]): AuthResult[void] =
+proc decodeAckMessageEIP8*(h: var Handshake, m: openArray[byte]): AuthResult[void] =
 ## Decodes EIP-8 AckMessage.
 let size = uint16.fromBytesBE(m)

@@ -442,7 +442,7 @@ proc decodeAckMessageEip8*(h: var Handshake, m: openarray[byte]): AuthResult[voi
 except CatchableError:
 err(RlpError)

-proc decodeAckMessageV4(h: var Handshake, m: openarray[byte]): AuthResult[void] =
+proc decodeAckMessageV4(h: var Handshake, m: openArray[byte]): AuthResult[void] =
 ## Decodes V4 AckMessage.
 var
 buffer: array[PlainAckMessageV4Length, byte]
@@ -457,7 +457,7 @@ proc decodeAckMessageV4(h: var Handshake, m: openarray[byte]): AuthResult[void]

 ok()

-proc decodeAuthMessage*(h: var Handshake, input: openarray[byte]): AuthResult[void] =
+proc decodeAuthMessage*(h: var Handshake, input: openArray[byte]): AuthResult[void] =
 ## Decodes AuthMessage from `input`.
 if len(input) < AuthMessageV4Length:
 return err(IncompleteError)
@@ -466,12 +466,12 @@ proc decodeAuthMessage*(h: var Handshake, input: openarray[byte]): AuthResult[vo
 let res = h.decodeAuthMessageV4(input)
 if res.isOk(): return res

-let res = h.decodeAuthMessageEip8(input)
+let res = h.decodeAuthMessageEIP8(input)
 if res.isOk():
 h.flags.incl(EIP8)
 res

-proc decodeAckMessage*(h: var Handshake, input: openarray[byte]): AuthResult[void] =
+proc decodeAckMessage*(h: var Handshake, input: openArray[byte]): AuthResult[void] =
 ## Decodes AckMessage from `input`.
 if len(input) < AckMessageV4Length:
 return err(IncompleteError)
@@ -479,13 +479,13 @@ proc decodeAckMessage*(h: var Handshake, input: openarray[byte]): AuthResult[voi
 let res = h.decodeAckMessageV4(input)
 if res.isOk(): return res

-let res = h.decodeAckMessageEip8(input)
+let res = h.decodeAckMessageEIP8(input)
 if res.isOk(): h.flags.incl(EIP8)
 res

 proc getSecrets*(
-h: Handshake, authmsg: openarray[byte],
-ackmsg: openarray[byte]): ConnectionSecret =
+h: Handshake, authmsg: openArray[byte],
+ackmsg: openArray[byte]): ConnectionSecret =
 ## Derive secrets from handshake `h` using encrypted AuthMessage `authmsg` and
 ## encrypted AckMessage `ackmsg`.
 var
@@ -30,7 +30,7 @@ proc getBlockHeaders*(db: AbstractChainDB, req: BlocksRequest): seq[BlockHeader]

 template fetcher*(fetcherName, fetchingFunc, InputType, ResultType: untyped) =
 proc fetcherName*(db: AbstractChainDB,
-lookups: openarray[InputType]): seq[ResultType] {.gcsafe.} =
+lookups: openArray[InputType]): seq[ResultType] {.gcsafe.} =
 for lookup in lookups:
 let fetched = fetchingFunc(db, lookup)
 if fetched.hasData:
@@ -47,6 +47,6 @@ fetcher getProofs, getProof, ProofRequest, Blob
 fetcher getHeaderProofs, getHeaderProof, ProofRequest, Blob

 proc getHelperTrieProofs*(db: AbstractChainDB,
-reqs: openarray[HelperTrieProofRequest],
+reqs: openArray[HelperTrieProofRequest],
 outNodes: var seq[Blob], outAuxData: var seq[Blob]) =
 discard
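For orientation, this is roughly what `fetcher getProofs, getProof, ProofRequest, Blob` expands to (a sketch; the part of the template body below `if fetched.hasData:` lies outside this hunk):

```nim
proc getProofs*(db: AbstractChainDB,
                lookups: openArray[ProofRequest]): seq[Blob] {.gcsafe.} =
  for lookup in lookups:
    let fetched = getProof(db, lookup)
    if fetched.hasData:
      # remainder of the template body is not shown in the hunk above
      discard
```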
@@ -141,7 +141,7 @@ proc sendNeighbours*(d: DiscoveryProtocol, node: Node, neighbours: seq[Node]) =
 template flush() =
 block:
 let payload = rlp.encode((nodes, expiration()))
-let msg = pack(cmdNeighbours, payload, d.privkey)
+let msg = pack(cmdNeighbours, payload, d.privKey)
 trace "Neighbours to", node, nodes
 d.send(node, msg)
 nodes.setLen(0)
@@ -155,7 +155,7 @@ proc sendNeighbours*(d: DiscoveryProtocol, node: Node, neighbours: seq[Node]) =
 if nodes.len != 0: flush()

 proc newDiscoveryProtocol*(privKey: PrivateKey, address: Address,
-bootstrapNodes: openarray[ENode], rng = newRng()
+bootstrapNodes: openArray[ENode], rng = newRng()
 ): DiscoveryProtocol =
 result.new()
 result.privKey = privKey
@@ -214,7 +214,7 @@ proc recvFindNode(d: DiscoveryProtocol, node: Node, payload: openArray[byte])
 let rng = rlp.listElem(0).toBytes
 # Check for pubkey len
 if rng.len == 64:
-let nodeId = readUIntBE[256](rng[32 .. ^1])
+let nodeId = readUintBE[256](rng[32 .. ^1])
 d.kademlia.recvFindNode(node, nodeId)
 else:
 trace "Invalid target public key received"
@@ -187,7 +187,7 @@ proc run(config: DiscoveryConf) =
 echo "No Talk Response message returned"
 of noCommand:
 d.start()
-waitfor(discover(d))
+waitFor(discover(d))

 when isMainModule:
 let config = DiscoveryConf.load()
@@ -89,7 +89,7 @@ type
 Codec* = object
 localNode*: Node
 privKey*: PrivateKey
-handshakes*: Table[HandShakeKey, Challenge]
+handshakes*: Table[HandshakeKey, Challenge]
 sessions*: Sessions

 DecodeResult*[T] = Result[T, cstring]
@@ -101,7 +101,7 @@ func hash*(key: HandshakeKey): Hash =
 result = key.nodeId.hash !& key.address.hash
 result = !$result

-proc idHash(challengeData, ephkey: openarray[byte], nodeId: NodeId):
+proc idHash(challengeData, ephkey: openArray[byte], nodeId: NodeId):
 MDigest[256] =
 var ctx: sha256
 ctx.init()
@@ -113,16 +113,16 @@ proc idHash(challengeData, ephkey: openarray[byte], nodeId: NodeId):
 ctx.clear()

 proc createIdSignature*(privKey: PrivateKey, challengeData,
-ephKey: openarray[byte], nodeId: NodeId): SignatureNR =
+ephKey: openArray[byte], nodeId: NodeId): SignatureNR =
 signNR(privKey, SkMessage(idHash(challengeData, ephKey, nodeId).data))

-proc verifyIdSignature*(sig: SignatureNR, challengeData, ephKey: openarray[byte],
-nodeId: NodeId, pubKey: PublicKey): bool =
+proc verifyIdSignature*(sig: SignatureNR, challengeData, ephKey: openArray[byte],
+nodeId: NodeId, pubkey: PublicKey): bool =
 let h = idHash(challengeData, ephKey, nodeId)
-verify(sig, SkMessage(h.data), pubKey)
+verify(sig, SkMessage(h.data), pubkey)

-proc deriveKeys*(n1, n2: NodeID, priv: PrivateKey, pub: PublicKey,
-challengeData: openarray[byte]): HandshakeSecrets =
+proc deriveKeys*(n1, n2: NodeId, priv: PrivateKey, pub: PublicKey,
+challengeData: openArray[byte]): HandshakeSecrets =
 let eph = ecdhRawFull(priv, pub)

 var info = newSeqOfCap[byte](keyAgreementPrefix.len + 32 * 2)
@@ -138,7 +138,7 @@ proc deriveKeys*(n1, n2: NodeID, priv: PrivateKey, pub: PublicKey,
 toOpenArray(res, 0, sizeof(secrets) - 1))
 secrets

-proc encryptGCM*(key: AesKey, nonce, pt, authData: openarray[byte]): seq[byte] =
+proc encryptGCM*(key: AesKey, nonce, pt, authData: openArray[byte]): seq[byte] =
 var ectx: GCM[aes128]
 ectx.init(key, nonce, authData)
 result = newSeq[byte](pt.len + gcmTagSize)
@@ -146,7 +146,7 @@ proc encryptGCM*(key: AesKey, nonce, pt, authData: openarray[byte]): seq[byte] =
 ectx.getTag(result.toOpenArray(pt.len, result.high))
 ectx.clear()

-proc decryptGCM*(key: AesKey, nonce, ct, authData: openarray[byte]):
+proc decryptGCM*(key: AesKey, nonce, ct, authData: openArray[byte]):
 Option[seq[byte]] =
 if ct.len <= gcmTagSize:
 debug "cipher is missing tag", len = ct.len
@@ -165,14 +165,14 @@ proc decryptGCM*(key: AesKey, nonce, ct, authData: openarray[byte]):

 return some(res)

-proc encryptHeader*(id: NodeId, iv, header: openarray[byte]): seq[byte] =
+proc encryptHeader*(id: NodeId, iv, header: openArray[byte]): seq[byte] =
 var ectx: CTR[aes128]
 ectx.init(id.toByteArrayBE().toOpenArray(0, 15), iv)
 result = newSeq[byte](header.len)
 ectx.encrypt(header, result)
 ectx.clear()

-proc hasHandshake*(c: Codec, key: HandShakeKey): bool =
+proc hasHandshake*(c: Codec, key: HandshakeKey): bool =
 c.handshakes.hasKey(key)

 proc encodeStaticHeader*(flag: Flag, nonce: AESGCMNonce, authSize: int):
@@ -185,7 +185,7 @@ proc encodeStaticHeader*(flag: Flag, nonce: AESGCMNonce, authSize: int):
 result.add((uint16(authSize)).toBytesBE())

 proc encodeMessagePacket*(rng: var BrHmacDrbgContext, c: var Codec,
-toId: NodeID, toAddr: Address, message: openarray[byte]):
+toId: NodeId, toAddr: Address, message: openArray[byte]):
 (seq[byte], AESGCMNonce) =
 var nonce: AESGCMNonce
 brHmacDrbgGenerate(rng, nonce) # Random AESGCM nonce
@@ -228,7 +228,7 @@ proc encodeMessagePacket*(rng: var BrHmacDrbgContext, c: var Codec,
 return (packet, nonce)

 proc encodeWhoareyouPacket*(rng: var BrHmacDrbgContext, c: var Codec,
-toId: NodeID, toAddr: Address, requestNonce: AESGCMNonce, recordSeq: uint64,
+toId: NodeId, toAddr: Address, requestNonce: AESGCMNonce, recordSeq: uint64,
 pubkey: Option[PublicKey]): seq[byte] =
 var idNonce: IdNonce
 brHmacDrbgGenerate(rng, idNonce)
@@ -263,14 +263,14 @@ proc encodeWhoareyouPacket*(rng: var BrHmacDrbgContext, c: var Codec,
 recordSeq: recordSeq,
 challengeData: @iv & header)
 challenge = Challenge(whoareyouData: whoareyouData, pubkey: pubkey)
-key = HandShakeKey(nodeId: toId, address: toAddr)
+key = HandshakeKey(nodeId: toId, address: toAddr)

 c.handshakes[key] = challenge

 return packet

 proc encodeHandshakePacket*(rng: var BrHmacDrbgContext, c: var Codec,
-toId: NodeID, toAddr: Address, message: openarray[byte],
+toId: NodeId, toAddr: Address, message: openArray[byte],
 whoareyouData: WhoareyouData, pubkey: PublicKey): seq[byte] =
 var header: seq[byte]
 var nonce: AESGCMNonce
@@ -321,7 +321,7 @@ proc encodeHandshakePacket*(rng: var BrHmacDrbgContext, c: var Codec,

 return packet

-proc decodeHeader*(id: NodeId, iv, maskedHeader: openarray[byte]):
+proc decodeHeader*(id: NodeId, iv, maskedHeader: openArray[byte]):
 DecodeResult[(StaticHeader, seq[byte])] =
 # No need to check staticHeader size as that is included in minimum packet
 # size check in decodePacket
@@ -361,7 +361,7 @@ proc decodeHeader*(id: NodeId, iv, maskedHeader: openarray[byte]):
 ok((StaticHeader(authdataSize: authdataSize, flag: flag, nonce: nonce),
 staticHeader & authdata))

-proc decodeMessage*(body: openarray[byte]): DecodeResult[Message] =
+proc decodeMessage*(body: openArray[byte]): DecodeResult[Message] =
 ## Decodes to the specific `Message` type.
 if body.len < 1:
 return err("No message data")
@@ -388,7 +388,7 @@ proc decodeMessage*(body: openarray[byte]): DecodeResult[Message] =
 of unused: return err("Invalid message type")
 of ping: rlp.decode(message.ping)
 of pong: rlp.decode(message.pong)
-of findNode: rlp.decode(message.findNode)
+of findnode: rlp.decode(message.findNode)
 of nodes: rlp.decode(message.nodes)
 of talkreq: rlp.decode(message.talkreq)
 of talkresp: rlp.decode(message.talkresp)
@@ -484,7 +484,7 @@ proc decodeHandshakePacket(c: var Codec, fromAddr: Address, nonce: AESGCMNonce,
 if header.len < staticHeaderSize + authdataHeadSize + int(sigSize) + int(ephKeySize):
 return err("Invalid header for handshake message packet")

-let key = HandShakeKey(nodeId: srcId, address: fromAddr)
+let key = HandshakeKey(nodeId: srcId, address: fromAddr)
 var challenge: Challenge
 if not c.handshakes.pop(key, challenge):
 return err("No challenge found: timed out or unsolicited packet")
@@ -508,7 +508,7 @@ proc decodeHandshakePacket(c: var Codec, fromAddr: Address, nonce: AESGCMNonce,
 except RlpError, ValueError:
 return err("Invalid encoded ENR")

-var pubKey: PublicKey
+var pubkey: PublicKey
 var newNode: Option[Node]
 # TODO: Shall we return Node or Record? Record makes more sense, but we do
 # need the pubkey and the nodeid
@@ -520,12 +520,12 @@ proc decodeHandshakePacket(c: var Codec, fromAddr: Address, nonce: AESGCMNonce,

 # Note: Not checking if the record seqNum is higher than the one we might
 # have stored as it comes from this node directly.
-pubKey = node.pubKey
+pubkey = node.pubkey
 newNode = some(node)
 else:
 # TODO: Hmm, should we still verify node id of the ENR of this node?
 if challenge.pubkey.isSome():
-pubKey = challenge.pubkey.get()
+pubkey = challenge.pubkey.get()
 else:
 # We should have received a Record in this case.
 return err("Missing ENR in handshake packet")
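A round-trip sketch for the AES-GCM helpers touched above; the import path and the all-zero key/nonce are assumptions for illustration (`AesKey` and `AESGCMNonce` are the fixed-size byte-array types this codec uses):

```nim
import std/options, eth/p2p/discoveryv5/encoding

var
  key: AesKey          # all-zero key, illustration only
  nonce: AESGCMNonce   # all-zero nonce, illustration only
let ct = encryptGCM(key, nonce, [byte 1, 2, 3], [byte 0])
doAssert decryptGCM(key, nonce, ct, [byte 0]).get() == @[byte 1, 2, 3]
```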
@@ -94,9 +94,9 @@ proc `==`(a, b: Field): bool =
 proc cmp(a, b: FieldPair): int = cmp(a[0], b[0])

 proc makeEnrRaw(seqNum: uint64, pk: PrivateKey,
-pairs: openarray[FieldPair]): EnrResult[seq[byte]] =
+pairs: openArray[FieldPair]): EnrResult[seq[byte]] =
 proc append(w: var RlpWriter, seqNum: uint64,
-pairs: openarray[FieldPair]): seq[byte] =
+pairs: openArray[FieldPair]): seq[byte] =
 w.append(seqNum)
 for (k, v) in pairs:
 w.append(k)
@@ -124,7 +124,7 @@ proc makeEnrRaw(seqNum: uint64, pk: PrivateKey,
 ok(raw)

 proc makeEnrAux(seqNum: uint64, pk: PrivateKey,
-pairs: openarray[FieldPair]): EnrResult[Record] =
+pairs: openArray[FieldPair]): EnrResult[Record] =
 var record: Record
 record.pairs = @pairs
 record.seqNum = seqNum
@@ -185,7 +185,7 @@ proc init*(T: type Record, seqNum: uint64,
 pk: PrivateKey,
 ip: Option[ValidIpAddress],
 tcpPort, udpPort: Option[Port],
-extraFields: openarray[FieldPair] = []):
+extraFields: openArray[FieldPair] = []):
 EnrResult[T] =
 ## Initialize a `Record` with given sequence number, private key, optional
 ## ip address, tcp port, udp port, and optional custom k:v pairs.
@@ -263,7 +263,7 @@ proc find(r: Record, key: string): Option[int] =
 return some(i)

 proc update*(record: var Record, pk: PrivateKey,
-fieldPairs: openarray[FieldPair]): EnrResult[void] =
+fieldPairs: openArray[FieldPair]): EnrResult[void] =
 ## Update a `Record` k:v pairs.
 ##
 ## In case any of the k:v pairs is updated or added (new), the sequence number
@@ -306,7 +306,7 @@ proc update*(record: var Record, pk: PrivateKey,
 proc update*(r: var Record, pk: PrivateKey,
 ip: Option[ValidIpAddress],
 tcpPort, udpPort: Option[Port] = none[Port](),
-extraFields: openarray[FieldPair] = []):
+extraFields: openArray[FieldPair] = []):
 EnrResult[void] =
 ## Update a `Record` with given ip address, tcp port, udp port and optional
 ## custom k:v pairs.
@@ -362,7 +362,7 @@ proc contains*(r: Record, fp: (string, seq[byte])): bool =
 if field.get() == fp[1]:
 return true

-proc verifySignatureV4(r: Record, sigData: openarray[byte], content: seq[byte]):
+proc verifySignatureV4(r: Record, sigData: openArray[byte], content: seq[byte]):
 bool =
 let publicKey = r.get(PublicKey)
 if publicKey.isSome:
@@ -441,7 +441,7 @@ proc fromBytesAux(r: var Record): bool {.raises: [RlpError, Defect].} =

 verifySignature(r)

-proc fromBytes*(r: var Record, s: openarray[byte]): bool =
+proc fromBytes*(r: var Record, s: openArray[byte]): bool =
 ## Loads ENR from rlp-encoded bytes, and validates the signature.
 r.raw = @s
 try:
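A usage sketch built from the `Record.init` signature visible above; the imports, the key-generation helpers and `toURI` are assumptions (eth/keys plus this enr module):

```nim
import std/options, stew/shims/net, eth/keys, eth/p2p/discoveryv5/enr

let
  rng = newRng()
  pk = PrivateKey.random(rng[])
  rec = Record.init(1, pk, some(ValidIpAddress.init("127.0.0.1")),
                    some(Port(30303)), some(Port(30303))).expect("valid params")
echo rec.toURI()   # prints the textual "enr:..." form
```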
@@ -1,7 +1,7 @@
 import nimcrypto

-proc hkdf*(HashType: typedesc, ikm, salt, info: openarray[byte],
-output: var openarray[byte]) =
+proc hkdf*(HashType: typedesc, ikm, salt, info: openArray[byte],
+output: var openArray[byte]) =
 var ctx: HMAC[HashType]
 ctx.init(salt)
 ctx.update(ikm)
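Usage sketch for the `hkdf` signature above, deriving 32 bytes with SHA-256; the module path is an assumption:

```nim
import nimcrypto, eth/p2p/discoveryv5/hkdf

var okm: array[32, byte]
hkdf(sha256, ikm = [byte 1, 2, 3], salt = [byte 4, 5], info = [byte 6],
     output = okm)
doAssert okm.len == 32
```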
@@ -77,7 +77,7 @@ type
 of pong:
 pong*: PongMessage
 of findnode:
-findNode*: FindNodeMessage
+findnode*: FindNodeMessage
 of nodes:
 nodes*: NodesMessage
 of talkreq:
@@ -98,7 +98,7 @@ type
 template messageKind*(T: typedesc[SomeMessage]): MessageKind =
 when T is PingMessage: ping
 elif T is PongMessage: pong
-elif T is FindNodeMessage: findNode
+elif T is FindNodeMessage: findnode
 elif T is NodesMessage: nodes
 elif T is TalkReqMessage: talkreq
 elif T is TalkRespMessage: talkresp
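This is the clash the commit message refers to: a minimal sketch (not repository code) of how the `findnode` spelling and a differently cased use site still refer to the same declaration, which is exactly what `--styleCheck:usages` reports and why only `--styleCheck:hint` is enabled for now.

```nim
type
  MessageKind = enum
    ping, findnode
  Message = object
    case kind: MessageKind
    of findnode: findnode: int
    else: discard

let m = Message(kind: findnode, findnode: 7)
# Same field, different spelling: compiles due to style insensitivity, but is
# flagged by --styleCheck:usages.
echo m.findNode
```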
@@ -60,7 +60,7 @@ func newNode*(r: Record): Result[Node, cstring] =

 func update*(n: Node, pk: PrivateKey, ip: Option[ValidIpAddress],
 tcpPort, udpPort: Option[Port] = none[Port](),
-extraFields: openarray[FieldPair] = []): Result[void, cstring] =
+extraFields: openArray[FieldPair] = []): Result[void, cstring] =
 ? n.record.update(pk, ip, tcpPort, udpPort, extraFields)

 if ip.isSome():
@@ -2,7 +2,7 @@

 import
 std/[sets, options],
 stew/results, stew/shims/net, chronicles, chronos,
 "."/[node, enr, routing_table]

 logScope:
@@ -25,7 +25,7 @@ proc validIp(sender, address: IpAddress): bool =
 # https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml
 return true

-proc verifyNodesRecords(enrs: openarray[Record], fromNode: Node, nodesLimit: int,
+proc verifyNodesRecords(enrs: openArray[Record], fromNode: Node, nodesLimit: int,
 distances: Option[seq[uint16]]): seq[Node] =
 ## Verify and convert ENRs to a sequence of nodes. Only ENRs that pass
 ## verification will be added. ENRs are verified for duplicates, invalid
@@ -79,8 +79,8 @@ proc verifyNodesRecords(enrs: openarray[Record], fromNode: Node, nodesLimit: int
 seen.incl(n)
 result.add(n)

-proc verifyNodesRecords*(enrs: openarray[Record], fromNode: Node, nodesLimit: int): seq[Node] =
+proc verifyNodesRecords*(enrs: openArray[Record], fromNode: Node, nodesLimit: int): seq[Node] =
 verifyNodesRecords(enrs, fromNode, nodesLimit, none[seq[uint16]]())

-proc verifyNodesRecords*(enrs: openarray[Record], fromNode: Node, nodesLimit: int, distances: seq[uint16]): seq[Node] =
+proc verifyNodesRecords*(enrs: openArray[Record], fromNode: Node, nodesLimit: int, distances: seq[uint16]): seq[Node] =
 verifyNodesRecords(enrs, fromNode, nodesLimit, some[seq[uint16]](distances))
@@ -173,7 +173,7 @@ proc addNode*(d: Protocol, enr: EnrUri): bool =
 ## Returns false if no valid ENR URI, or on the conditions of `addNode` from
 ## an `Record`.
 var r: Record
-let res = r.fromUri(enr)
+let res = r.fromURI(enr)
 if res:
 return d.addNode(r)

@@ -217,7 +217,7 @@ func getRecord*(d: Protocol): Record =
 d.localNode.record

 proc updateRecord*(
-d: Protocol, enrFields: openarray[(string, seq[byte])]): DiscResult[void] =
+d: Protocol, enrFields: openArray[(string, seq[byte])]): DiscResult[void] =
 ## Update the ENR of the local node with provided `enrFields` k:v pairs.
 let fields = mapIt(enrFields, toFieldPair(it[0], it[1]))
 d.localNode.record.update(d.privateKey, fields)
@@ -246,7 +246,7 @@ proc send(d: Protocol, n: Node, data: seq[byte]) =
 d.send(n.address.get(), data)

 proc sendNodes(d: Protocol, toId: NodeId, toAddr: Address, reqId: RequestId,
-nodes: openarray[Node]) =
+nodes: openArray[Node]) =
 proc sendNodes(d: Protocol, toId: NodeId, toAddr: Address,
 message: NodesMessage, reqId: RequestId) {.nimcall.} =
 let (data, _) = encodeMessagePacket(d.rng[], d.codec, toId, toAddr,
@@ -332,9 +332,9 @@ proc handleMessage(d: Protocol, srcId: NodeId, fromAddr: Address,
 of ping:
 discovery_message_requests_incoming.inc()
 d.handlePing(srcId, fromAddr, message.ping, message.reqId)
-of findNode:
+of findnode:
 discovery_message_requests_incoming.inc()
-d.handleFindNode(srcId, fromAddr, message.findNode, message.reqId)
+d.handleFindNode(srcId, fromAddr, message.findnode, message.reqId)
 of talkreq:
 discovery_message_requests_incoming.inc()
 d.handleTalkReq(srcId, fromAddr, message.talkreq, message.reqId)
@@ -362,7 +362,7 @@ proc registerTalkProtocol*(d: Protocol, protocolId: seq[byte],

 proc sendWhoareyou(d: Protocol, toId: NodeId, a: Address,
 requestNonce: AESGCMNonce, node: Option[Node]) =
-let key = HandShakeKey(nodeId: toId, address: a)
+let key = HandshakeKey(nodeId: toId, address: a)
 if not d.codec.hasHandshake(key):
 let
 recordSeq = if node.isSome(): node.get().record.seqNum
@@ -791,7 +791,7 @@ proc populateTable*(d: Protocol) {.async.} =
 proc revalidateNode*(d: Protocol, n: Node) {.async.} =
 let pong = await d.ping(n)

-if pong.isOK():
+if pong.isOk():
 let res = pong.get()
 if res.enrSeq > n.record.seqNum:
 # Request new ENR
@@ -883,8 +883,8 @@ proc ipMajorityLoop(d: Protocol) {.async.} =
 proc newProtocol*(privKey: PrivateKey,
 enrIp: Option[ValidIpAddress],
 enrTcpPort, enrUdpPort: Option[Port],
-localEnrFields: openarray[(string, seq[byte])] = [],
-bootstrapRecords: openarray[Record] = [],
+localEnrFields: openArray[(string, seq[byte])] = [],
+bootstrapRecords: openArray[Record] = [],
 previousRecord = none[enr.Record](),
 bindPort: Port,
 bindIp = IPv4_any(),
@@ -13,10 +13,10 @@ proc rand*(rng: var BrHmacDrbgContext, max: Natural): int =
 if x < randMax - (randMax mod (uint64(max) + 1'u64)): # against modulo bias
 return int(x mod (uint64(max) + 1'u64))

-proc sample*[T](rng: var BrHmacDrbgContext, a: openarray[T]): T =
+proc sample*[T](rng: var BrHmacDrbgContext, a: openArray[T]): T =
 result = a[rng.rand(a.high)]

-proc shuffle*[T](rng: var BrHmacDrbgContext, a: var openarray[T]) =
+proc shuffle*[T](rng: var BrHmacDrbgContext, a: var openArray[T]) =
 for i in countdown(a.high, 1):
 let j = rng.rand(i)
 swap(a[i], a[j])
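Usage sketch for the helpers above; `newRng` (a `ref BrHmacDrbgContext` factory from eth/keys) and the module path are assumptions:

```nim
import eth/keys, eth/p2p/discoveryv5/random2

let rng = newRng()
var xs = @[1, 2, 3, 4, 5]
rng[].shuffle(xs)         # in-place Fisher-Yates driven by the DRBG
echo rng[].sample(xs)     # one uniformly chosen element
echo rng[].rand(100)      # uniform int in 0 .. 100
```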
@ -93,7 +93,7 @@ type
|
||||||
NoAddress
|
NoAddress
|
||||||
|
|
||||||
# xor distance functions
|
# xor distance functions
|
||||||
func distance*(a, b: NodeId): Uint256 =
|
func distance*(a, b: NodeId): UInt256 =
|
||||||
## Calculate the distance to a NodeId.
|
## Calculate the distance to a NodeId.
|
||||||
a xor b
|
a xor b
|
||||||
|
|
||||||
|
@@ -137,7 +137,7 @@ const
   XorDistanceCalculator* = DistanceCalculator(calculateDistance: distance,
     calculateLogDistance: logDistance, calculateIdAtDistance: idAtDistance)

-func distance*(r: RoutingTable, a, b: NodeId): Uint256 =
+func distance*(r: RoutingTable, a, b: NodeId): UInt256 =
   r.distanceCalculator.calculateDistance(a, b)

 func logDistance*(r: RoutingTable, a, b: NodeId): uint16 =

@@ -172,7 +172,7 @@ proc ipLimitInc(r: var RoutingTable, b: KBucket, n: Node): bool =
     return false
   # Check ip limit for routing table
   if not r.ipLimits.inc(ip):
-    b.iplimits.dec(ip)
+    b.ipLimits.dec(ip)
     return false

   return true

@@ -225,7 +225,7 @@ proc inRange(k: KBucket, n: Node): bool =

 proc contains(k: KBucket, n: Node): bool = n in k.nodes

-proc binaryGetBucketForNode*(buckets: openarray[KBucket],
+proc binaryGetBucketForNode*(buckets: openArray[KBucket],
                              id: NodeId): KBucket =
   ## Given a list of ordered buckets, returns the bucket for a given `NodeId`.
   ## Returns nil if no bucket in range for given `id` is found.

@@ -233,13 +233,13 @@ proc binaryGetBucketForNode*(buckets: openarray[KBucket],
     cmp(a.iend, b)

   # Prevent cases where `lowerBound` returns an out of range index e.g. at empty
-  # openarray, or when the id is out range for all buckets in the openarray.
+  # openArray, or when the id is out range for all buckets in the openArray.
   if bucketPos < buckets.len:
     let bucket = buckets[bucketPos]
     if bucket.istart <= id and id <= bucket.iend:
       result = bucket

-proc computeSharedPrefixBits(nodes: openarray[NodeId]): int =
+proc computeSharedPrefixBits(nodes: openArray[NodeId]): int =
   ## Count the number of prefix bits shared by all nodes.
   if nodes.len < 2:
     return ID_SIZE
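`computeSharedPrefixBits` is what decides when a bucket can be split: it counts how many leading bits all node IDs have in common. A toy version over 8-bit IDs, for illustration only (the real routing table works on 256-bit `NodeId`s):

``` nim
import std/bitops

func sharedPrefixBits(ids: openArray[uint8]): int =
  # With fewer than two IDs every bit is trivially "shared".
  if ids.len < 2:
    return 8
  result = 8
  for i in 1 ..< ids.len:
    let diff = ids[0] xor ids[i]
    if diff != 0:
      # Bits above the highest set bit of the XOR form the shared prefix.
      result = min(result, countLeadingZeroBits(diff))

assert sharedPrefixBits([0b1100_0000'u8, 0b1100_1111'u8]) == 4
assert sharedPrefixBits([0b1100_0000'u8, 0b0100_0000'u8]) == 0
```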
@@ -266,7 +266,7 @@ proc init*(T: type RoutingTable, localNode: Node, bitsPerHop = DefaultBitsPerHop
   ## `bitsPerHop` is default set to 5 as recommended by original Kademlia paper.
   RoutingTable(
     localNode: localNode,
-    buckets: @[KBucket.new(0.u256, high(Uint256), ipLimits.bucketIpLimit)],
+    buckets: @[KBucket.new(0.u256, high(UInt256), ipLimits.bucketIpLimit)],
     bitsPerHop: bitsPerHop,
     ipLimits: IpLimits(limit: ipLimits.tableIpLimit),
     distanceCalculator: distanceCalculator,

@@ -478,7 +478,7 @@ proc neighboursAtDistances*(r: RoutingTable, distances: seq[uint16],
 proc len*(r: RoutingTable): int =
   for b in r.buckets: result += b.len

-proc moveRight[T](arr: var openarray[T], a, b: int) =
+proc moveRight[T](arr: var openArray[T], a, b: int) =
   ## In `arr` move elements in range [a, b] right by 1.
   var t: T
   shallowCopy(t, arr[b + 1])
@@ -73,7 +73,7 @@ template eciesIvPos(): int =
 template eciesTagPos(size: int): int =
   1 + sizeof(PublicKey) + aes128.sizeBlock + size

-proc kdf*(data: openarray[byte]): array[KeyLength, byte] {.noInit.} =
+proc kdf*(data: openArray[byte]): array[KeyLength, byte] {.noinit.} =
   ## NIST SP 800-56a Concatenation Key Derivation Function (see section 5.8.1)
   var ctx: sha256
   var counter: uint32

@@ -93,9 +93,9 @@ proc kdf*(data: openarray[byte]): array[KeyLength, byte] {.noInit.} =
   ctx.clear() # clean ctx
   copyMem(addr result[0], addr storage[0], KeyLength)

-proc eciesEncrypt*(rng: var BrHmacDrbgContext, input: openarray[byte],
-                   output: var openarray[byte], pubkey: PublicKey,
-                   sharedmac: openarray[byte] = emptyMac): EciesResult[void] =
+proc eciesEncrypt*(rng: var BrHmacDrbgContext, input: openArray[byte],
+                   output: var openArray[byte], pubkey: PublicKey,
+                   sharedmac: openArray[byte] = emptyMac): EciesResult[void] =
   ## Encrypt data with ECIES method using given public key `pubkey`.
   ## ``input`` - input data
   ## ``output`` - output data

@@ -156,10 +156,10 @@ proc eciesEncrypt*(rng: var BrHmacDrbgContext, input: openarray[byte],

   ok()

-proc eciesDecrypt*(input: openarray[byte],
-                   output: var openarray[byte],
+proc eciesDecrypt*(input: openArray[byte],
+                   output: var openArray[byte],
                    seckey: PrivateKey,
-                   sharedmac: openarray[byte] = emptyMac): EciesResult[void] =
+                   sharedmac: openArray[byte] = emptyMac): EciesResult[void] =
   ## Decrypt data with ECIES method using given private key `seckey`.
   ## ``input`` - input data
   ## ``output`` - output data
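The `kdf` touched above implements the NIST SP 800-56A concatenation KDF: hash a big-endian 32-bit counter concatenated with the shared secret, incrementing the counter until enough key material has been produced. Below is a minimal standalone sketch of that construction using nimcrypto's SHA-256; it illustrates the scheme only and is not the nim-eth proc (imports and the helper name are assumptions).

``` nim
import nimcrypto/[hash, sha2], stew/endians2

proc concatKdf(sharedSecret: openArray[byte], keyLen: int): seq[byte] =
  ## SP 800-56A style: keymat = H(1 || Z) || H(2 || Z) || ... truncated to keyLen.
  var counter = 1'u32
  while result.len < keyLen:
    var ctx: sha256
    ctx.init()
    ctx.update(counter.toBytesBE())
    ctx.update(sharedSecret)
    result.add ctx.finish().data
    inc counter
  result.setLen(keyLen)

let secret = [byte 1, 2, 3, 4]
echo concatKdf(secret, 32).len   # 32 bytes of derived key material
```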
@@ -144,7 +144,7 @@ proc isFull(k: KBucket): bool = k.len == BUCKET_SIZE

 proc contains(k: KBucket, n: Node): bool = n in k.nodes

-proc binaryGetBucketForNode(buckets: openarray[KBucket], n: Node):
+proc binaryGetBucketForNode(buckets: openArray[KBucket], n: Node):
     KBucket {.raises: [ValueError, Defect].} =
   ## Given a list of ordered buckets, returns the bucket for a given node.
   let bucketPos = lowerBound(buckets, n.id) do(a: KBucket, b: NodeId) -> int:

@@ -158,7 +158,7 @@ proc binaryGetBucketForNode(buckets: openarray[KBucket], n: Node):
   if result.isNil:
     raise newException(ValueError, "No bucket found for node with id " & $n.id)

-proc computeSharedPrefixBits(nodes: openarray[Node]): int =
+proc computeSharedPrefixBits(nodes: openArray[Node]): int =
   ## Count the number of prefix bits shared by all nodes.
   if nodes.len < 2:
     return ID_SIZE

@@ -176,7 +176,7 @@ proc computeSharedPrefixBits(nodes: openarray[Node]): int =

 proc init(r: var RoutingTable, thisNode: Node) =
   r.thisNode = thisNode
-  r.buckets = @[newKBucket(0.u256, high(Uint256))]
+  r.buckets = @[newKBucket(0.u256, high(UInt256))]
   randomize() # for later `randomNodes` selection

 proc splitBucket(r: var RoutingTable, index: int) =
@@ -32,7 +32,7 @@ proc initProtocolState*[T](state: T, x: Peer|EthereumNode)
     {.gcsafe, raises: [Defect].} =
   discard

-proc initProtocolStates(peer: Peer, protocols: openarray[ProtocolInfo])
+proc initProtocolStates(peer: Peer, protocols: openArray[ProtocolInfo])
     {.raises: [Defect].} =
   # Initialize all the active protocol states
   newSeq(peer.protocolStates, allProtocols.len)
@@ -272,7 +272,7 @@ proc chooseFieldType(n: NimNode): NimNode =
   ## and selects the corresponding field type for use in the
   ## message object type (i.e. `p2p.hello`).
   ##
-  ## For now, only openarray types are remapped to sequences.
+  ## For now, only openArray types are remapped to sequences.
   result = n
   if n.kind == nnkBracketExpr and eqIdent(n[0], "openArray"):
     result = n.copyNimTree
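`chooseFieldType` rewrites `openArray[T]` parameter types into `seq[T]` so the generated message object can own its data. A standalone sketch of that AST transformation (the macro and proc names here are illustrative, not the DSL's):

``` nim
import macros

proc remapOpenArray(n: NimNode): NimNode =
  ## Turn `openArray[T]` bracket expressions into `seq[T]`; leave anything else alone.
  result = n
  if n.kind == nnkBracketExpr and eqIdent(n[0], "openArray"):
    result = n.copyNimTree
    result[0] = ident("seq")

macro demoRemap(): untyped =
  let remapped = remapOpenArray(parseExpr("openArray[byte]"))
  newLit(remapped.repr)

echo demoRemap()   # prints: seq[byte]
```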
@@ -352,7 +352,7 @@ proc augmentUserHandler(p: P2PProtocol, userHandlerProc: NimNode, msgId = -1) =

     userHandlerProc.body.insert 0, prelude

-  # We allow the user handler to use `openarray` params, but we turn
+  # We allow the user handler to use `openArray` params, but we turn
   # those into sequences to make the `async` pragma happy.
   for i in 1 ..< userHandlerProc.params.len:
     var param = userHandlerProc.params[i]

@@ -428,7 +428,7 @@ proc newMsg(protocol: P2PProtocol, kind: MessageKind, id: int,
   for param, paramType in procDef.typedInputParams(skip = 1):
     recFields.add newTree(nnkIdentDefs,
       newTree(nnkPostfix, ident("*"), param), # The fields are public
-      chooseFieldType(paramType),             # some types such as openarray
+      chooseFieldType(paramType),             # some types such as openArray
       newEmptyNode())                         # are automatically remapped

   if recFields.len == 1 and protocol.useSingleRecordInlining:

@@ -564,7 +564,7 @@ proc createSendProc*(msg: Message,
                 newEmptyNode(),
                 newStmtList()) ## body

-  if proctype == nnkProcDef:
+  if procType == nnkProcDef:
     for p in msg.procDef.pragma:
       if not eqIdent(p, "async"):
         def.addPragma p
@@ -170,7 +170,7 @@ proc numProtocols(d: Dispatcher): int =
   d.activeProtocols.len

 proc getDispatcher(node: EthereumNode,
-                   otherPeerCapabilities: openarray[Capability]): Dispatcher =
+                   otherPeerCapabilities: openArray[Capability]): Dispatcher =
   # TODO: sub-optimal solution until progress is made here:
   # https://github.com/nim-lang/Nim/issues/7457
   # We should be able to find an existing dispatcher without allocating a new one

@@ -945,7 +945,7 @@ proc checkUselessPeer(peer: Peer) {.raises: [UselessPeerError, Defect].} =
     # XXX: Send disconnect + UselessPeer
     raise newException(UselessPeerError, "Useless peer")

-proc initPeerState*(peer: Peer, capabilities: openarray[Capability])
+proc initPeerState*(peer: Peer, capabilities: openArray[Capability])
     {.raises: [UselessPeerError, Defect].} =
   peer.dispatcher = getDispatcher(peer.network, capabilities)
   checkUselessPeer(peer)

@@ -1015,7 +1015,7 @@ template `^`(arr): auto =
   # variable as an open array
   arr.toOpenArray(0, `arr Len` - 1)

-proc initSecretState(hs: var Handshake, authMsg, ackMsg: openarray[byte],
+proc initSecretState(hs: var Handshake, authMsg, ackMsg: openArray[byte],
                      p: Peer) =
   var secrets = hs.getSecrets(authMsg, ackMsg)
   initSecretState(secrets, p.secretsState)

@@ -1101,7 +1101,7 @@ proc rlpxConnect*(node: EthereumNode, remote: Node): Future[Peer] {.async.} =
       result.waitSingleMsg(DevP2P.hello),
       10.seconds)

-    if not validatePubKeyInHello(response, remote.node.pubKey):
+    if not validatePubKeyInHello(response, remote.node.pubkey):
       warn "Remote nodeId is not its public key" # XXX: Do we care?

     trace "DevP2P handshake completed", peer = remote,

@@ -1145,7 +1145,7 @@ proc rlpxAccept*(node: EthereumNode,
   result.network = node

   var handshake =
-    HandShake.tryInit(node.rng[], node.keys, {auth.Responder}).tryGet
+    Handshake.tryInit(node.rng[], node.keys, {auth.Responder}).tryGet

   var ok = false
   try:
@@ -71,10 +71,10 @@ p2pProtocol eth(version = protocolVersion,
                   bestHash: KeccakHash,
                   genesisHash: KeccakHash)

-  proc newBlockHashes(peer: Peer, hashes: openarray[NewBlockHashesAnnounce]) =
+  proc newBlockHashes(peer: Peer, hashes: openArray[NewBlockHashesAnnounce]) =
     discard

-  proc transactions(peer: Peer, transactions: openarray[Transaction]) =
+  proc transactions(peer: Peer, transactions: openArray[Transaction]) =
     discard

   requestResponse:

@@ -85,17 +85,17 @@ p2pProtocol eth(version = protocolVersion,

       await response.send(peer.network.chain.getBlockHeaders(request))

-    proc blockHeaders(p: Peer, headers: openarray[BlockHeader])
+    proc blockHeaders(p: Peer, headers: openArray[BlockHeader])

   requestResponse:
-    proc getBlockBodies(peer: Peer, hashes: openarray[KeccakHash]) {.gcsafe.} =
+    proc getBlockBodies(peer: Peer, hashes: openArray[KeccakHash]) {.gcsafe.} =
       if hashes.len > maxBodiesFetch:
         await peer.disconnect(BreachOfProtocol)
         return

       await response.send(peer.network.chain.getBlockBodies(hashes))

-    proc blockBodies(peer: Peer, blocks: openarray[BlockBody])
+    proc blockBodies(peer: Peer, blocks: openArray[BlockBody])

   proc newBlock(peer: Peer, bh: NewBlockAnnounce, totalDifficulty: DifficultyInt) =
     discard

@@ -103,15 +103,15 @@ p2pProtocol eth(version = protocolVersion,
   nextID 13

   requestResponse:
-    proc getNodeData(peer: Peer, hashes: openarray[KeccakHash]) =
+    proc getNodeData(peer: Peer, hashes: openArray[KeccakHash]) =
       await response.send(peer.network.chain.getStorageNodes(hashes))

-    proc nodeData(peer: Peer, data: openarray[Blob])
+    proc nodeData(peer: Peer, data: openArray[Blob])

   requestResponse:
-    proc getReceipts(peer: Peer, hashes: openarray[KeccakHash]) = discard
+    proc getReceipts(peer: Peer, hashes: openArray[KeccakHash]) = discard
       # TODO: implement `getReceipts` and reactivate this code
       # await response.send(peer.network.chain.getReceipts(hashes))

-    proc receipts(peer: Peer, receipts: openarray[Receipt])
+    proc receipts(peer: Peer, receipts: openArray[Receipt])
@@ -144,13 +144,13 @@ template updateBV: BufValueInt =
   bufValueAfterRequest(lesNetwork, lesPeer,
                        perProtocolMsgId, requestCostQuantity)

-func getValue(values: openarray[KeyValuePair],
+func getValue(values: openArray[KeyValuePair],
               key: string, T: typedesc): Option[T] =
   for v in values:
     if v.key == key:
       return some(rlp.decode(v.value, T))

-func getRequiredValue(values: openarray[KeyValuePair],
+func getRequiredValue(values: openArray[KeyValuePair],
                       key: string, T: typedesc): T =
   for v in values:
     if v.key == key:

@@ -166,7 +166,7 @@ p2pProtocol les(version = lesVersion,
                 incomingRequestDecorator = incomingRequestDecorator,
                 incomingResponseThunkDecorator = incomingResponseDecorator):
   handshake:
-    proc status(p: Peer, values: openarray[KeyValuePair])
+    proc status(p: Peer, values: openArray[KeyValuePair])

   onPeerConnected do (peer: Peer):
     let

@@ -254,7 +254,7 @@ p2pProtocol les(version = lesVersion,
                 headNumber: BlockNumber,
                 headTotalDifficulty: DifficultyInt,
                 reorgDepth: BlockNumber,
-                values: openarray[KeyValuePair],
+                values: openArray[KeyValuePair],
                 announceType: AnnounceType) =

     if peer.state.announceType == AnnounceType.None:

@@ -288,7 +288,7 @@ p2pProtocol les(version = lesVersion,
     proc blockHeaders(
       peer: Peer,
       bufValue: BufValueInt,
-      blocks: openarray[BlockHeader])
+      blocks: openArray[BlockHeader])

   ## On-damand data retrieval
   ##

@@ -296,7 +296,7 @@ p2pProtocol les(version = lesVersion,
   requestResponse:
     proc getBlockBodies(
       peer: Peer,
-      blocks: openarray[KeccakHash]) {.
+      blocks: openArray[KeccakHash]) {.
       costQuantity(blocks.len, max = maxBodiesFetch), gcsafe.} =

       let blocks = peer.network.chain.getBlockBodies(blocks)

@@ -305,12 +305,12 @@ p2pProtocol les(version = lesVersion,
     proc blockBodies(
       peer: Peer,
       bufValue: BufValueInt,
-      bodies: openarray[BlockBody])
+      bodies: openArray[BlockBody])

   requestResponse:
     proc getReceipts(
       peer: Peer,
-      hashes: openarray[KeccakHash])
+      hashes: openArray[KeccakHash])
       {.costQuantity(hashes.len, max = maxReceiptsFetch).} =

       let receipts = peer.network.chain.getReceipts(hashes)

@@ -319,12 +319,12 @@ p2pProtocol les(version = lesVersion,
     proc receipts(
       peer: Peer,
       bufValue: BufValueInt,
-      receipts: openarray[Receipt])
+      receipts: openArray[Receipt])

   requestResponse:
     proc getProofs(
       peer: Peer,
-      proofs: openarray[ProofRequest]) {.
+      proofs: openArray[ProofRequest]) {.
       costQuantity(proofs.len, max = maxProofsFetch).} =

       let proofs = peer.network.chain.getProofs(proofs)

@@ -333,7 +333,7 @@ p2pProtocol les(version = lesVersion,
     proc proofs(
       peer: Peer,
       bufValue: BufValueInt,
-      proofs: openarray[Blob])
+      proofs: openArray[Blob])

   requestResponse:
     proc getContractCodes(

@@ -354,7 +354,7 @@ p2pProtocol les(version = lesVersion,
   requestResponse:
     proc getHeaderProofs(
       peer: Peer,
-      reqs: openarray[ProofRequest]) {.
+      reqs: openArray[ProofRequest]) {.
      costQuantity(reqs.len, max = maxHeaderProofsFetch).} =

       let proofs = peer.network.chain.getHeaderProofs(reqs)

@@ -363,12 +363,12 @@ p2pProtocol les(version = lesVersion,
     proc headerProofs(
       peer: Peer,
       bufValue: BufValueInt,
-      proofs: openarray[Blob])
+      proofs: openArray[Blob])

   requestResponse:
     proc getHelperTrieProofs(
       peer: Peer,
-      reqs: openarray[HelperTrieProofRequest]) {.
+      reqs: openArray[HelperTrieProofRequest]) {.
       costQuantity(reqs.len, max = maxProofsFetch).} =

       var nodes, auxData: seq[Blob]

@@ -387,7 +387,7 @@ p2pProtocol les(version = lesVersion,
   requestResponse:
     proc sendTxV2(
       peer: Peer,
-      transactions: openarray[Transaction]) {.
+      transactions: openArray[Transaction]) {.
       costQuantity(transactions.len, max = maxTransactionsFetch).} =

       let chain = peer.network.chain

@@ -409,7 +409,7 @@ p2pProtocol les(version = lesVersion,

     proc getTxStatus(
       peer: Peer,
-      transactions: openarray[Transaction]) {.
+      transactions: openArray[Transaction]) {.
      costQuantity(transactions.len, max = maxTransactionsFetch).} =

       let chain = peer.network.chain

@@ -422,7 +422,7 @@ p2pProtocol les(version = lesVersion,
     proc txStatus(
       peer: Peer,
       bufValue: BufValueInt,
-      transactions: openarray[TransactionStatusMsg])
+      transactions: openArray[TransactionStatusMsg])

 proc configureLes*(node: EthereumNode,
                    # Client options:
@@ -189,7 +189,7 @@ proc fullBloom*(): Bloom =
   for i in 0..<result.len:
     result[i] = 0xFF

-proc encryptAesGcm(plain: openarray[byte], key: SymKey,
+proc encryptAesGcm(plain: openArray[byte], key: SymKey,
     iv: array[gcmIVLen, byte]): seq[byte] =
   ## Encrypt using AES-GCM, making sure to append tag and iv, in that order
   var gcm: GCM[aes256]

@@ -203,7 +203,7 @@ proc encryptAesGcm(plain: openarray[byte], key: SymKey,
   result.add tag
   result.add iv

-proc decryptAesGcm(cipher: openarray[byte], key: SymKey): Option[seq[byte]] =
+proc decryptAesGcm(cipher: openArray[byte], key: SymKey): Option[seq[byte]] =
   ## Decrypt AES-GCM ciphertext and validate authenticity - assumes
   ## cipher-tag-iv format of the buffer
   if cipher.len < gcmTagLen + gcmIVLen:
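As the doc comments above state, the encrypted buffer is laid out as ciphertext, then the GCM tag, then the IV. A small standalone sketch of slicing such a buffer back apart; the 16/12-byte lengths mirror common AES-GCM parameters and are assumptions here, standing in for `gcmTagLen`/`gcmIVLen`.

``` nim
const
  tagLen = 16   # assumption: AES-GCM tag length
  ivLen = 12    # assumption: AES-GCM IV length

proc splitCipherTagIv(buf: seq[byte]): tuple[cipher, tag, iv: seq[byte]] =
  ## Undo the cipher|tag|iv concatenation produced by an encryptAesGcm-style proc.
  doAssert buf.len >= tagLen + ivLen
  let cipherLen = buf.len - tagLen - ivLen
  (cipher: buf[0 ..< cipherLen],
   tag: buf[cipherLen ..< cipherLen + tagLen],
   iv: buf[buf.len - ivLen ..< buf.len])

let buf = newSeq[byte](5 + tagLen + ivLen)   # pretend 5 bytes of ciphertext
let (cipher, tag, iv) = splitCipherTagIv(buf)
assert cipher.len == 5 and tag.len == tagLen and iv.len == ivLen
```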
@@ -310,7 +310,7 @@ proc encode*(rng: var BrHmacDrbgContext, self: Payload): Option[seq[byte]] =
     # No encryption!
     return some(plain)

-proc decode*(data: openarray[byte], dst = none[PrivateKey](),
+proc decode*(data: openArray[byte], dst = none[PrivateKey](),
     symKey = none[SymKey]()): Option[DecodedPayload] =
   ## Decode data into payload, potentially trying to decrypt if keys are
   ## provided
@@ -163,7 +163,7 @@ p2pProtocol Whisper(version = whisperVersion,
                   bloom: seq[byte],
                   isLightNode: bool)

-  proc messages(peer: Peer, envelopes: openarray[Envelope]) =
+  proc messages(peer: Peer, envelopes: openArray[Envelope]) =
     if not peer.state.initialized:
       warn "Handshake not completed yet, discarding messages"
       return
@@ -53,7 +53,7 @@ proc roundup16*(x: int): int {.inline.} =
 template toa(a, b, c: untyped): untyped =
   toOpenArray((a), (b), (b) + (c) - 1)

-proc sxor[T](a: var openarray[T], b: openarray[T]) {.inline.} =
+proc sxor[T](a: var openArray[T], b: openArray[T]) {.inline.} =
   doAssert(len(a) == len(b))
   for i in 0 ..< len(a):
     a[i] = a[i] xor b[i]

@@ -80,9 +80,9 @@ template decryptedLength*(size: int): int =
   ## Returns size of decrypted message for body with length `size`.
   roundup16(size)

-proc encrypt*(c: var SecretState, header: openarray[byte],
-              frame: openarray[byte],
-              output: var openarray[byte]): RlpxResult[void] =
+proc encrypt*(c: var SecretState, header: openArray[byte],
+              frame: openArray[byte],
+              output: var openArray[byte]): RlpxResult[void] =
   ## Encrypts `header` and `frame` using SecretState `c` context and store
   ## result into `output`.
   ##

@@ -136,7 +136,7 @@ proc encrypt*(c: var SecretState, header: openarray[byte],
   copyMem(addr output[frameMacPos], addr frameMac.data[0], RlpHeaderLength)
   ok()

-proc encryptMsg*(msg: openarray[byte], secrets: var SecretState): seq[byte] =
+proc encryptMsg*(msg: openArray[byte], secrets: var SecretState): seq[byte] =
   var header: RlpxHeader

   if uint32(msg.len) > maxUInt24:

@@ -163,8 +163,8 @@ proc encryptMsg*(msg: openarray[byte], secrets: var SecretState): seq[byte] =
 proc getBodySize*(a: RlpxHeader): int =
   (int(a[0]) shl 16) or (int(a[1]) shl 8) or int(a[2])

-proc decryptHeader*(c: var SecretState, data: openarray[byte],
-                    output: var openarray[byte]): RlpxResult[void] =
+proc decryptHeader*(c: var SecretState, data: openArray[byte],
+                    output: var openArray[byte]): RlpxResult[void] =
   ## Decrypts header `data` using SecretState `c` context and store
   ## result into `output`.
   ##

@@ -201,15 +201,15 @@ proc decryptHeader*(c: var SecretState, data: openarray[byte],
   result = ok()

 proc decryptHeaderAndGetMsgSize*(c: var SecretState,
-                                 encryptedHeader: openarray[byte],
+                                 encryptedHeader: openArray[byte],
                                  outSize: var int): RlpxResult[void] =
   var decryptedHeader: RlpxHeader
   result = decryptHeader(c, encryptedHeader, decryptedHeader)
   if result.isOk():
     outSize = decryptedHeader.getBodySize

-proc decryptBody*(c: var SecretState, data: openarray[byte], bodysize: int,
-                  output: var openarray[byte], outlen: var int): RlpxResult[void] =
+proc decryptBody*(c: var SecretState, data: openArray[byte], bodysize: int,
+                  output: var openArray[byte], outlen: var int): RlpxResult[void] =
   ## Decrypts body `data` using SecretState `c` context and store
   ## result into `output`.
   ##
@@ -353,7 +353,7 @@ proc readImpl[E](rlp: var Rlp, T: type seq[E]): T =
   for elem in rlp:
     result.add rlp.read(E)

-proc readImpl[E](rlp: var Rlp, T: type openarray[E]): seq[E] =
+proc readImpl[E](rlp: var Rlp, T: type openArray[E]): seq[E] =
   result = readImpl(rlp, seq[E])

 proc readImpl(rlp: var Rlp, T: type[object|tuple],

@@ -406,7 +406,7 @@ proc `>>`*[T](rlp: var Rlp, location: var T) =
 template readRecordType*(rlp: var Rlp, T: type, wrappedInList: bool): auto =
   readImpl(rlp, T, wrappedInList)

-proc decode*(bytes: openarray[byte]): RlpNode =
+proc decode*(bytes: openArray[byte]): RlpNode =
   var rlp = rlpFromBytes(bytes)
   rlp.toNodes
@@ -130,10 +130,10 @@ proc appendBlob(self: var RlpWriter, data: openArray[byte], startMarker: byte) =
 proc appendImpl(self: var RlpWriter, data: string) =
   appendBlob(self, data.toOpenArrayByte(0, data.high), BLOB_START_MARKER)

-proc appendBlob(self: var RlpWriter, data: openarray[byte]) =
+proc appendBlob(self: var RlpWriter, data: openArray[byte]) =
   appendBlob(self, data, BLOB_START_MARKER)

-proc appendBlob(self: var RlpWriter, data: openarray[char]) =
+proc appendBlob(self: var RlpWriter, data: openArray[char]) =
   appendBlob(self, data.toOpenArrayByte(0, data.high), BLOB_START_MARKER)

 proc appendInt(self: var RlpWriter, i: Integer) =

@@ -169,10 +169,10 @@ template appendImpl(self: var RlpWriter, e: enum) =
 template appendImpl(self: var RlpWriter, b: bool) =
   appendImpl(self, int(b))

-proc appendImpl[T](self: var RlpWriter, listOrBlob: openarray[T]) =
+proc appendImpl[T](self: var RlpWriter, listOrBlob: openArray[T]) =
   mixin append

-  # TODO: This append proc should be overloaded by `openarray[byte]` after
+  # TODO: This append proc should be overloaded by `openArray[byte]` after
   # nim bug #7416 is fixed.
   when T is (byte or char):
     self.appendBlob(listOrBlob)
@@ -74,13 +74,13 @@ proc hashAndSave*(self: BinaryTrie, node: openArray[byte]): TrieNodeKey =
   self.db.put(result, node)

 template saveKV(self: BinaryTrie, keyPath: TrieBitSeq | bool, child: openArray[byte]): untyped =
-  self.hashAndsave(encodeKVNode(keyPath, child))
+  self.hashAndSave(encodeKVNode(keyPath, child))

 template saveLeaf(self: BinaryTrie, value: openArray[byte]): untyped =
-  self.hashAndsave(encodeLeafNode(value))
+  self.hashAndSave(encodeLeafNode(value))

 template saveBranch(self: BinaryTrie, L, R: openArray[byte]): untyped =
-  self.hashAndsave(encodeBranchNode(L, R))
+  self.hashAndSave(encodeBranchNode(L, R))

 proc setBranchNode(self: BinaryTrie, keyPath: TrieBitSeq, node: TrieNode,
                    value: openArray[byte], deleteSubtrie = false): TrieNodeKey
@@ -145,7 +145,7 @@ proc getWitnessImpl*(db: DB; nodeHash: TrieNodeKey; keyPath: TrieBitSeq; output:
     raise newException(InvalidKeyError, "Key too long")
   of KV_TYPE:
     output.add nodeVal
-    if keyPath.len < node.keyPath.len and node.keyPath[0..<keyPath.len] == keypath:
+    if keyPath.len < node.keyPath.len and node.keyPath[0..<keyPath.len] == keyPath:
       if not getTrieNodesImpl(db, node.child, output): return
     elif keyPath[0..<node.keyPath.len] == node.keyPath:
       getWitnessImpl(db, node.child, keyPath.sliceToEnd(node.keyPath.len), output)
@@ -15,17 +15,17 @@ type
     deleted: HashSet[seq[byte]]

   # XXX: poor's man vtref types
-  PutProc = proc (db: RootRef, key, val: openarray[byte]) {.
+  PutProc = proc (db: RootRef, key, val: openArray[byte]) {.
     gcsafe, raises: [Defect].}

-  GetProc = proc (db: RootRef, key: openarray[byte]): seq[byte] {.
+  GetProc = proc (db: RootRef, key: openArray[byte]): seq[byte] {.
     gcsafe, raises: [Defect].}
     ## The result will be empty seq if not found

-  DelProc = proc (db: RootRef, key: openarray[byte]) {.
+  DelProc = proc (db: RootRef, key: openArray[byte]) {.
     gcsafe, raises: [Defect].}

-  ContainsProc = proc (db: RootRef, key: openarray[byte]): bool {.
+  ContainsProc = proc (db: RootRef, key: openArray[byte]): bool {.
     gcsafe, raises: [Defect].}

   TrieDatabaseRef* = ref object

@@ -49,19 +49,19 @@ type

   TransactionID* = distinct DbTransaction

-proc put*(db: TrieDatabaseRef, key, val: openarray[byte]) {.gcsafe.}
-proc get*(db: TrieDatabaseRef, key: openarray[byte]): seq[byte] {.gcsafe.}
-proc del*(db: TrieDatabaseRef, key: openarray[byte]) {.gcsafe.}
+proc put*(db: TrieDatabaseRef, key, val: openArray[byte]) {.gcsafe.}
+proc get*(db: TrieDatabaseRef, key: openArray[byte]): seq[byte] {.gcsafe.}
+proc del*(db: TrieDatabaseRef, key: openArray[byte]) {.gcsafe.}
 proc beginTransaction*(db: TrieDatabaseRef): DbTransaction {.gcsafe.}

 proc keccak*(r: openArray[byte]): KeccakHash =
   keccak256.digest r

-proc get*(db: MemoryLayer, key: openarray[byte]): seq[byte] =
+proc get*(db: MemoryLayer, key: openArray[byte]): seq[byte] =
   result = db.records.getOrDefault(@key).value
   traceGet key, result

-proc del*(db: MemoryLayer, key: openarray[byte]) =
+proc del*(db: MemoryLayer, key: openArray[byte]) =
   traceDel key

   # The database should ensure that the empty key is always active:

@@ -76,10 +76,10 @@ proc del*(db: MemoryLayer, key: openarray[byte]) =
     db.records.del(key)
     db.deleted.incl(key)

-proc contains*(db: MemoryLayer, key: openarray[byte]): bool =
+proc contains*(db: MemoryLayer, key: openArray[byte]): bool =
   db.records.hasKey(@key)

-proc put*(db: MemoryLayer, key, val: openarray[byte]) =
+proc put*(db: MemoryLayer, key, val: openArray[byte]) =
   tracePut key, val

   # TODO: This is quite inefficient and it won't be necessary once

@@ -168,19 +168,19 @@ proc safeDispose*(t: DbTransaction) {.inline.} =
   if (not isNil(t)) and (t.state == Pending):
     t.rollback()

-proc putImpl[T](db: RootRef, key, val: openarray[byte]) =
+proc putImpl[T](db: RootRef, key, val: openArray[byte]) =
   mixin put
   put(T(db), key, val)

-proc getImpl[T](db: RootRef, key: openarray[byte]): seq[byte] =
+proc getImpl[T](db: RootRef, key: openArray[byte]): seq[byte] =
   mixin get
   return get(T(db), key)

-proc delImpl[T](db: RootRef, key: openarray[byte]) =
+proc delImpl[T](db: RootRef, key: openArray[byte]) =
   mixin del
   del(T(db), key)

-proc containsImpl[T](db: RootRef, key: openarray[byte]): bool =
+proc containsImpl[T](db: RootRef, key: openArray[byte]): bool =
   mixin contains
   return contains(T(db), key)
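The `putImpl`/`getImpl`/`delImpl`/`containsImpl` procs above, together with the `PutProc`/`GetProc` fields, are the "poor man's vtref" pattern the comment mentions: the concrete store is erased to `RootRef`, and a generic wrapper is instantiated once per store type and stashed as a proc pointer. A self-contained sketch of the idea (names here are illustrative, not the nim-eth API):

``` nim
type
  KvStore = ref object of RootRef
    data: seq[string]

  GetProc = proc (db: RootRef, i: int): string {.gcsafe.}

  Handle = object
    obj: RootRef        # type-erased store
    getProc: GetProc    # dispatch through a per-type instantiation

proc getImpl[T](db: RootRef, i: int): string =
  # `T(db)` downcasts back to the concrete store type the handle was built with.
  T(db).data[i]

proc toHandle[T: RootRef](x: T): Handle =
  Handle(obj: x, getProc: getImpl[T])

let store = KvStore(data: @["alpha", "beta"])
let h = toHandle(store)
echo h.getProc(h.obj, 1)   # prints "beta"
```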
@@ -194,14 +194,14 @@ proc trieDB*[T: RootRef](x: T): TrieDatabaseRef =
   result.delProc = delImpl[T]
   result.containsProc = containsImpl[T]

-proc put*(db: TrieDatabaseRef, key, val: openarray[byte]) =
+proc put*(db: TrieDatabaseRef, key, val: openArray[byte]) =
   var t = db.mostInnerTransaction
   if t != nil:
     t.modifications.put(key, val)
   else:
     db.putProc(db.obj, key, val)

-proc get*(db: TrieDatabaseRef, key: openarray[byte]): seq[byte] =
+proc get*(db: TrieDatabaseRef, key: openArray[byte]): seq[byte] =
   # TODO: This is quite inefficient and it won't be necessary once
   # https://github.com/nim-lang/Nim/issues/7457 is developed.
   let key = @key

@@ -216,14 +216,14 @@ proc get*(db: TrieDatabaseRef, key: openarray[byte]): seq[byte] =
   if db.getProc != nil:
     result = db.getProc(db.obj, key)

-proc del*(db: TrieDatabaseRef, key: openarray[byte]) =
+proc del*(db: TrieDatabaseRef, key: openArray[byte]) =
   var t = db.mostInnerTransaction
   if t != nil:
     t.modifications.del(key)
   else:
     db.delProc(db.obj, key)

-proc contains*(db: TrieDatabaseRef, key: openarray[byte]): bool =
+proc contains*(db: TrieDatabaseRef, key: openArray[byte]): bool =
   # TODO: This is quite inefficient and it won't be necessary once
   # https://github.com/nim-lang/Nim/issues/7457 is developed.
   let key = @key
@@ -49,8 +49,8 @@ proc addSample*(c: var ClockDriftCalculator, actualDelay: uint32, currentTime: M
   let distDown = c.averageDelayBase - actualDelay

   let distUp = actualDelay - c.averageDelayBase

   let averageDelaySample =
     if (distDown > distUp):
       # averageDelayBase is smaller that actualDelay, sample should be positive
       int64(distUp)
@@ -6,7 +6,7 @@

 {.push raises: [Defect].}

 import
   chronos,
   ./utp_utils

@@ -45,8 +45,8 @@ proc addSample*(h: var DelayHistogram, sample: uint32, currentTime: Moment) =
   h.delayBaseHistory[h.delayBaseIdx] = sample

   if wrapCompareLess(sample, h.delayBase):
-    h.delay_base = sample
+    h.delayBase = sample

   let delay = sample - h.delayBase

   h.currentDelayHistory[h.currentDelyIdx] = delay
@@ -16,19 +16,19 @@ export options
 # utp implementation.
 # Another alternative would be to use standard deque from deques module, and caluclate
 # item indexes from their sequence numbers.
 type
   GrowableCircularBuffer*[A] = object
     items: seq[Option[A]]
     mask: int

 # provided size will always be adjusted to next power of two
 proc init*[A](T: type GrowableCircularBuffer[A], size: Natural = 16): T =
   let powOfTwoSize = nextPowerOfTwo(size)
   T(
     items: newSeq[Option[A]](size),
     mask: powOfTwoSize - 1
   )

 proc get*[A](buff: GrowableCircularBuffer[A], i: Natural): Option[A] =
   buff.items[i and buff.mask]

@@ -52,7 +52,7 @@ proc exists*[A](buff: GrowableCircularBuffer[A], i: Natural, check: proc (x: A):
   else:
     false

 proc `[]`*[A](buff: var GrowableCircularBuffer[A], i: Natural): var A =
   ## Returns contents of the `var GrowableCircularBuffer`. If it is not set, then an exception
   ## is thrown.
   buff.items[i and buff.mask].get()
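`GrowableCircularBuffer` relies on the capacity always being a power of two, so that `i and mask` maps any index, including wrapped uTP sequence numbers, onto a slot without a modulo. A tiny standalone sketch of that masking trick (the constants are illustrative):

``` nim
const
  capacity = 16              # must be a power of two
  mask = capacity - 1

var slots: array[capacity, int]

proc slotFor(seqNr: uint16): int =
  ## Same idea as `i and buff.mask` above: cheap wrap-around indexing.
  int(seqNr) and mask

slots[slotFor(7)] = 1
slots[slotFor(65535)] = 2    # 65535 and 15 == 15, so it lands in slot 15
assert slots[7] == 1 and slots[15] == 2
```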
@@ -32,10 +32,10 @@ proc applyCongestionControl*(
     minRtt: Duration,
     calculatedDelay: Duration,
     clockDrift: int32
   ): (uint32, uint32, bool) =
   if (actualDelay.isZero() or minRtt.isZero() or numOfAckedBytes == 0):
     return (currentMaxWindowSize, currentSlowStartTreshold, currentSlowStart)

   let ourDelay = min(minRtt, calculatedDelay)

   let target = targetDelay

@@ -64,7 +64,7 @@ proc applyCongestionControl*(
   # double window_factor = (double)min(bytes_acked, max_window) / (double)max(max_window, bytes_acked);
   # double delay_factor = off_target / target;
   # double scaled_gain = MAX_CWND_INCREASE_BYTES_PER_RTT * window_factor * delay_factor;

   let windowFactor = float64(min(numOfAckedBytes, currentMaxWindowSize)) / float64(max(currentMaxWindowSize, numOfAckedBytes))

   let delayFactor = float64(offTarget) / float64(target.microseconds())

@@ -73,7 +73,7 @@ proc applyCongestionControl*(

   let scaledWindow = float64(currentMaxWindowSize) + scaledGain

   let ledbatCwnd: uint32 =
     if scaledWindow < minWindowSize:
       uint32(minWindowSize)
     else:
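The LEDBAT update visible above scales the window change by two factors: how much of the current window the newly acked bytes represent, and how far the measured one-way delay sits below the target. A standalone sketch of just that gain computation (the per-RTT constant is an assumption for illustration, not the library's value):

``` nim
const maxCwndIncreaseBytesPerRtt = 3000.0   # assumption, for illustration only

proc scaledGain(ackedBytes, maxWindow: uint32,
                offTargetMicros, targetMicros: int64): float64 =
  ## Positive when the delay is below target (window may grow),
  ## negative when above target (window shrinks).
  let windowFactor = float64(min(ackedBytes, maxWindow)) /
                     float64(max(maxWindow, ackedBytes))
  let delayFactor = float64(offTargetMicros) / float64(targetMicros)
  maxCwndIncreaseBytesPerRtt * windowFactor * delayFactor

echo scaledGain(1500, 10_000, 50_000, 100_000)    # below target: positive gain
echo scaledGain(1500, 10_000, -20_000, 100_000)   # above target: negative gain
```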
@@ -15,14 +15,14 @@ import

 export results

 const
   minimalHeaderSize = 20
   minimalHeaderSizeWithSelectiveAck = 26
   protocolVersion = 1
   zeroMoment = Moment.init(0, Nanosecond)
   acksArrayLength: uint8 = 4

 type
   PacketType* = enum
     ST_DATA = 0,
     ST_FIN = 1,

@@ -65,7 +65,7 @@ type
 # 2. Monotonicity
 # Reference lib have a lot of checks to assume that this is monotonic on
 # every system, and warnings when monotonic clock is not avaialable.
 proc getMonoTimestamp*(): TimeStampInfo =
   let currentMoment = Moment.now()

   # Casting this value from int64 to uin32, my lead to some sudden spikes in

@@ -75,7 +75,7 @@ proc getMonoTimestamp*(): TimeStampInfo =
   # uTP implementation is resistant to those spikes are as it keeps history of
   # few last delays on uses smallest one for calculating ledbat window.
   # so any outlier huge value will be ignored
   #
   let timestamp = uint32((currentMoment - zeroMoment).microseconds())
   TimeStampInfo(moment: currentMoment, timestamp: timestamp)

@@ -130,7 +130,7 @@ proc encodePacket*(p: Packet): seq[byte] =
   except IOError as e:
     # This should not happen in case of in-memory streams
     raiseAssert e.msg

 proc decodePacket*(bytes: openArray[byte]): Result[Packet, string] =
   let receivedBytesLength = len(bytes)
   if receivedBytesLength < minimalHeaderSize:

@@ -139,11 +139,11 @@ proc decodePacket*(bytes: openArray[byte]): Result[Packet, string] =
   let version = bytes[0] and 0xf
   if version != protocolVersion:
     return err("invalid packet version")

   var kind: PacketType
   if not checkedEnumAssign(kind, (bytes[0] shr 4)):
     return err("Invalid message type")

   let extensionByte = bytes[1]

   if (not (extensionByte == 0 or extensionByte == 1)):

@@ -161,7 +161,7 @@ proc decodePacket*(bytes: openArray[byte]): Result[Packet, string] =
     seq_nr: fromBytesBE(uint16, bytes.toOpenArray(16, 17)),
     ack_nr: fromBytesBE(uint16, bytes.toOpenArray(18, 19)),
   )

   if extensionByte == 0:
     # packet without any extensions
     let payload =

@@ -175,7 +175,7 @@ proc decodePacket*(bytes: openArray[byte]): Result[Packet, string] =
     # packet with selective ack extension
     if (receivedBytesLength < minimalHeaderSizeWithSelectiveAck):
       return err("Packet too short for selective ack extension")

     let nextExtension = bytes[20]
     let extLength = bytes[21]

@@ -186,11 +186,11 @@ proc decodePacket*(bytes: openArray[byte]): Result[Packet, string] =
     # as 4byte bit mask is able to ack 32 packets in the future which is more than enough
     if (nextExtension != 0 or extLength != 4):
       return err("Bad format of selective ack extension")


     let extension = SelectiveAckExtension(
       acks: [bytes[22], bytes[23], bytes[24], bytes[25]]
     )

     let payload =
       if (receivedBytesLength == minimalHeaderSizeWithSelectiveAck):
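The `decodePacket` path above reads the fixed uTP v1 header layout: the low nibble of byte 0 is the protocol version, the high nibble is the packet type, and the 16-bit fields are big-endian at fixed offsets. A small sketch of that decoding on a hand-made byte string; the offsets mirror the code above, but the buffer here is not a complete, valid packet, and the `ST_STATE = 2` value comes from the uTP spec rather than from the enum lines visible in this diff.

``` nim
import stew/endians2

# Byte 0: type in the high nibble, version in the low nibble.
# Bytes 2..3: a big-endian uint16 field (same pattern as seq_nr/ack_nr above).
let bytes = @[byte 0x21, 0x00, 0xAA, 0xBB]

let version = bytes[0] and 0xf            # 1
let kindNibble = bytes[0] shr 4           # 2, i.e. ST_STATE in the uTP spec
let connectionId = fromBytesBE(uint16, bytes.toOpenArray(2, 3))

assert version == 1
assert kindNibble == 2
assert connectionId == 0xAABB'u16
```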
@@ -235,7 +235,7 @@ proc ackPacket*(
     bufferSize: uint32,
     timestampDiff: uint32,
     acksBitmask: Option[array[4, byte]] = none[array[4, byte]]()
   ): Packet =

   let (extensionByte, extensionData) =
     if acksBitmask.isSome():

@@ -254,7 +254,6 @@ proc ackPacket*(
     seqNr: seqNr,
     ackNr: ackNr
   )

-
   Packet(header: h, eack: extensionData, payload: @[])

@@ -265,7 +264,7 @@ proc dataPacket*(
     bufferSize: uint32,
     payload: seq[byte],
     timestampDiff: uint32
   ): Packet =
   let h = PacketHeaderV1(
     pType: ST_DATA,
     version: protocolVersion,

@@ -278,10 +277,10 @@ proc dataPacket*(
     seqNr: seqNr,
     ackNr: ackNr
   )

   Packet(header: h, eack: none[SelectiveAckExtension](), payload: payload)

 proc resetPacket*(seqNr: uint16, sndConnectionId: uint16, ackNr: uint16): Packet =
   let h = PacketHeaderV1(
     pType: ST_RESET,
     version: protocolVersion,

@@ -296,7 +295,7 @@ proc resetPacket*(seqNr: uint16, sndConnectionId: uint16, ackNr: uint16): Packet
     seqNr: seqNr,
     ackNr: ackNr
   )

   Packet(header: h, eack: none[SelectiveAckExtension](), payload: @[])

 proc finPacket*(

@@ -305,7 +304,7 @@ proc finPacket*(
     ackNr: uint16,
     bufferSize: uint32,
     timestampDiff: uint32
   ): Packet =
   let h = PacketHeaderV1(
     pType: ST_FIN,
     version: protocolVersion,

@@ -318,5 +317,5 @@ proc finPacket*(
     seqNr: seqNr,
     ackNr: ackNr
   )

   Packet(header: h, eack: none[SelectiveAckExtension](), payload: @[])
@@ -31,7 +31,7 @@ type SendBufferTracker* = ref object
 proc new*(
     T: type SendBufferTracker,
     currentWindow: uint32,
     maxRemoteWindow: uint32,
     maxSndBufferSize: uint32,
     maxWindow: uint32): T =
   return (

@@ -70,7 +70,7 @@ proc updateMaxRemote*(t: SendBufferTracker, newRemoteWindow: uint32) =
   t.maxRemoteWindow = newRemoteWindow
   t.checkWaiters()

 proc updateMaxWindowSize*(t: SendBufferTracker, newRemoteWindow: uint32, maxWindow: uint32) =
   t.maxRemoteWindow = newRemoteWindow
   t.maxWindow = maxWindow
   t.checkWaiters()
@@ -28,7 +28,7 @@ proc hash(x: UtpSocketKey[Node]): Hash =
 proc initSendCallback(
     t: protocol.Protocol, subProtocolName: seq[byte]): SendCallback[Node] =
   return (
     proc (to: Node, data: seq[byte]): Future[void] =
       let fut = newFuture[void]()
       # TODO: In discovery v5 each talkreq waits for a talkresp, but here we
       # would really like the fire and forget semantics (similar to udp).

@@ -54,7 +54,7 @@ proc messageHandler(protocol: TalkProtocol, request: seq[byte],
       @[]
   else:
     @[]

 proc new*(
     T: type UtpDiscv5Protocol,
     p: protocol.Protocol,
@@ -68,18 +68,18 @@ proc processDatagram(transp: DatagramTransport, raddr: TransportAddress):

proc initSendCallback(t: DatagramTransport): SendCallback[TransportAddress] =
  return (
    proc (to: TransportAddress, data: seq[byte]): Future[void] =
      t.sendTo(to, data)
  )

proc new*(
  T: type UtpProtocol,
  acceptConnectionCb: AcceptConnectionCallback[TransportAddress],
  address: TransportAddress,
  socketConfig: SocketConfig = SocketConfig.init(),
  allowConnectionCb: AllowConnectionCallback[TransportAddress] = nil,
  rng = newRng()): UtpProtocol {.raises: [Defect, CatchableError].} =

  doAssert(not(isNil(acceptConnectionCb)))

  let router = UtpRouter[TransportAddress].new(
@@ -97,7 +97,7 @@ proc shutdownWait*(p: UtpProtocol): Future[void] {.async.} =
  ## closes all managed utp sockets and then underlying transport
  await p.utpRouter.shutdownWait()
  await p.transport.closeWait()

proc connectTo*(r: UtpProtocol, address: TransportAddress): Future[ConnectionResult[TransportAddress]] =
  return r.utpRouter.connectTo(address)
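The `new`/`connectTo`/`shutdownWait` procs shown above are enough for a small end-to-end sketch. The accept-callback shape and the import path are assumptions; everything else mirrors the signatures in the hunks.

``` nim
# Hedged sketch of driving the UDP-based uTP API shown above.
import chronos
import eth/utp/utp_protocol

proc acceptCb(server: UtpRouter[TransportAddress],
              socket: UtpSocket[TransportAddress]): Future[void] {.async.} =
  # Called for every accepted incoming connection (assumed callback shape).
  discard

proc main() {.async.} =
  let serverAddr = initTAddress("127.0.0.1", 9080)
  let server = UtpProtocol.new(acceptCb, serverAddr)

  let clientAddr = initTAddress("127.0.0.1", 9081)
  let client = UtpProtocol.new(acceptCb, clientAddr)

  # connectTo resolves to a ConnectionResult; on success it carries the socket.
  let res = await client.connectTo(serverAddr)
  if res.isOk():
    discard await res.get().write(@[byte 1, 2, 3])

  await client.shutdownWait()
  await server.shutdownWait()

waitFor main()
```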
@@ -184,7 +184,7 @@ proc processPacket[A](r: UtpRouter[A], p: Packet, sender: A) {.async.}=
  let rstPacket = resetPacket(randUint16(r.rng[]), p.header.connectionId, p.header.seqNr)
  await r.sendCb(sender, encodePacket(rstPacket))

proc processIncomingBytes*[A](r: UtpRouter[A], bytes: seq[byte], sender: A) {.async.} =
  if (not r.closed):
    let dec = decodePacket(bytes)
    if (dec.isOk()):
@@ -202,11 +202,11 @@ proc generateNewUniqueSocket[A](r: UtpRouter[A], address: A): Option[UtpSocket[A

    if r.registerIfAbsent(socket):
      return some(socket)

    inc tryCount

  return none[UtpSocket[A]]()

proc connect[A](s: UtpSocket[A]): Future[ConnectionResult[A]] {.async.}=
  let startFut = s.startOutgoingSocket()

@@ -243,7 +243,7 @@ proc connectTo*[A](r: UtpRouter[A], address: A, connectionId: uint16): Future[Co
  let socket = newOutgoingSocket[A](address, r.sendCb, r.socketConfig, connectionId, r.rng[])

  if (r.registerIfAbsent(socket)):
    return await socket.connect()
  else:
    return err(OutgoingConnectionError(kind: SocketAlreadyExists))
@@ -35,7 +35,7 @@ type
  UtpSocketKey*[A] = object
    remoteAddress*: A
    rcvId*: uint16

  OutgoingPacket = object
    packetBytes: seq[byte]
    transmissions: uint16
@@ -50,7 +50,7 @@ type
  SendCallback*[A] = proc (to: A, data: seq[byte]): Future[void] {.gcsafe, raises: [Defect]}

  SocketConfig* = object
    # This is configurable (in contrast to reference impl), as with standard 2 syn resends
    # default timeout set to 3seconds and doubling of timeout with each re-send, it
    # means that initial connection would timeout after 21s, which seems rather long
    initialSynTimeout*: Duration
@@ -77,7 +77,7 @@ type
    remoteWindowResetTimeout*: Duration

  WriteErrorType* = enum
    SocketNotWriteable,
    FinSent

  WriteError* = object
@@ -114,11 +114,11 @@ type
    seqNr: uint16
    # All seq number up to this havve been correctly acked by us
    ackNr: uint16

    # Should be completed after succesful connection to remote host or after timeout
    # for the first syn packet
    connectionFuture: Future[void]

    # the number of packets in the send queue. Packets that haven't
    # yet been sent count as well as packets marked as needing resend
    # the oldest un-acked packet in the send queue is seq_nr - cur_window_packets
@@ -135,7 +135,7 @@ type

    # current retransmit Timeout used to calculate rtoTimeout
    retransmitTimeout: Duration

    # calculated round trip time during communication with remote peer
    rtt: Duration
    # calculated round trip time variance
@@ -147,7 +147,7 @@ type
    # RTO timeout will happen when currenTime > rtoTimeout
    rtoTimeout: Moment

    # rcvBuffer
    buffer: AsyncBuffer

    # loop called every 500ms to check for on going timeout status
@@ -223,7 +223,7 @@ type

  OutgoingConnectionErrorType* = enum
    SocketAlreadyExists, ConnectionTimedOut, ErrorWhileSendingSyn

  OutgoingConnectionError* = object
    case kind*: OutgoingConnectionErrorType
    of ErrorWhileSendingSyn:
@@ -243,7 +243,7 @@ const
  # How often each socket check its different on going timers
  checkTimeoutsLoopInterval = milliseconds(500)

  # Defualt initial timeout for first Syn packet
  defaultInitialSynTimeout = milliseconds(3000)

  # Initial timeout to receive first Data data packet after receiving initial Syn
@@ -274,7 +274,7 @@ const
  # to zero. i.e when we received a packet from remote peer with `wndSize` set to 0.
  defaultResetWindowTimeout = seconds(15)

  # If remote peer window drops to zero, then after some time we will reset it
  # to this value even if we do not receive any more messages from remote peers.
  # Reset period is configured in `SocketConfig`
  minimalRemoteWindow: uint32 = 1500
@@ -307,7 +307,7 @@ proc init(
  )

proc init*(
  T: type SocketConfig,
  initialSynTimeout: Duration = defaultInitialSynTimeout,
  dataResendsBeforeFailure: uint16 = defaultDataResendsBeforeFailure,
  optRcvBuffer: uint32 = defaultOptRcvBuffer,
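The defaulted `SocketConfig.init` above lines up with the test further down that passes `SocketConfig.init(milliseconds(500))`. A short sketch of overriding only the connection-related knobs (module path assumed):

``` nim
# Sketch: tighten the SYN timeout and resend budget via the defaulted init.
import chronos
import eth/utp/utp_socket

# With a 500 ms initial SYN timeout and timeout doubling on every resend,
# an unanswered connect gives up much faster than with the 3 s default.
let cfg = SocketConfig.init(
  initialSynTimeout = milliseconds(500),
  dataResendsBeforeFailure = 4'u16
)
```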
@@ -345,11 +345,11 @@ proc sendAck(socket: UtpSocket): Future[void] =
  ## Creates and sends ack, based on current socket state. Acks are different from
  ## other packets as we do not track them in outgoing buffet

  let ackPacket =
    ackPacket(
      socket.seqNr,
      socket.connectionIdSnd,
      socket.ackNr,
      socket.getRcvWindowSize(),
      socket.replayMicro
    )
@@ -399,13 +399,13 @@ proc markAllPacketAsLost(s: UtpSocket) =

proc isOpened(socket:UtpSocket): bool =
  return (
    socket.state == SynRecv or
    socket.state == SynSent or
    socket.state == Connected
  )

proc shouldDisconnectFromFailedRemote(socket: UtpSocket): bool =
  (socket.state == SynSent and socket.retransmitCount >= 2) or
  (socket.retransmitCount >= socket.socketConfig.dataResendsBeforeFailure)

proc checkTimeouts(socket: UtpSocket) {.async.} =
@@ -415,13 +415,13 @@ proc checkTimeouts(socket: UtpSocket) {.async.} =
  await socket.flushPackets()

  if socket.isOpened():

    if (socket.sendBufferTracker.maxRemoteWindow == 0 and currentTime > socket.zeroWindowTimer):
      debug "Reset remote window to minimal value"
      socket.sendBufferTracker.updateMaxRemote(minimalRemoteWindow)

    if (currentTime > socket.rtoTimeout):

      # TODO add handling of probe time outs. Reference implemenation has mechanism
      # of sending probes to determine mtu size. Probe timeouts do not count to standard
      # timeouts calculations
@@ -431,7 +431,7 @@ proc checkTimeouts(socket: UtpSocket) {.async.} =
      if (socket.state == SynRecv):
        socket.destroy()
        return

      if socket.shouldDisconnectFromFailedRemote():
        if socket.state == SynSent and (not socket.connectionFuture.finished()):
          socket.connectionFuture.fail(newException(ConnectionError, "Connection to peer timed out"))
@@ -442,7 +442,7 @@ proc checkTimeouts(socket: UtpSocket) {.async.} =
      let newTimeout = socket.retransmitTimeout * 2
      socket.retransmitTimeout = newTimeout
      socket.rtoTimeout = currentTime + newTimeout

      let currentPacketSize = uint32(socket.getPacketSize())

      if (socket.curWindowPackets == 0 and socket.sendBufferTracker.maxWindow > currentPacketSize):
@@ -470,7 +470,7 @@ proc checkTimeouts(socket: UtpSocket) {.async.} =
      # as then every selecivly acked packet restes timeout timer and removes packet
      # from out buffer.
      markAllPacketAsLost(socket)

      # resend oldest packet if there are some packets in flight
      if (socket.curWindowPackets > 0):
        notice "resending oldest packet in outBuffer"
@@ -531,7 +531,7 @@ proc handleDataWrite(socket: UtpSocket, data: seq[byte], writeFut: Future[WriteR
      if socket.curWindowPackets == 0:
        socket.resetSendTimeout()

      let dataPacket =
        dataPacket(
          socket.seqNr,
          socket.connectionIdSnd,
@@ -544,7 +544,7 @@ proc handleDataWrite(socket: UtpSocket, data: seq[byte], writeFut: Future[WriteR
      socket.registerOutgoingPacket(outgoingPacket)
      await socket.sendData(outgoingPacket.packetBytes)
  except CancelledError as exc:
    # write loop has been cancelled in the middle of processing due to the
    # socket closing
    # this approach can create partial write in case destroyin socket in the
    # the middle of the write
@@ -566,8 +566,8 @@ proc handleClose(socket: UtpSocket): Future[void] {.async.} =
  try:
    if socket.curWindowPackets == 0:
      socket.resetSendTimeout()

    let finEncoded =
      encodePacket(
        finPacket(
          socket.seqNr,
@@ -577,13 +577,13 @@ proc handleClose(socket: UtpSocket): Future[void] {.async.} =
          socket.replayMicro
        )
      )
    socket.registerOutgoingPacket(OutgoingPacket.init(finEncoded, 1, true, 0))
    await socket.sendData(finEncoded)
    socket.finSent = true
  except CancelledError as exc:
    raise exc

proc writeLoop(socket: UtpSocket): Future[void] {.async.} =
  ## Loop that processes writes on socket
  try:
    while true:
@@ -603,7 +603,7 @@ proc writeLoop(socket: UtpSocket): Future[void] {.async.} =
    socket.writeQueue.clear()
    trace "writeLoop canceled"

proc startWriteLoop(s: UtpSocket) =
  s.writeLoop = writeLoop(s)

proc new[A](
@@ -689,7 +689,7 @@ proc newIncomingSocket*[A](
): UtpSocket[A] =
  let initialSeqNr = randUint16(rng)

  let (initialState, initialTimeout) =
    if (cfg.incomingSocketReceiveTimeout.isNone()):
      # it does not matter what timeout value we put here, as socket will be in
      # connected state without outgoing packets in buffer so any timeout hit will
@@ -724,7 +724,7 @@ proc startOutgoingSocket*(socket: UtpSocket): Future[void] {.async.} =
  socket.startTimeoutLoop()
  await socket.sendData(outgoingPacket.packetBytes)
  await socket.connectionFuture

proc startIncomingSocket*(socket: UtpSocket) {.async.} =
  # Make sure ack was flushed before moving forward
  await socket.sendAck()
@@ -769,7 +769,7 @@ proc updateTimeouts(socket: UtpSocket, timeSent: Moment, currentTime: Moment) =
  ## delta = rtt - packet_rtt
  ## rtt_var += (abs(delta) - rtt_var) / 4;
  ## rtt += (packet_rtt - rtt) / 8;

  let packetRtt = currentTime - timeSent

  if (socket.rtt.isZero):
@@ -787,7 +787,7 @@ proc updateTimeouts(socket: UtpSocket, timeSent: Moment, currentTime: Moment) =

  socket.rttVar = newVar
  socket.rtt = newRtt

  # according to spec it should be: timeout = max(rtt + rtt_var * 4, 500)
  # but usually spec lags after implementation so milliseconds(1000) is used
  socket.rto = max(socket.rtt + (socket.rttVar * 4), milliseconds(1000))
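The doc comments above quote the BEP-29-style smoothing rule. A self-contained sketch of the same arithmetic on plain millisecond integers, so the update can be followed without the surrounding socket state (the 1 s floor matches the `milliseconds(1000)` used in the hunk):

``` nim
# Standalone sketch of the RTT/RTO update quoted above:
#   delta    = rtt - packet_rtt
#   rtt_var += (abs(delta) - rtt_var) / 4
#   rtt     += (packet_rtt - rtt) / 8
#   rto      = max(rtt + rtt_var * 4, 1000)
proc updateRtt(rtt, rttVar: var int64, packetRtt: int64): int64 =
  let delta = rtt - packetRtt
  rttVar += (abs(delta) - rttVar) div 4
  rtt += (packetRtt - rtt) div 8
  max(rtt + rttVar * 4, 1000)

var rtt = 100'i64    # smoothed RTT, ms
var rttVar = 50'i64  # RTT variance, ms
let rto = updateRtt(rtt, rttVar, 140)  # one 140 ms sample
doAssert rtt == 105     # 100 + (140 - 100) div 8
doAssert rttVar == 48   # 50 + (40 - 50) div 4
doAssert rto == 1000    # 105 + 48 * 4 = 297, clamped to the 1 s floor
```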
@@ -798,15 +798,15 @@ proc ackPacket(socket: UtpSocket, seqNr: uint16, currentTime: Moment): AckResult
    let packet = packetOpt.get()

    if packet.transmissions == 0:
      # according to reference impl it can happen when we get an ack_nr that
      # does not exceed what we have stuffed into the outgoing buffer,
      # but does exceed what we have sent
      # TODO analyze if this case can happen with our impl
      return PacketNotSentYet

    socket.outBuffer.delete(seqNr)

    # from spec: The rtt and rtt_var is only updated for packets that were sent only once.
    # This avoids problems with figuring out which packet was acked, the first or the second one.
    # it is standard solution to retransmission ambiguity problem
    if packet.transmissions == 1:
@@ -830,7 +830,7 @@ proc ackPacket(socket: UtpSocket, seqNr: uint16, currentTime: Moment): AckResult
proc ackPackets(socket: UtpSocket, nrPacketsToAck: uint16, currentTime: Moment) =
  ## Ack packets in outgoing buffer based on ack number in the received packet
  var i = 0
-  while i < int(nrPacketsToack):
+  while i < int(nrPacketsToAck):
    let result = socket.ackPacket(socket.seqNr - socket.curWindowPackets, currentTime)
    case result
    of PacketAcked:
@@ -847,7 +847,7 @@ proc calculateAckedbytes(socket: UtpSocket, nrPacketsToAck: uint16, now: Moment)
  var i: uint16 = 0
  var ackedBytes: uint32 = 0
  var minRtt: Duration = InfiniteDuration
-  while i < nrPacketsToack:
+  while i < nrPacketsToAck:
    let seqNr = socket.seqNr - socket.curWindowPackets + i
    let packetOpt = socket.outBuffer.get(seqNr)
    if (packetOpt.isSome() and packetOpt.unsafeGet().transmissions > 0):
@@ -914,7 +914,7 @@ proc processPacket*(socket: UtpSocket, p: Packet) {.async.} =
  if acks > socket.curWindowPackets:
    # this case happens if the we already received this ack nr
    acks = 0

  # If packet is totally of the mark short circout the processing
  if pastExpected >= reorderBufferMaxSize:
    notice "Received packet is totally of the mark"
@@ -924,13 +924,13 @@ proc processPacket*(socket: UtpSocket, p: Packet) {.async.} =
  # TODO caluclate bytes acked by selective acks here (if thats the case)

  let sentTimeRemote = p.header.timestamp

  # we are using uint32 not a Duration, to wrap a round in case of
  # sentTimeRemote > receipTimestamp. This can happen as local and remote
  # clock can be not synchornized or even using different system clock.
  # i.e this number itself does not tell anything and is only used to feedback it
  # to remote peer with each sent packet
  let remoteDelay =
    if (sentTimeRemote == 0):
      0'u32
    else:
@@ -946,8 +946,8 @@ proc processPacket*(socket: UtpSocket, p: Packet) {.async.} =
  # remote new delay base is less than previous
  # shift our delay base in other direction to take clock skew into account
  # but no more than 10ms
  if (prevRemoteDelayBase != 0 and
    wrapCompareLess(socket.remoteHistogram.delayBase, prevRemoteDelayBase) and
    prevRemoteDelayBase - socket.remoteHistogram.delayBase <= 10000'u32):
    socket.ourHistogram.shift(prevRemoteDelayBase - socket.remoteHistogram.delayBase)

@@ -960,7 +960,7 @@ proc processPacket*(socket: UtpSocket, p: Packet) {.async.} =
  # adjust base delay if delay estimates exceeds rtt
  if (socket.ourHistogram.getValue() > minRtt):
    let diff = uint32((socket.ourHistogram.getValue() - minRtt).microseconds())
    socket.ourHistogram.shift(diff)

  let (newMaxWindow, newSlowStartTreshold, newSlowStart) =
    applyCongestionControl(
@@ -985,7 +985,7 @@ proc processPacket*(socket: UtpSocket, p: Packet) {.async.} =
    # when zeroWindowTimer will be hit and maxRemoteWindow still will be equal to 0
    # then it will be reset to minimal value
    socket.zeroWindowTimer = timestampInfo.moment + socket.socketConfig.remoteWindowResetTimeout

  # socket.curWindowPackets == acks means that this packet acked all remaining packets
  # including the sent fin packets
  if (socket.finSent and socket.curWindowPackets == acks):
@@ -998,7 +998,7 @@ proc processPacket*(socket: UtpSocket, p: Packet) {.async.} =
    socket.destroy()

  socket.ackPackets(acks, timestampInfo.moment)

  case p.header.pType
  of ST_DATA, ST_FIN:
    # To avoid amplification attacks, server socket is in SynRecv state until
@@ -1017,7 +1017,7 @@ proc processPacket*(socket: UtpSocket, p: Packet) {.async.} =
      if (len(p.payload) > 0 and (not socket.readShutdown)):
        # we are getting in order data packet, we can flush data directly to the incoming buffer
        await upload(addr socket.buffer, unsafeAddr p.payload[0], p.payload.len())
      # Bytes have been passed to upper layer, we can increase number of last
      # acked packet
      inc socket.ackNr

@@ -1029,7 +1029,7 @@ proc processPacket*(socket: UtpSocket, p: Packet) {.async.} =
      if ((not socket.reachedFin) and socket.gotFin and socket.eofPktNr == socket.ackNr):
        notice "Reached socket EOF"
        # In case of reaching eof, it is up to user of library what to to with
        # it. With the current implementation, the most apropriate way would be to
        # destory it (as with our implementation we know that remote is destroying its acked fin)
        # as any other send will either generate timeout, or socket will be forcefully
        # closed by reset
@@ -1043,14 +1043,14 @@ proc processPacket*(socket: UtpSocket, p: Packet) {.async.} =

        if socket.reorderCount == 0:
          break

        let nextPacketNum = socket.ackNr + 1

        let maybePacket = socket.inBuffer.get(nextPacketNum)

        if maybePacket.isNone():
          break

        let packet = maybePacket.unsafeGet()

        if (len(packet.payload) > 0 and (not socket.readShutdown)):
@@ -1066,7 +1066,7 @@ proc processPacket*(socket: UtpSocket, p: Packet) {.async.} =
      # how many concurrent tasks there are and how to cancel them when socket
      # is closed
      asyncSpawn socket.sendAck()

    # we got packet out of order
    else:
      notice "Got out of order packet"
@@ -1075,7 +1075,7 @@ proc processPacket*(socket: UtpSocket, p: Packet) {.async.} =
        notice "Got packet past eof"
        return

      # growing buffer before checking the packet is already there to avoid
      # looking at older packet due to indices wrap aroud
      socket.inBuffer.ensureSize(pkSeqNr + 1, pastExpected + 1)

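The comment about using `uint32` instead of a `Duration` for the remote delay deserves a tiny worked example: unsigned subtraction wraps, so a receive timestamp that is numerically smaller than the remote send timestamp still yields a usable difference. The variable names below are made up for illustration.

``` nim
# Sketch of the uint32 wrap-around behaviour the comment above relies on.
# Microsecond timestamps are truncated to 32 bits, so the receiver's value
# can be "behind" the sender's; unsigned subtraction still gives the delta
# modulo 2^32 instead of going negative.
let sentTimeRemote = 0xFFFF_FFF0'u32    # remote clock, close to wrapping
let receiveTimestamp = 0x0000_0010'u32  # local clock, already wrapped

let remoteDelay = receiveTimestamp - sentTimeRemote
doAssert remoteDelay == 0x20'u32        # 32 microseconds, despite the wrap
```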
@@ -1143,14 +1143,14 @@ proc closeWait*(socket: UtpSocket) {.async.} =
  socket.close()
  await socket.closeEvent.wait()

proc write*(socket: UtpSocket, data: seq[byte]): Future[WriteResult] =
  let retFuture = newFuture[WriteResult]("UtpSocket.write")

  if (socket.state != Connected):
    let res = Result[int, WriteError].err(WriteError(kind: SocketNotWriteable, currentState: socket.state))
    retFuture.complete(res)
    return retFuture

  # fin should be last packet received by remote side, therefore trying to write
  # after sending fin is considered error
  if socket.sendFinRequested or socket.finSent:
@@ -1159,12 +1159,12 @@ proc write*(socket: UtpSocket, data: seq[byte]): Future[WriteResult] =
    return retFuture

  var bytesWritten = 0

  if len(data) == 0:
    let res = Result[int, WriteError].ok(bytesWritten)
    retFuture.complete(res)
    return retFuture

  try:
    socket.writeQueue.putNoWait(WriteRequest(kind: Data, data: data, writer: retFuture))
  except AsyncQueueFullError as e:
@@ -1209,7 +1209,7 @@ proc read*(socket: UtpSocket): Future[seq[byte]] {.async.}=
  var bytes = newSeq[byte]()

  readLoop():
    if socket.readingClosed():
      (0, true)
    else:
      let count = socket.buffer.dataLen()
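Since `write` resolves to a `Result[int, WriteError]` rather than raising, callers are expected to branch on the error kind. A hedged sketch (module path and the surrounding, already-connected socket are assumptions):

``` nim
# Hedged sketch of consuming the write/read API above.
import chronos
import eth/utp/utp_socket

proc sendAndEcho(socket: UtpSocket[TransportAddress]) {.async.} =
  let wr = await socket.write(@[byte 1, 2, 3, 4])
  if wr.isErr():
    case wr.error().kind
    of SocketNotWriteable:
      echo "socket not connected, state: ", wr.error().currentState
    of FinSent:
      echo "FIN already sent, no further writes allowed"
  else:
    echo "queued ", wr.get(), " bytes"
    # read(n) completes once n bytes are available (see the tests below,
    # which call `read(len(serverData))`)
    let echoed = await socket.read(4)
    echo "got ", echoed.len, " bytes back"
```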
@@ -1237,7 +1237,7 @@ proc numOfBytesInFlight*(socket: UtpSocket): uint32 = socket.sendBufferTracker.c
# It throws assertion error when number of elements in buffer do not equal kept counter
proc numPacketsInReordedBuffer*(socket: UtpSocket): int =
  var num = 0
-  for e in socket.inBUffer.items():
+  for e in socket.inBuffer.items():
    if e.isSome():
      inc num
  doAssert(num == int(socket.reorderCount))
@@ -29,7 +29,7 @@ procSuite "SqStoreRef":

    let insertStmt = db.prepareStmt(
      "INSERT INTO records(value) VALUES (?);",
-      openarray[byte], void).get
+      openArray[byte], void).get

    let insert1Res = insertStmt.exec [byte 1, 2, 3, 4]
    let insert2Res = insertStmt.exec @[]
@@ -63,11 +63,11 @@ procSuite "SqStoreRef":

    let selectRangeStmt = db.prepareStmt(
      "SELECT value FROM records WHERE key >= ? and key < ?;",
-      (int64, int64), openarray[byte]).get
+      (int64, int64), openArray[byte]).get

    block:
      var allBytes = newSeq[byte]()
-      let selectRangeRes = selectRangeStmt.exec((0'i64, 5'i64)) do (bytes: openarray[byte]) {.gcsafe.}:
+      let selectRangeRes = selectRangeStmt.exec((0'i64, 5'i64)) do (bytes: openArray[byte]) {.gcsafe.}:
        allBytes.add byte(bytes.len)
        allBytes.add bytes

@@ -80,7 +80,7 @@ procSuite "SqStoreRef":
        0,
        1, 5]
    block:
-      let selectRangeRes = selectRangeStmt.exec((10'i64, 20'i64)) do (bytes: openarray[byte]):
+      let selectRangeRes = selectRangeStmt.exec((10'i64, 20'i64)) do (bytes: openArray[byte]):
        echo "Got unexpected bytes: ", bytes

      check:
@@ -106,7 +106,7 @@ procSuite "SqStoreRef":
    ]

  test "Tuple with byte arrays support":
-    # openarray[byte] requires either Nim 1.4
+    # openArray[byte] requires either Nim 1.4
    # or hardcoding the seq[byte] and array[N, byte] paths
    let db = SqStoreRef.init("", "test", inMemory = true)[]
    defer: db.close()
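The typed prepared-statement API exercised in these tests composes into a compact pattern: prepare once with parameter and result types, then `exec` repeatedly. A hedged sketch; the module path and the pre-existing `records` table are assumptions taken from the surrounding suite, not shown in the diff.

``` nim
# Hedged sketch of the SqStoreRef prepared-statement flow shown above.
import eth/db/kvstore_sqlite3

let db = SqStoreRef.init("", "example", inMemory = true)[]
defer: db.close()

# A `records(key INTEGER PRIMARY KEY, value BLOB)` table is assumed to exist,
# as it does in the test suite this is taken from.
let put = db.prepareStmt(
  "INSERT INTO records(value) VALUES (?);",
  openArray[byte], void).get
let putRes = put.exec [byte 1, 2, 3]
doAssert putRes.isOk()

let getRange = db.prepareStmt(
  "SELECT value FROM records WHERE key >= ? and key < ?;",
  (int64, int64), openArray[byte]).get
let getRes = getRange.exec((0'i64, 5'i64)) do (bytes: openArray[byte]):
  echo "row blob has ", bytes.len, " bytes"
doAssert getRes.isOk()
```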
@@ -6,14 +6,14 @@ test:
  block:
    let decoded = decodeMessage(payload)

-    if decoded.isOK():
+    if decoded.isOk():
      let message = decoded.get()
      var encoded: seq[byte]
      case message.kind
      of unused: break
      of ping: encoded = encodeMessage(message.ping, message.reqId)
      of pong: encoded = encodeMessage(message.pong, message.reqId)
-      of findNode: encoded = encodeMessage(message.findNode, message.reqId)
+      of findnode: encoded = encodeMessage(message.findNode, message.reqId)
      of nodes: encoded = encodeMessage(message.nodes, message.reqId)
      of talkreq: encoded = encodeMessage(message.talkreq, message.reqId)
      of talkresp: encoded = encodeMessage(message.talkresp, message.reqId)
@@ -10,7 +10,7 @@ type
    test1: uint32
    test2: string

-template testDecode(payload: openarray, T: type) =
+template testDecode(payload: openArray, T: type) =
  try:
    discard rlp.decode(payload, T)
  except RlpError as e:
@@ -245,7 +245,7 @@ suite "KeyFile test suite":
      jobject = createKeyFileJson(seckey1, "miawmiawcat", 3, AES128CTR, SCRYPT)[]
      privKey = decodeKeyFileJson(jobject, "miawmiawcat")[]

-    check privKey.toRaw == secKey1.toRaw
+    check privKey.toRaw == seckey1.toRaw

  test "Load non-existent pathname test":
    check:
@@ -16,7 +16,7 @@ import

from strutils import toLowerAscii

-proc compare(x: openarray[byte], y: openarray[byte]): bool =
+proc compare(x: openArray[byte], y: openArray[byte]): bool =
  result = len(x) == len(y)
  if result:
    for i in 0..(len(x) - 1):
@@ -11,8 +11,8 @@ proc localAddress*(port: int): Address =

proc initDiscoveryNode*(rng: ref BrHmacDrbgContext, privKey: PrivateKey,
    address: Address,
-    bootstrapRecords: openarray[Record] = [],
-    localEnrFields: openarray[(string, seq[byte])] = [],
+    bootstrapRecords: openArray[Record] = [],
+    localEnrFields: openArray[(string, seq[byte])] = [],
    previousRecord = none[enr.Record]()):
    discv5_protocol.Protocol =
  # set bucketIpLimit to allow bucket split
@@ -30,13 +30,13 @@ proc initDiscoveryNode*(rng: ref BrHmacDrbgContext, privKey: PrivateKey,

  result.open()

-proc nodeIdInNodes*(id: NodeId, nodes: openarray[Node]): bool =
+proc nodeIdInNodes*(id: NodeId, nodes: openArray[Node]): bool =
  for n in nodes:
    if id == n.id: return true

proc generateNode*(privKey: PrivateKey, port: int = 20302,
    ip: ValidIpAddress = ValidIpAddress.init("127.0.0.1"),
-    localEnrFields: openarray[FieldPair] = []): Node =
+    localEnrFields: openArray[FieldPair] = []): Node =
  let port = Port(port)
  let enr = enr.Record.init(1, privKey, some(ip),
    some(port), some(port), localEnrFields).expect("Properly intialized private key")
@@ -33,7 +33,7 @@ proc packData(payload: openArray[byte], pk: PrivateKey): seq[byte] =
    msgHash = keccak256.digest(signature & payloadSeq)
  result = @(msgHash.data) & signature & payloadSeq

-proc nodeIdInNodes(id: NodeId, nodes: openarray[Node]): bool =
+proc nodeIdInNodes(id: NodeId, nodes: openArray[Node]): bool =
  for n in nodes:
    if id == n.id: return true

@@ -676,7 +676,7 @@ suite "Discovery v5 Tests":
    # Check handshake duplicates
    check receiveNode.codec.handshakes.len == 1
    # Check if it is for the first packet that a handshake is stored
-    let key = HandShakeKey(nodeId: sendNode.id, address: a)
+    let key = HandshakeKey(nodeId: sendNode.id, address: a)
    check receiveNode.codec.handshakes[key].whoareyouData.requestNonce ==
      firstRequestNonce

@@ -292,7 +292,7 @@ suite "Discovery v5.1 Packet Encodings Test Vectors":
    let decoded = codecB.decodePacket(nodeA.address.get(),
      hexToSeqByte(encodedPacket))
    check:
-      decoded.isOK()
+      decoded.isOk()
      decoded.get().messageOpt.isSome()
      decoded.get().messageOpt.get().reqId.id == hexToSeqByte(pingReqId)
      decoded.get().messageOpt.get().kind == ping
@@ -313,7 +313,7 @@ suite "Discovery v5.1 Packet Encodings Test Vectors":
      hexToSeqByte(encodedPacket))

    check:
-      decoded.isOK()
+      decoded.isOk()
      decoded.get().flag == Flag.Whoareyou
      decoded.get().whoareyou.requestNonce == hexToByteArray[gcmNonceSize](whoareyouRequestNonce)
      decoded.get().whoareyou.idNonce == hexToByteArray[idNonceSize](whoareyouIdNonce)
@@ -352,7 +352,7 @@ suite "Discovery v5.1 Packet Encodings Test Vectors":
        challengeData: hexToSeqByte(whoareyouChallengeData))
      pubkey = some(privKeyA.toPublicKey())
      challenge = Challenge(whoareyouData: whoareyouData, pubkey: pubkey)
-      key = HandShakeKey(nodeId: nodeA.id, address: nodeA.address.get())
+      key = HandshakeKey(nodeId: nodeA.id, address: nodeA.address.get())

    check: not codecB.handshakes.hasKeyOrPut(key, challenge)

@@ -402,7 +402,7 @@ suite "Discovery v5.1 Packet Encodings Test Vectors":
        challengeData: hexToSeqByte(whoareyouChallengeData))
      pubkey = none(PublicKey)
      challenge = Challenge(whoareyouData: whoareyouData, pubkey: pubkey)
-      key = HandShakeKey(nodeId: nodeA.id, address: nodeA.address.get())
+      key = HandshakeKey(nodeId: nodeA.id, address: nodeA.address.get())

    check: not codecB.handshakes.hasKeyOrPut(key, challenge)

@@ -520,7 +520,7 @@ suite "Discovery v5.1 Additional Encode/Decode":

    let decoded = codecB.decodePacket(nodeA.address.get(), data)

-    let key = HandShakeKey(nodeId: nodeB.id, address: nodeB.address.get())
+    let key = HandshakeKey(nodeId: nodeB.id, address: nodeB.address.get())
    var challenge: Challenge

    check:
@@ -14,7 +14,7 @@ import
  nimcrypto/[utils, sha2, hmac, rijndael],
  ../../eth/keys, ../../eth/p2p/ecies

-proc compare[A, B](x: openarray[A], y: openarray[B], s: int = 0): bool =
+proc compare[A, B](x: openArray[A], y: openArray[B], s: int = 0): bool =
  result = true
  doAssert(s >= 0)
  var size = if s == 0: min(len(x), len(y)) else: min(s, min(len(x), len(y)))
@@ -6,7 +6,7 @@ import
  ../../eth/keys, ../../eth/p2p/discoveryv5/[routing_table, node, enr],
  ./discv5_test_helper

-func customDistance*(a, b: NodeId): Uint256 =
+func customDistance*(a, b: NodeId): UInt256 =
  if a >= b:
    a - b
  else:
@@ -31,8 +31,8 @@ suite "Routing Table Tests":
    bucketIpLimit: BUCKET_SIZE + REPLACEMENT_CACHE_SIZE + 1)

  let customDistanceCalculator = DistanceCalculator(
    calculateDistance: customDistance,
    calculateLogDistance: customLogDistance,
    calculateIdAtDistance: customIdAdDist)

  test "Add local node":
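The `DistanceCalculator` wiring above is the hook for plugging a different metric into the routing table. A hedged sketch of a complete callback set built around the same linear "absolute difference" metric as `customDistance`; the helper bodies, import paths, and exact callback field types are assumptions for illustration only.

``` nim
# Hedged sketch: a full DistanceCalculator using a linear distance metric.
import stint
import eth/p2p/discoveryv5/[node, routing_table]

func linDistance(a, b: NodeId): UInt256 =
  if a >= b: a - b else: b - a

func linLogDistance(a, b: NodeId): uint16 =
  # position of the highest set bit of the linear distance
  var d = linDistance(a, b)
  var bits = 0'u16
  while not d.isZero:
    d = d shr 1
    inc bits
  bits

func linIdAtDistance(id: NodeId, dist: uint16): NodeId =
  id + stuint(dist, 256)

let calculator = DistanceCalculator(
  calculateDistance: linDistance,
  calculateLogDistance: linLogDistance,
  calculateIdAtDistance: linIdAtDistance)
```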
@@ -226,11 +226,11 @@ suite "Whisper envelope":
    var env = Envelope(ttl: 1, data: @[byte 0xde, 0xad, 0xbe, 0xef])
    # PoW calculation with no leading zeroes
    env.nonce = 100000
-    check hashAndPoW(env) == ("A788E02A95BFC673709E97CA81E39CA903BAD5638D3388964C51EB64952172D6",
+    check hashAndPow(env) == ("A788E02A95BFC673709E97CA81E39CA903BAD5638D3388964C51EB64952172D6",
      0.07692307692307693)
    # PoW calculation with 8 leading zeroes
    env.nonce = 276
-    check hashAndPoW(env) == ("00E2374C6353C243E4073E209A7F2ACB2506522AF318B3B78CF9A88310A2A11C",
+    check hashAndPow(env) == ("00E2374C6353C243E4073E209A7F2ACB2506522AF318B3B78CF9A88310A2A11C",
      19.692307692307693)

suite "Whisper queue":
@@ -105,7 +105,7 @@ suite "test api usage":
      rlp.listElem(2).toString == "Donec ligula tortor, egestas eu est vitae"

    # test creating RLPs from other RLPs
-    var list = rlpFromBytes encodeList(rlp.listELem(1), rlp.listELem(0))
+    var list = rlpFromBytes encodeList(rlp.listElem(1), rlp.listElem(0))

    # test that iteration with enterList/skipElem works as expected
    doAssert list.enterList # We already know that we are working with a list
@@ -191,7 +191,7 @@ suite "test api usage":
      Inf, NegInf, NaN]:

      template isNaN(n): bool =
-        classify(n) == fcNaN
+        classify(n) == fcNan

      template chk(input) =
        let restored = decode(encode(input), float64)
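The `listElem`/`encodeList`/`rlpFromBytes` combination used in this test is the general pattern for re-assembling RLP items. A small hedged sketch (the import path follows the repository layout; the values are made up):

``` nim
# Hedged sketch: build an RLP list, inspect elements, and recombine them
# into a new list, mirroring the calls used in the test above.
import eth/rlp

let original = encodeList("hello", "world")
let reader = rlpFromBytes(original)

doAssert reader.isList()
doAssert reader.listElem(0).toString == "hello"

# Re-encode the elements in reverse order into a fresh list.
let swapped = rlpFromBytes encodeList(reader.listElem(1), reader.listElem(0))
doAssert swapped.listElem(0).toString == "world"
```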
@@ -14,7 +14,7 @@ type
proc cmp(lhs, rhs: TestOp): int = cmp(lhs.idx, rhs.idx)
proc `<=`(lhs, rhs: TestOp): bool = lhs.idx <= rhs.idx

-proc runSingleTest(testSequence: openarray[TestOp],
+proc runSingleTest(testSequence: openArray[TestOp],
                   secureMode: bool,
                   expectedRootHash: string): bool =
  var
@@ -21,8 +21,8 @@ proc localAddress*(port: int): Address =
proc initDiscoveryNode*(rng: ref BrHmacDrbgContext,
    privKey: PrivateKey,
    address: Address,
-    bootstrapRecords: openarray[Record] = [],
-    localEnrFields: openarray[(string, seq[byte])] = [],
+    bootstrapRecords: openArray[Record] = [],
+    localEnrFields: openArray[(string, seq[byte])] = [],
    previousRecord = none[enr.Record]()): discv5_protocol.Protocol =
  # set bucketIpLimit to allow bucket split
  let tableIpLimits = TableIpLimits(tableIpLimit: 1000, bucketIpLimit: 24)
@@ -213,7 +213,7 @@ procSuite "Utp protocol over discovery v5 tests":

    check:
      wResult.isOk()

    let readData = await clientSocket.read(len(serverData))

    check:
@@ -24,9 +24,9 @@ suite "Utp packets encoding/decoding":
    check:
      len(encoded) == 20
      decoded.isOk()

    let synPacketDec = decoded.get()

    check:
      synPacketDec == synPacket

@@ -38,9 +38,9 @@ suite "Utp packets encoding/decoding":
    check:
      len(encoded) == 20
      decoded.isOk()

    let finPacketDec = decoded.get()

    check:
      finPacketDec == finPacket

@@ -52,9 +52,9 @@ suite "Utp packets encoding/decoding":
    check:
      len(encoded) == 20
      decoded.isOk()

    let resetPacketDec = decoded.get()

    check:
      resetPacketDec == resetPacket

@@ -66,9 +66,9 @@ suite "Utp packets encoding/decoding":
    check:
      len(encoded) == 20
      decoded.isOk()

    let ackPacketDec = decoded.get()

    check:
      ackPacketDec == ackPacket

@@ -81,9 +81,9 @@ suite "Utp packets encoding/decoding":
    check:
      len(encoded) == 26
      decoded.isOk()

    let ackPacketDec = decoded.get()

    check:
      ackPacketDec == ackPacket
      ackPacketDec.eack.isSome()
@@ -112,7 +112,7 @@ suite "Utp packets encoding/decoding":
    # delete last byte, now packet is to short
    encoded3.del(encoded3.high)
    let err3 = decodePacket(encoded3)

    check:
      err3.isErr()
      err3.error() == "Packet too short for selective ack extension"
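These cases all follow the same encode → decode → compare shape. A compact sketch of that round trip outside the test framework, using `resetPacket` whose signature is shown in full earlier; only the module path is an assumption.

``` nim
# Hedged sketch: a reset-packet round trip through encodePacket/decodePacket,
# using the helpers exercised by the test suite above.
import eth/utp/packets

let original = resetPacket(1'u16, 10'u16, 5'u16)
let wire = encodePacket(original)
doAssert wire.len == 20          # plain header, no extensions

let decoded = decodePacket(wire)
doAssert decoded.isOk()
doAssert decoded.get() == original
```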
@ -36,13 +36,13 @@ proc allowOneIdCallback(allowedId: uint16): AllowConnectionCallback[TransportAdd
connectionId == allowedId
)

proc transferData(sender: UtpSocket[TransportAddress], receiver: UtpSocket[TransportAddress], data: seq[byte]): Future[seq[byte]] {.async.}=
proc transferData(sender: UtpSocket[TransportAddress], receiver: UtpSocket[TransportAddress], data: seq[byte]): Future[seq[byte]] {.async.} =
let bytesWritten = await sender.write(data)
doAssert bytesWritten.get() == len(data)
let received = await receiver.read(len(data))
return received

type
ClientServerScenario = object
utp1: UtpProtocol
utp2: UtpProtocol

@ -94,7 +94,7 @@ proc init2ClientsServerScenario(): Future[TwoClientsServerScenario] {.async.} =

let address3 = initTAddress("127.0.0.1", 9081)
let utpProt3 = UtpProtocol.new(registerIncomingSocketCallback(serverSockets), address3)

let clientSocket1 = await utpProt1.connectTo(address2)
let clientSocket2 = await utpProt1.connectTo(address3)

@ -135,13 +135,13 @@ procSuite "Utp protocol over udp tests":
let sock = sockResult.get()
# this future will be completed when we called accepted connection callback
await server2Called.wait()

check:
sock.isConnected()
# after successful connection outgoing buffer should be empty as syn packet
# should be correctly acked
sock.numPacketsInOutGoingBuffer() == 0

server2Called.isSet()

await utpProt1.shutdownWait()

@ -155,7 +155,7 @@ procSuite "Utp protocol over udp tests":
let address1 = initTAddress("127.0.0.1", 9080)

let connectionResult = await utpProt1.connectTo(address1)

check:
connectionResult.isErr()

@ -165,7 +165,7 @@ procSuite "Utp protocol over udp tests":
connectionError.kind == ConnectionTimedOut

await waitUntil(proc (): bool = utpProt1.openSockets() == 0)

check:
utpProt1.openSockets() == 0

@ -177,7 +177,7 @@ procSuite "Utp protocol over udp tests":
let utpProt1 = UtpProtocol.new(setAcceptedCallback(server1Called), address, SocketConfig.init(milliseconds(500)))

let address1 = initTAddress("127.0.0.1", 9080)

let futSock = utpProt1.connectTo(address1)

# waiting 400 milisecond will trigger at least one re-send

@ -188,11 +188,11 @@ procSuite "Utp protocol over udp tests":

# this future will be completed when we called accepted connection callback
await server2Called.wait()

yield futSock

check:
futSock.finished() and (not futsock.failed()) and (not futsock.cancelled())
futSock.finished() and (not futSock.failed()) and (not futSock.cancelled())
server2Called.isSet()

await utpProt1.shutdownWait()

@ -200,7 +200,7 @@ procSuite "Utp protocol over udp tests":

asyncTest "Success data transfer when data fits into one packet":
let s = await initClientServerScenario()

check:
s.clientSocket.isConnected()
# after successful connection outgoing buffer should be empty as syn packet

@ -227,7 +227,7 @@ procSuite "Utp protocol over udp tests":

asyncTest "Success data transfer when data need to be sliced into multiple packets":
let s = await initClientServerScenario()

check:
s.clientSocket.isConnected()
# after successful connection outgoing buffer should be empty as syn packet

@ -238,7 +238,7 @@ procSuite "Utp protocol over udp tests":

# 5000 bytes is over maximal packet size
let bytesToTransfer = generateByteArray(rng[], 5000)

let bytesReceivedFromClient = await transferData(s.clientSocket, s.serverSocket, bytesToTransfer)
let bytesReceivedFromServer = await transferData(s.serverSocket, s.clientSocket, bytesToTransfer)

@ -267,7 +267,7 @@ procSuite "Utp protocol over udp tests":

# 5000 bytes is over maximal packet size
let bytesToTransfer = generateByteArray(rng[], 5000)

let written = await s.clientSocket.write(bytesToTransfer)

check:

@ -281,7 +281,7 @@ procSuite "Utp protocol over udp tests":
written1.get() == len(bytesToTransfer)

let bytesReceived = await s.serverSocket.read(len(bytesToTransfer) + len(bytesToTransfer1))

# ultimatly all send packets will acked, and outgoing buffer will be empty
await waitUntil(proc (): bool = s.clientSocket.numPacketsInOutGoingBuffer() == 0)
@ -293,20 +293,20 @@ procSuite "Utp protocol over udp tests":

asyncTest "Success data transfers from multiple clients":
let s = await init2ClientsServerScenario()

check:
s.clientSocket1.isConnected()
s.clientSocket2.isConnected()
s.clientSocket1.numPacketsInOutGoingBuffer() == 0
s.clientSocket2.numPacketsInOutGoingBuffer() == 0

let numBytesToTransfer = 5000
let client1Data = generateByteArray(rng[], numBytesToTransfer)
let client2Data = generateByteArray(rng[], numBytesToTransfer)

discard s.clientSocket1.write(client1Data)
discard s.clientSocket2.write(client2Data)

let server1ReadBytes = await s.serverSocket1.read(numBytesToTransfer)
let server2ReadBytes = await s.serverSocket2.read(numBytesToTransfer)

@ -386,7 +386,7 @@ procSuite "Utp protocol over udp tests":
var serverSockets = newAsyncQueue[UtpSocket[TransportAddress]]()
var server1Called = newAsyncEvent()
let address1 = initTAddress("127.0.0.1", 9079)
let utpProt1 =
UtpProtocol.new(setAcceptedCallback(server1Called), address1, SocketConfig.init(lowSynTimeout))

let address2 = initTAddress("127.0.0.1", 9080)

@ -401,7 +401,7 @@ procSuite "Utp protocol over udp tests":
SocketConfig.init(),
allowOneIdCallback(allowedId)
)

let allowedSocketRes = await utpProt1.connectTo(address3, allowedId)
let notAllowedSocketRes = await utpProt2.connectTo(address3, allowedId + 1)

@ -424,7 +424,7 @@ procSuite "Utp protocol over udp tests":

asyncTest "Success data transfer of a lot of data should increase available window on sender side":
let s = await initClientServerScenario()

check:
s.clientSocket.isConnected()
# initially window has value equal to some pre configured constant

@ -437,7 +437,7 @@ procSuite "Utp protocol over udp tests":

# big transfer of 50kb
let bytesToTransfer = generateByteArray(rng[], 50000)

let bytesReceivedFromClient = await transferData(s.clientSocket, s.serverSocket, bytesToTransfer)

# ultimatly all send packets will acked, and outgoing buffer will be empty

@ -455,7 +455,7 @@ procSuite "Utp protocol over udp tests":

asyncTest "Not used socket should decay its max send window":
let s = await initClientServerScenario()

check:
s.clientSocket.isConnected()
# initially window has value equal to some pre configured constant

@ -468,7 +468,7 @@ procSuite "Utp protocol over udp tests":

# big transfer of 50kb
let bytesToTransfer = generateByteArray(rng[], 50000)

let bytesReceivedFromClient = await transferData(s.clientSocket, s.serverSocket, bytesToTransfer)

# ultimatly all send packets will acked, and outgoing buffer will be empty
@ -52,9 +52,9 @@ procSuite "Utp router unit tests":
r: UtpRouter[int],
remote: int,
pq: AsyncQueue[(Packet, int)],
initialRemoteSeq: uint16): (UtpSocket[int], Packet)=
let connectFuture = router.connectTo(remote)

let (initialPacket, sender) = await pq.get()

check:

@ -62,10 +62,10 @@ procSuite "Utp router unit tests":

let responseAck =
ackPacket(
initialRemoteSeq,
initialPacket.header.connectionId,
initialPacket.header.seqNr,
testBufferSize,
0
)

@ -98,7 +98,7 @@ procSuite "Utp router unit tests":

asyncTest "Incoming connection should be closed when not receving data for period of time when configured":
let q = newAsyncQueue[UtpSocket[int]]()
let router =
UtpRouter[int].new(
registerIncomingSocketCallback(q),
SocketConfig.init(incomingSocketReceiveTimeout = some(seconds(2))),

@ -123,7 +123,7 @@ procSuite "Utp router unit tests":

asyncTest "Incoming connection should be in connected state when configured":
let q = newAsyncQueue[UtpSocket[int]]()
let router =
UtpRouter[int].new(
registerIncomingSocketCallback(q),
SocketConfig.init(incomingSocketReceiveTimeout = none[Duration]()),

@ -150,7 +150,7 @@ procSuite "Utp router unit tests":
asyncTest "Incoming connection should change state to connected when receiving data packet":
let q = newAsyncQueue[UtpSocket[int]]()
let pq = newAsyncQueue[(Packet, int)]()
let router =
UtpRouter[int].new(
registerIncomingSocketCallback(q),
SocketConfig.init(incomingSocketReceiveTimeout = some(seconds(3))),

@ -168,20 +168,20 @@ procSuite "Utp router unit tests":

let (initialPacket, _) = await pq.get()
let socket = await q.get()

check:
router.len() == 1
# socket is not configured to be connected until receiving data
not socket.isConnected()

let encodedData =
encodePacket(
dataPacket(
initSeq + 1,
initConnId + 1,
initialPacket.header.seqNr - 1,
10,
dataToSend,
0
)
)

@ -264,7 +264,7 @@ procSuite "Utp router unit tests":

let requestedConnectionId = 1'u16
let connectFuture = router.connectTo(testSender2, requestedConnectionId)

let (initialPacket, sender) = await pq.get()

check:

@ -274,17 +274,17 @@ procSuite "Utp router unit tests":

let responseAck =
ackPacket(
initialRemoteSeq,
initialPacket.header.connectionId,
initialPacket.header.seqNr,
testBufferSize,
0
)

await router.processIncomingBytes(encodePacket(responseAck), testSender2)

let outgoingSocket = await connectFuture

check:
outgoingSocket.get().isConnected()
router.len() == 1

@ -302,7 +302,7 @@ procSuite "Utp router unit tests":
router.sendCb = initTestSnd(pq)

let connectFuture = router.connectTo(testSender2)

let (initialPacket, sender) = await pq.get()

check:

@ -322,14 +322,14 @@ procSuite "Utp router unit tests":
router.sendCb = initTestSnd(pq)

let connectFuture = router.connectTo(testSender2)

let (initialPacket, sender) = await pq.get()

check:
initialPacket.header.pType == ST_SYN
router.len() == 1

await connectFuture.cancelAndWait()

check:
router.len() == 0

@ -353,7 +353,7 @@ procSuite "Utp router unit tests":
connectResult.error().kind == ErrorWhileSendingSyn
cast[TestError](connectResult.error().error) is TestError
router.len() == 0

asyncTest "Router should clear closed outgoing connections":
let q = newAsyncQueue[UtpSocket[int]]()
let pq = newAsyncQueue[(Packet, int)]()

@ -409,7 +409,7 @@ procSuite "Utp router unit tests":

check:
router.len() == 0

asyncTest "Router close outgoing connection which receives reset":
let q = newAsyncQueue[UtpSocket[int]]()
let pq = newAsyncQueue[(Packet, int)]()
@ -71,7 +71,7 @@ procSuite "Utp socket unit test":
check:
initialPacket.header.pType == ST_SYN

let responseAck =
ackPacket(
initialRemoteSeq,
initialPacket.header.connectionId,

@ -159,7 +159,7 @@ procSuite "Utp socket unit test":

let (outgoingSocket, initialPacket) = connectOutGoingSocket(initalRemoteSeqNr, q)

let dataP1 =
dataPacket(
initalRemoteSeqNr,
initialPacket.header.connectionId,

@ -168,7 +168,7 @@ procSuite "Utp socket unit test":
data,
0
)

await outgoingSocket.processPacket(dataP1)
let ack1 = await q.get()

@ -205,7 +205,7 @@ procSuite "Utp socket unit test":
check:
ack2.header.pType == ST_STATE
# we are acking in one shot whole 10 packets
ack2.header.ackNr == initalRemoteSeqNr + uint16(len(packets) - 1)

let receivedData = await outgoingSocket.read(len(data))

@ -213,7 +213,7 @@ procSuite "Utp socket unit test":
receivedData == data

await outgoingSocket.destroyWait()

asyncTest "Processing out of order data packet should ignore duplicated not ordered packets":
# TODO test is valid until implementing selective acks
let q = newAsyncQueue[Packet]()

@ -240,15 +240,15 @@ procSuite "Utp socket unit test":
check:
ack2.header.pType == ST_STATE
# we are acking in one shot whole 10 packets
ack2.header.ackNr == initalRemoteSeqNr + uint16(len(packets) - 1)

let receivedData = await outgoingSocket.read(len(data))

check:
receivedData == data

await outgoingSocket.destroyWait()

asyncTest "Processing packets in random order":
# TODO test is valid until implementing selective acks
let q = newAsyncQueue[Packet]()

@ -274,7 +274,7 @@ procSuite "Utp socket unit test":
# as they can be fired at any point. What matters is that data is passed
# in same order as received.
receivedData == data

await outgoingSocket.destroyWait()

asyncTest "Ignoring totally out of order packet":

@ -332,7 +332,7 @@ procSuite "Utp socket unit test":

await outgoingSocket.processPacket(responseAck)

check:
outgoingSocket.numPacketsInOutGoingBuffer() == 0

await outgoingSocket.destroyWait()

@ -356,7 +356,7 @@ procSuite "Utp socket unit test":
not writeFut1.finished()
not writeFut2.finished()

outgoingSocket.destroy()

yield writeFut1
yield writeFut2

@ -367,7 +367,7 @@ procSuite "Utp socket unit test":
writeFut1.read().isErr()
writeFut2.read().isErr()

await outgoingSocket.destroyWait()

asyncTest "Cancelled write futures should not be processed if cancelled before processing":
let q = newAsyncQueue[Packet]()

@ -416,14 +416,14 @@ procSuite "Utp socket unit test":
p1.payload == dataToWrite1
p2.payload == dataToWrite3

await outgoingSocket.destroyWait()

asyncTest "Socket should re-send data packet configurable number of times before declaring failure":
let q = newAsyncQueue[Packet]()
let initalRemoteSeqNr = 10'u16

let outgoingSocket = newOutgoingSocket[TransportAddress](
testAddress,
initTestSnd(q),
SocketConfig.init(milliseconds(3000), 2),
defaultRcvOutgoingId,

@ -437,7 +437,7 @@ procSuite "Utp socket unit test":
check:
initialPacket.header.pType == ST_SYN

let responseAck =
ackPacket(
initalRemoteSeqNr,
initialPacket.header.connectionId,

@ -499,13 +499,13 @@ procSuite "Utp socket unit test":

let finP =
finPacket(
initialRemoteSeq,
initialPacket.header.connectionId,
initialPacket.header.seqNr,
testBufferSize,
0
)

await outgoingSocket.processPacket(finP)
let ack1 = await q.get()
@ -525,35 +525,35 @@ procSuite "Utp socket unit test":

let readF = outgoingSocket.read()

let dataP =
dataPacket(
initialRemoteSeq,
initialPacket.header.connectionId,
initialPacket.header.seqNr,
testBufferSize,
data,
0
)

let dataP1 =
dataPacket(
initialRemoteSeq + 1,
initialPacket.header.connectionId,
initialPacket.header.seqNr,
testBufferSize,
data1,
0
)

let finP =
finPacket(
initialRemoteSeq + 2,
initialPacket.header.connectionId,
initialPacket.header.seqNr,
testBufferSize,
0
)

await outgoingSocket.processPacket(finP)

check:
@ -565,7 +565,7 @@ procSuite "Utp socket unit test":
check:
not readF.finished()
not outgoingSocket.atEof()

await outgoingSocket.processPacket(dataP)

let bytes = await readF

@ -587,34 +587,34 @@ procSuite "Utp socket unit test":

let readF = outgoingSocket.read()

let dataP =
dataPacket(
initialRemoteSeq,
initialPacket.header.connectionId,
initialPacket.header.seqNr,
testBufferSize,
data,
0
)

let finP =
finPacket(
initialRemoteSeq + 1,
initialPacket.header.connectionId,
initialPacket.header.seqNr,
testBufferSize,
0
)

# dataP1 has seqNr larger than fin, there fore it should be considered past eof and never passed
# to user of library
let dataP1 =
dataPacket(
initialRemoteSeq + 2,
initialPacket.header.connectionId,
initialPacket.header.seqNr,
testBufferSize,
data1,
0
)

@ -630,7 +630,7 @@ procSuite "Utp socket unit test":
check:
not readF.finished()
not outgoingSocket.atEof()

await outgoingSocket.processPacket(dataP)

# it is in order dataP1, as we have now processed dataP + fin which came before

@ -674,20 +674,20 @@ procSuite "Utp socket unit test":
check:
sendFin.header.pType == ST_FIN

let responseAck =
ackPacket(
initialRemoteSeq,
initialPacket.header.connectionId,
sendFin.header.seqNr,
testBufferSize,
0
)

await outgoingSocket.processPacket(responseAck)

check:
not outgoingSocket.isConnected()

await outgoingSocket.destroyWait()

asyncTest "Trying to write data onto closed socket should return error":

@ -737,16 +737,16 @@ procSuite "Utp socket unit test":
let sCfg = SocketConfig.init(optRcvBuffer = initialRcvBufferSize)
let (outgoingSocket, initialPacket) = connectOutGoingSocket(initialRemoteSeqNr, q, testBufferSize, sCfg)

let dataP1 =
dataPacket(
initialRemoteSeqNr,
initialPacket.header.connectionId,
initialPacket.header.seqNr,
testBufferSize,
data,
0
)

await outgoingSocket.processPacket(dataP1)

let ack1 = await q.get()

@ -782,16 +782,16 @@ procSuite "Utp socket unit test":
let sCfg = SocketConfig.init(optRcvBuffer = initialRcvBufferSize)
let (outgoingSocket, initialPacket) = connectOutGoingSocket(initalRemoteSeqNr, q, testBufferSize, sCfg)

let dataP1 =
dataPacket(
initalRemoteSeqNr,
initialPacket.header.connectionId,
initialPacket.header.seqNr,
testBufferSize,
data,
0
)

await outgoingSocket.processPacket(dataP1)

let ack1 = await q.get()

@ -815,7 +815,7 @@ procSuite "Utp socket unit test":
# we have read all data from rcv buffer, advertised window should go back to
# initial size
sentData.header.wndSize == initialRcvBufferSize

await outgoingSocket.destroyWait()

asyncTest "Socket should ignore packets with bad ack number":

@ -828,36 +828,36 @@ procSuite "Utp socket unit test":
let (outgoingSocket, initialPacket) = connectOutGoingSocket(initialRemoteSeq, q)

# data packet with ack nr set above our seq nr i.e packet from the future
let dataFuture =
dataPacket(
initialRemoteSeq,
initialPacket.header.connectionId,
initialPacket.header.seqNr + 1,
testBufferSize,
data1,
0
)
# data packet wth ack number set below out ack window i.e packet too old
let dataTooOld =
dataPacket(
initialRemoteSeq,
initialPacket.header.connectionId,
initialPacket.header.seqNr - allowedAckWindow - 1,
testBufferSize,
data2,
0
)

let dataOk =
dataPacket(
initialRemoteSeq,
initialPacket.header.connectionId,
initialPacket.header.seqNr,
testBufferSize,
data3,
0
)

await outgoingSocket.processPacket(dataFuture)
await outgoingSocket.processPacket(dataTooOld)
await outgoingSocket.processPacket(dataOk)

@ -867,7 +867,7 @@ procSuite "Utp socket unit test":
check:
# data1 and data2 were sent in bad packets we should only receive data3
receivedBytes == data3

await outgoingSocket.destroyWait()

asyncTest "Writing data should increase current bytes window":

@ -914,9 +914,9 @@ procSuite "Utp socket unit test":
let responseAck =
ackPacket(
initialRemoteSeq,
initialPacket.header.connectionId,
sentPacket.header.seqNr,
testBufferSize,
0
)

@ -946,7 +946,7 @@ procSuite "Utp socket unit test":

discard await outgoingSocket.write(dataToWrite1)

let sentPacket1 = await q.get()

check:

@ -986,9 +986,9 @@ procSuite "Utp socket unit test":

let someAckFromRemote =
ackPacket(
initialRemoteSeq,
initialPacket.header.connectionId,
initialPacket.header.seqNr,
uint32(len(dataToWrite)),
0
)

@ -1014,12 +1014,12 @@ procSuite "Utp socket unit test":
# remote is initialized with buffer to small to handle whole payload
let (outgoingSocket, initialPacket) = connectOutGoingSocket(initialRemoteSeq, q)
let remoteRcvWindowSize = uint32(outgoingSocket.getPacketSize())
let someAckFromRemote =
ackPacket(
initialRemoteSeq,
initialPacket.header.connectionId,
initialPacket.header.seqNr,
remoteRcvWindowSize,
0
)

@ -1027,7 +1027,7 @@ procSuite "Utp socket unit test":
await outgoingSocket.processPacket(someAckFromRemote)

let twoPacketData = generateByteArray(rng[], int(2 * remoteRcvWindowSize))

let writeFut = outgoingSocket.write(twoPacketData)

# after this time first packet will be send and will timeout, but the write should not

@ -1049,9 +1049,9 @@ procSuite "Utp socket unit test":
# remote is initialized with buffer to small to handle whole payload
let (outgoingSocket, initialPacket) = connectOutGoingSocket(initialRemoteSeq, q)
let remoteRcvWindowSize = uint32(outgoingSocket.getPacketSize())
let someAckFromRemote =
ackPacket(
initialRemoteSeq,
initialPacket.header.connectionId,
initialPacket.header.seqNr,
remoteRcvWindowSize,

@ -1062,15 +1062,15 @@ procSuite "Utp socket unit test":
await outgoingSocket.processPacket(someAckFromRemote)

let twoPacketData = generateByteArray(rng[], int(2 * remoteRcvWindowSize))

let writeFut = outgoingSocket.write(twoPacketData)

let firstAckFromRemote =
ackPacket(
initialRemoteSeq,
initialPacket.header.connectionId,
initialPacket.header.seqNr + 1,
remoteRcvWindowSize,
0
)

@ -1096,11 +1096,11 @@ procSuite "Utp socket unit test":
let q = newAsyncQueue[Packet]()
let initialRemoteSeq = 10'u16
let someData = @[1'u8]
let (outgoingSocket, packet) =
connectOutGoingSocket(
initialRemoteSeq,
q,
remoteReceiveBuffer = 0,
cfg = SocketConfig.init(
remoteWindowResetTimeout = seconds(3)
)

@ -1121,7 +1121,7 @@ procSuite "Utp socket unit test":
# Ultimately, after 3 second remote rcv window will be reseted to minimal value
# and write will be able to progress
let writeResult = await writeFut

let p = await q.get()

check:

@ -1134,8 +1134,8 @@ procSuite "Utp socket unit test":
let q = newAsyncQueue[Packet]()
let initialRemoteSeq = 10'u16
let someData1 = @[1'u8]
let somedata2 = @[2'u8]
let someData2 = @[2'u8]
let (outgoingSocket, initialPacket) =
connectOutGoingSocket(
initialRemoteSeq,
q,

@ -1163,10 +1163,10 @@ procSuite "Utp socket unit test":

let someAckFromRemote =
ackPacket(
initialRemoteSeq,
initialPacket.header.connectionId,
initialPacket.header.seqNr + 1,
10,
0
)

@ -1181,7 +1181,7 @@ procSuite "Utp socket unit test":
check:
writeFut2.finished()
firstPacket.payload == someData1
secondPacket.payload == somedata2
secondPacket.payload == someData2

await outgoingSocket.destroyWait()

@ -1191,7 +1191,7 @@ procSuite "Utp socket unit test":

let (outgoingSocket, initialPacket) = connectOutGoingSocket(initialRemoteSeq, q)

let dataP1 =
dataPacket(
initialRemoteSeq,
initialPacket.header.connectionId,

@ -1203,7 +1203,7 @@ procSuite "Utp socket unit test":

check:
outgoingSocket.isConnected()

# necessary to avoid timestampDiff near 0 and flaky tests
await sleepAsync(milliseconds(50))
await outgoingSocket.processPacket(dataP1)

@ -1216,7 +1216,7 @@ procSuite "Utp socket unit test":
await outgoingSocket.destroyWait()

asyncTest "Re-sent packet should have updated timestamps and ack numbers":
let q = newAsyncQueue[Packet]()
let initalRemoteSeqNr = 10'u16

let (outgoingSocket, initialPacket) = connectOutGoingSocket(initalRemoteSeqNr, q)

@ -1236,7 +1236,7 @@ procSuite "Utp socket unit test":
secondSend.header.timestamp > firstSend.header.timestamp
firstSend.header.ackNr == secondSend.header.ackNr

let dataP1 =
dataPacket(
initalRemoteSeqNr,
initialPacket.header.connectionId,

@ -1245,7 +1245,7 @@ procSuite "Utp socket unit test":
@[1'u8],
0
)

await outgoingSocket.processPacket(dataP1)

let ack = await q.get()
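All changes in this section are `--styleCheck:usages` fixes: identifier uses are aligned with their declarations (`futsock` → `futSock`, `somedata2` → `someData2`) plus one pragma spacing fix (`{.async.}=` → `{.async.} =`). A minimal, hypothetical illustration of what the check reports, not part of this commit:

``` nim
# Illustrative only. Nim's style-insensitive identifier matching makes
# `somedata2` resolve to `someData2`, so this compiles either way, but
# --styleCheck:usages reports the use site whose spelling differs from
# the declaration.
let someData2 = @[2'u8]
echo somedata2   # flagged by --styleCheck:usages; write someData2 instead
```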