diff --git a/eth.nimble b/eth.nimble index 1cd29d0..449bfdf 100644 --- a/eth.nimble +++ b/eth.nimble @@ -20,7 +20,8 @@ requires "nim >= 1.6.0", "testutils", "unittest2", "results", - "minilru" + "minilru", + "snappy" let nimc = getEnv("NIMC", "nim") # Which nim compiler to use let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js) diff --git a/eth/common/base_rlp.nim b/eth/common/base_rlp.nim index d4c9f46..4765cfa 100644 --- a/eth/common/base_rlp.nim +++ b/eth/common/base_rlp.nim @@ -7,30 +7,17 @@ {.push raises: [].} -import std/typetraits, ./base, ../rlp +import + std/typetraits, ./base, ../rlp, + ../rlp/results as rlp_results -export base, rlp +export base, rlp, rlp_results -# TODO why is rlp serialization of `Opt` here and not in rlp? -proc append*[T](w: var RlpWriter, val: Opt[T]) = - mixin append - - if val.isSome: - w.append(val.get()) - else: - w.append("") template read*[T](rlp: var Rlp, val: var T) = mixin read val = rlp.read(type val) -proc read*[T](rlp: var Rlp, val: var Opt[T]) {.raises: [RlpError].} = - mixin read - if rlp.blobLen != 0: - val = Opt.some(rlp.read(T)) - else: - rlp.skipElem - proc read*(rlp: var Rlp, T: type StUint): T {.raises: [RlpError].} = if rlp.isBlob: let bytes = rlp.toBytes diff --git a/eth/common/blocks.nim b/eth/common/blocks.nim index 6d539f9..9dd4fd0 100644 --- a/eth/common/blocks.nim +++ b/eth/common/blocks.nim @@ -18,49 +18,16 @@ type address* : Address amount* : uint64 - DepositRequest* = object # EIP-6110 - pubkey* : Bytes48 - withdrawalCredentials*: Bytes32 - amount* : uint64 - signature* : Bytes96 - index* : uint64 - - WithdrawalRequest* = object # EIP-7002 - sourceAddress* : Address - validatorPubkey*: Bytes48 - amount* : uint64 - - ConsolidationRequest* = object # EIP-7251 - sourceAddress*: Address - sourcePubkey* : Bytes48 - targetPubkey* : Bytes48 - - RequestType* = enum - DepositRequestType # EIP-6110 - WithdrawalRequestType # EIP-7002 - ConsolidationRequestType # EIP-7251 - - Request* = object - case requestType*: RequestType - of DepositRequestType: - deposit*: DepositRequest - of WithdrawalRequestType: - withdrawal*: WithdrawalRequest - of ConsolidationRequestType: - consolidation*: ConsolidationRequest - BlockBody* = object transactions*: seq[Transaction] uncles*: seq[Header] withdrawals*: Opt[seq[Withdrawal]] # EIP-4895 - requests*: Opt[seq[Request]] # EIP-7865 Block* = object header* : Header transactions*: seq[Transaction] uncles* : seq[Header] withdrawals*: Opt[seq[Withdrawal]] # EIP-4895 - requests*: Opt[seq[Request]] # EIP-7865 const EMPTY_UNCLE_HASH* = hash32"1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347" @@ -77,15 +44,3 @@ func init*(T: type Block, header: Header, body: BlockBody): T = template txs*(blk: Block): seq[Transaction] = # Legacy name emulation blk.transactions - -func `==`*(a, b: Request): bool = - if a.requestType != b.requestType: - return false - - case a.requestType - of DepositRequestType: - a.deposit == b.deposit - of WithdrawalRequestType: - a.withdrawal == b.withdrawal - of ConsolidationRequestType: - a.consolidation == b.consolidation diff --git a/eth/common/blocks_rlp.nim b/eth/common/blocks_rlp.nim index 3769768..f5416e3 100644 --- a/eth/common/blocks_rlp.nim +++ b/eth/common/blocks_rlp.nim @@ -12,146 +12,3 @@ import ./[addresses_rlp, blocks, base_rlp, hashes_rlp], ../rlp from stew/objects import checkedEnumAssign export addresses_rlp, blocks, base_rlp, hashes_rlp, rlp - -proc append*(rlpWriter: var RlpWriter, request: DepositRequest) = - 
rlpWriter.appendRawBytes([DepositRequestType.byte]) - rlpWriter.startList(5) - rlpWriter.append(request.pubkey) - rlpWriter.append(request.withdrawalCredentials) - rlpWriter.append(request.amount) - rlpWriter.append(request.signature) - rlpWriter.append(request.index) - -proc read*(rlp: var Rlp, T: type DepositRequest): T {.raises: [RlpError].} = - if not rlp.hasData: - raise (ref MalformedRlpError)( - msg: "DepositRequestType expected but source RLP is empty" - ) - let reqType = rlp.readRawByte() - if reqType != DepositRequestType: - raise (ref UnsupportedRlpError)(msg: "Unexpected DepositRequestType: " & $reqType) - - var res: DepositRequest - rlp.tryEnterList() - rlp.read(res.pubkey) - rlp.read(res.withdrawalCredentials) - rlp.read(res.amount) - rlp.read(res.signature) - rlp.read(res.index) - if rlp.hasData: - raise (ref MalformedRlpError)(msg: "Extra data after DepositRequest") - res - -proc append*(rlpWriter: var RlpWriter, request: WithdrawalRequest) = - rlpWriter.appendRawBytes([WithdrawalRequestType.byte]) - rlpWriter.startList(3) - rlpWriter.append(request.sourceAddress) - rlpWriter.append(request.validatorPubkey) - rlpWriter.append(request.amount) - -proc read*(rlp: var Rlp, T: type WithdrawalRequest): T {.raises: [RlpError].} = - if not rlp.hasData: - raise (ref MalformedRlpError)( - msg: "WithdrawalRequestType expected but source RLP is empty" - ) - let reqType = rlp.readRawByte() - if reqType != WithdrawalRequestType: - raise - (ref UnsupportedRlpError)(msg: "Unexpected WithdrawalRequestType: " & $reqType) - - var res: WithdrawalRequest - rlp.tryEnterList() - rlp.read(res.sourceAddress) - rlp.read(res.validatorPubkey) - rlp.read(res.amount) - if rlp.hasData: - raise (ref MalformedRlpError)(msg: "Extra data after WithdrawalRequest") - res - -proc append*(rlpWriter: var RlpWriter, request: ConsolidationRequest) = - rlpWriter.appendRawBytes([ConsolidationRequestType.byte]) - rlpWriter.startList(3) - rlpWriter.append(request.sourceAddress) - rlpWriter.append(request.sourcePubkey) - rlpWriter.append(request.targetPubkey) - -proc read*(rlp: var Rlp, T: type ConsolidationRequest): T {.raises: [RlpError].} = - if not rlp.hasData: - raise (ref MalformedRlpError)( - msg: "ConsolidationRequestType expected but source RLP is empty" - ) - let reqType = rlp.readRawByte() - if reqType != ConsolidationRequestType: - raise - (ref UnsupportedRlpError)(msg: "Unexpected ConsolidationRequestType: " & $reqType) - - var res: ConsolidationRequest - rlp.tryEnterList() - rlp.read(res.sourceAddress) - rlp.read(res.sourcePubkey) - rlp.read(res.targetPubkey) - if rlp.hasData: - raise (ref MalformedRlpError)(msg: "Extra data after ConsolidationRequest") - res - -proc append*(rlpWriter: var RlpWriter, request: Request) = - case request.requestType - of DepositRequestType: - rlpWriter.append(request.deposit) - of WithdrawalRequestType: - rlpWriter.append(request.withdrawal) - of ConsolidationRequestType: - rlpWriter.append(request.consolidation) - -proc append*(rlpWriter: var RlpWriter, reqs: seq[Request] | openArray[Request]) = - rlpWriter.startList(reqs.len) - for req in reqs: - rlpWriter.append(rlp.encode(req)) - -proc read*(rlp: var Rlp, T: type Request): T {.raises: [RlpError].} = - if not rlp.hasData: - raise newException(MalformedRlpError, "Request expected but source RLP is empty") - if not rlp.isSingleByte: - raise newException( - MalformedRlpError, "RequestType byte is out of range, must be 0x00 to 0x7f" - ) - - let reqType = rlp.getByteValue - rlp.position += 1 - - var reqVal: RequestType - if 
checkedEnumAssign(reqVal, reqType): - result = Request(requestType: reqVal) - rlp.tryEnterList() - case reqVal - of DepositRequestType: - rlp.read(result.deposit.pubkey) - rlp.read(result.deposit.withdrawalCredentials) - rlp.read(result.deposit.amount) - rlp.read(result.deposit.signature) - rlp.read(result.deposit.index) - of WithdrawalRequestType: - rlp.read(result.withdrawal.sourceAddress) - rlp.read(result.withdrawal.validatorPubkey) - rlp.read(result.withdrawal.amount) - of ConsolidationRequestType: - rlp.read(result.consolidation.sourceAddress) - rlp.read(result.consolidation.sourcePubkey) - rlp.read(result.consolidation.targetPubkey) - else: - raise (ref UnsupportedRlpError)(msg: "Unexpected RequestType: " & $reqType) - -proc read*( - rlp: var Rlp, T: (type seq[Request]) | (type openArray[Request]) -): seq[Request] {.raises: [RlpError].} = - if not rlp.isList: - raise newException( - RlpTypeMismatch, "Requests list expected, but source RLP is not a list" - ) - - var reqs: seq[Request] - for item in rlp: - var rr = rlpFromBytes(rlp.read(seq[byte])) - reqs.add rr.read(Request) - - reqs diff --git a/eth/common/eth_hash.nim b/eth/common/eth_hash.nim index a94bcb3..a9583bd 100644 --- a/eth/common/eth_hash.nim +++ b/eth/common/eth_hash.nim @@ -30,5 +30,11 @@ template keccakHash*(v: Address): Hash32 {.deprecated.} = from nimcrypto/hash import MDigest -converter toMDigest*(v: Hash32): MDigest[256] {.deprecated.} = +# TODO https://github.com/nim-lang/Nim/issues/24241 +when (NimMajor, NimMinor) >= (2, 12) or defined(ethDigestConverterWarning): + {.pragma: convdeprecated, deprecated.} +else: + {.pragma: convdeprecated.} + +converter toMDigest*(v: Hash32): MDigest[256] {.convdeprecated.} = MDigest[256](data: v.data) diff --git a/eth/common/eth_types.nim b/eth/common/eth_types.nim index 00460b7..e8086fb 100644 --- a/eth/common/eth_types.nim +++ b/eth/common/eth_types.nim @@ -33,14 +33,11 @@ type EthAccount* = Account EthAddress* = Address EthBlock* = Block - EthConsolidationRequest* = ConsolidationRequest - EthDepositRequest* = DepositRequest EthHash32* = Hash32 EthHeader* = Header EthTransaction* = Transaction EthReceipt* = Receipt EthWithdrawal* = Withdrawal - EthWithdrawalRequest* = WithdrawalRequest func init*(T: type BlockHashOrNumber, str: string): T {.raises: [ValueError].} = if str.startsWith "0x": diff --git a/eth/common/eth_types_rlp.nim b/eth/common/eth_types_rlp.nim index 07b1f19..7ce4e37 100644 --- a/eth/common/eth_types_rlp.nim +++ b/eth/common/eth_types_rlp.nim @@ -6,13 +6,13 @@ import "."/[ - accounts_rlp, addresses_rlp, base_rlp, blocks_rlp, eth_types, hashes_rlp, + accounts_rlp, addresses_rlp, base_rlp, eth_types, hashes_rlp, headers_rlp, receipts_rlp, times_rlp, transactions_rlp, ], ../rlp export - accounts_rlp, addresses_rlp, base_rlp, blocks_rlp, eth_types, hashes_rlp, + accounts_rlp, addresses_rlp, base_rlp, eth_types, hashes_rlp, headers_rlp, receipts_rlp, times_rlp, transactions_rlp, rlp proc append*(rlpWriter: var RlpWriter, value: BlockHashOrNumber) = diff --git a/eth/p2p.nim b/eth/p2p.nim index 32647e9..e5099d2 100644 --- a/eth/p2p.nim +++ b/eth/p2p.nim @@ -27,7 +27,7 @@ proc addCapability*(node: EthereumNode, let pos = lowerBound(node.protocols, p, rlpx.cmp) node.protocols.insert(p, pos) - node.capabilities.insert(p.asCapability, pos) + node.capabilities.insert(p.capability, pos) if p.networkStateInitializer != nil and networkState.isNil: node.protocolStates[p.index] = p.networkStateInitializer(node) @@ -79,12 +79,11 @@ proc newEthereumNode*( networkId: NetworkId, 
clientId = "nim-eth-p2p", addAllCapabilities = true, - useCompression: bool = false, minPeers = 10, bootstrapNodes: seq[ENode] = @[], bindUdpPort: Port, bindTcpPort: Port, - bindIp = IPv4_any(), + bindIp = IPv6_any(), rng = newRng()): EthereumNode = if rng == nil: # newRng could fail @@ -105,11 +104,6 @@ proc newEthereumNode*( keys.seckey, address, bootstrapNodes, bindUdpPort, bindIp, rng) result.rng = rng - - when useSnappy: - result.protocolVersion = if useCompression: devp2pSnappyVersion - else: devp2pVersion - result.protocolStates.newSeq protocolCount() result.peerPool = newPeerPool( diff --git a/eth/p2p/auth.nim b/eth/p2p/auth.nim index 2a97255..b89db6d 100644 --- a/eth/p2p/auth.nim +++ b/eth/p2p/auth.nim @@ -8,7 +8,10 @@ # MIT license (LICENSE-MIT) # -## This module implements Ethereum RLPx authentication +## This module implements Ethereum EIP-8 RLPx authentication - pre-EIP-8 +## messages are not supported +## https://github.com/ethereum/devp2p/blob/5713591d0366da78a913a811c7502d9ca91d29a8/rlpx.md#initial-handshake +## https://github.com/ethereum/EIPs/blob/b479473414cf94445b450c266a9dedc079a12158/EIPS/eip-8.md {.push raises: [].} @@ -25,79 +28,70 @@ export results type keccak256 = keccak.keccak256 const - SupportedRlpxVersion* = 4'u8 - # Auth message sizes + MsgLenLenEIP8* = 2 + ## auth-size = size of enc-auth-body, encoded as a big-endian 16-bit integer + ## ack-size = size of enc-ack-body, encoded as a big-endian 16-bit integer - # Pre EIP8 - PlainAuthMessageV4Length* = 194 - AuthMessageV4Length* = 307 + MinPadLenEIP8* = 100 + MaxPadLenEIP8* = 300 + ## Padding makes message length unpredictable which makes packet filtering + ## a tiny bit harder - although not necessary any more, we always add at + ## least 100 bytes of padding to make the message distinguishable from + ## pre-EIP8 and at most 200 to stay within recommendation - # EIP8 - # signature + pubkey + nounce + version + rlp encoding overhead + # signature + pubkey + nonce + version + rlp encoding overhead # 65 + 64 + 32 + 1 + 7 = 169 PlainAuthMessageEIP8Length = 169 - PlainAuthMessageMaxEIP8* = PlainAuthMessageEIP8Length + 255 # with padding + PlainAuthMessageMaxEIP8 = PlainAuthMessageEIP8Length + MaxPadLenEIP8 # Min. encrypted message + size prefix = 284 - AuthMessageEIP8Length* = PlainAuthMessageEIP8Length + eciesOverheadLength + 2 - AuthMessageMaxEIP8* = AuthMessageEIP8Length + 255 # with padding + AuthMessageEIP8Length* = + eciesEncryptedLength(PlainAuthMessageEIP8Length) + MsgLenLenEIP8 + AuthMessageMaxEIP8* = AuthMessageEIP8Length + MaxPadLenEIP8 + ## Minimal output buffer size to pass into `authMessage` # Ack message sizes - # Pre EIP8 - PlainAckMessageV4Length* = 97 - AckMessageV4Length* = 210 - - # EIP 8 # pubkey + nounce + version + rlp encoding overhead # 64 + 32 + 1 + 5 = 102 - PlainAckMessageEIP8Length* = 102 - PlainAckMessageMaxEIP8* = PlainAckMessageEIP8Length + 255 # with padding + PlainAckMessageEIP8Length = 102 + PlainAckMessageMaxEIP8 = PlainAckMessageEIP8Length + MaxPadLenEIP8 # Min. 
encrypted message + size prefix = 217 - AckMessageEIP8Length* = PlainAckMessageEIP8Length + eciesOverheadLength + 2 - AckMessageMaxEIP8* = AckMessageEIP8Length + 255 # with padding + AckMessageEIP8Length* = + eciesEncryptedLength(PlainAckMessageEIP8Length) + MsgLenLenEIP8 + AckMessageMaxEIP8* = AckMessageEIP8Length + MaxPadLenEIP8 + ## Minimal output buffer size to pass into `ackMessage` + + Vsn = [byte 4] + ## auth-vsn = 4 + ## ack-vsn = 4 type Nonce* = array[KeyLength, byte] - AuthMessageV4* {.packed.} = object - signature: array[RawSignatureSize, byte] - keyhash: array[keccak256.sizeDigest, byte] - pubkey: array[RawPublicKeySize, byte] - nonce: array[keccak256.sizeDigest, byte] - flag: byte - - AckMessageV4* {.packed.} = object - pubkey: array[RawPublicKeySize, byte] - nonce: array[keccak256.sizeDigest, byte] - flag: byte - HandshakeFlag* = enum - Initiator, ## `Handshake` owner is connection initiator - Responder, ## `Handshake` owner is connection responder - EIP8 ## Flag indicates that EIP-8 handshake is used + Initiator ## `Handshake` owner is connection initiator + Responder ## `Handshake` owner is connection responder AuthError* = enum - EcdhError = "auth: ECDH shared secret could not be calculated" - BufferOverrun = "auth: buffer overrun" - SignatureError = "auth: signature could not be obtained" - EciesError = "auth: ECIES encryption/decryption error" - InvalidPubKey = "auth: invalid public key" - InvalidAuth = "auth: invalid Authentication message" - InvalidAck = "auth: invalid Authentication ACK message" - RlpError = "auth: error while decoding RLP stream" + EcdhError = "auth: ECDH shared secret could not be calculated" + BufferOverrun = "auth: buffer overrun" + SignatureError = "auth: signature could not be obtained" + EciesError = "auth: ECIES encryption/decryption error" + InvalidPubKey = "auth: invalid public key" + InvalidAuth = "auth: invalid Authentication message" + InvalidAck = "auth: invalid Authentication ACK message" + RlpError = "auth: error while decoding RLP stream" IncompleteError = "auth: data incomplete" Handshake* = object - version*: uint8 ## protocol version - flags*: set[HandshakeFlag] ## handshake flags - host*: KeyPair ## host keypair - ephemeral*: KeyPair ## ephemeral host keypair - remoteHPubkey*: PublicKey ## remote host public key - remoteEPubkey*: PublicKey ## remote host ephemeral public key - initiatorNonce*: Nonce ## initiator nonce - responderNonce*: Nonce ## responder nonce - expectedLength*: int ## expected incoming message length + flags*: set[HandshakeFlag] ## handshake flags + host*: KeyPair ## host keypair + ephemeral*: KeyPair ## ephemeral host keypair + remoteHPubkey*: PublicKey ## remote host public key + remoteEPubkey*: PublicKey ## remote host ephemeral public key + initiatorNonce*: Nonce ## initiator nonce + responderNonce*: Nonce ## responder nonce ConnectionSecret* = object aesKey*: array[aes256.sizeKey, byte] @@ -111,291 +105,163 @@ template toa(a, b, c: untyped): untyped = toOpenArray((a), (b), (b) + (c) - 1) proc mapErrTo[T, E](r: Result[T, E], v: static AuthError): AuthResult[T] = - r.mapErr(proc (e: E): AuthError = v) + r.mapErr( + proc(e: E): AuthError = + v + ) proc init*( - T: type Handshake, rng: var HmacDrbgContext, host: KeyPair, - flags: set[HandshakeFlag] = {Initiator}, - version: uint8 = SupportedRlpxVersion): T = + T: type Handshake, + rng: var HmacDrbgContext, + host: KeyPair, + flags: set[HandshakeFlag], +): T = ## Create new `Handshake` object. 
var initiatorNonce: Nonce responderNonce: Nonce - expectedLength: int ephemeral = KeyPair.random(rng) if Initiator in flags: - expectedLength = AckMessageV4Length rng.generate(initiatorNonce) else: - expectedLength = AuthMessageV4Length rng.generate(responderNonce) return T( - version: version, flags: flags, host: host, ephemeral: ephemeral, initiatorNonce: initiatorNonce, responderNonce: responderNonce, - expectedLength: expectedLength ) -proc authMessagePreEIP8(h: var Handshake, - rng: var HmacDrbgContext, - pubkey: PublicKey, - output: var openArray[byte], - outlen: var int, - flag: byte = 0, - encrypt: bool = true): AuthResult[void] = - ## Create plain pre-EIP8 authentication message. - var - buffer: array[PlainAuthMessageV4Length, byte] - outlen = 0 - let header = cast[ptr AuthMessageV4](addr buffer[0]) +proc authMessage*( + h: var Handshake, + rng: var HmacDrbgContext, + pubkey: PublicKey, + output: var openArray[byte], +): AuthResult[int] = + ## Create EIP8 authentication message - returns length of encoded message + ## The output should be a buffer of AuthMessageMaxEIP8 bytes at least. + if len(output) < AuthMessageMaxEIP8: + return err(AuthError.BufferOverrun) - var secret = ecdhSharedSecret(h.host.seckey, pubkey) - secret.data = secret.data xor h.initiatorNonce - - let signature = sign(h.ephemeral.seckey, SkMessage(secret.data)) - secret.clear() - - h.remoteHPubkey = pubkey - header.signature = signature.toRaw() - header.keyhash = keccak256.digest(h.ephemeral.pubkey.toRaw()).data - header.pubkey = h.host.pubkey.toRaw() - header.nonce = h.initiatorNonce - header.flag = flag - if encrypt: - if len(output) < AuthMessageV4Length: - return err(AuthError.BufferOverrun) - if eciesEncrypt(rng, buffer, output, h.remoteHPubkey).isErr: - return err(AuthError.EciesError) - outlen = AuthMessageV4Length - else: - if len(output) < PlainAuthMessageV4Length: - return err(AuthError.BufferOverrun) - copyMem(addr output[0], addr buffer[0], PlainAuthMessageV4Length) - outlen = PlainAuthMessageV4Length - - ok() - -proc authMessageEIP8(h: var Handshake, - rng: var HmacDrbgContext, - pubkey: PublicKey, - output: var openArray[byte], - outlen: var int, - flag: byte = 0, - encrypt: bool = true): AuthResult[void] = - ## Create EIP8 authentication message. 
- var - buffer: array[PlainAuthMessageMaxEIP8, byte] - - doAssert(EIP8 in h.flags) - outlen = 0 - - var secret = ecdhSharedSecret(h.host.seckey, pubkey) - secret.data = secret.data xor h.initiatorNonce - - let signature = sign(h.ephemeral.seckey, SkMessage(secret.data)) - secret.clear() - - h.remoteHPubkey = pubkey - var payload = rlp.encodeList(signature.toRaw(), - h.host.pubkey.toRaw(), - h.initiatorNonce, - [byte(h.version)]) - doAssert(len(payload) == PlainAuthMessageEIP8Length) - let - pencsize = eciesEncryptedLength(len(payload)) - - var padsize = int(rng.generate(byte)) # aka rand(max) - while padsize <= (AuthMessageV4Length - (pencsize + 2)): + var padsize = int(rng.generate(byte)) + while padsize > (MaxPadLenEIP8 - MinPadLenEIP8): padsize = int(rng.generate(byte)) + padsize += MinPadLenEIP8 - # It is possible to make packet size constant by uncommenting this line - # padsize = 24 let + pencsize = eciesEncryptedLength(PlainAuthMessageEIP8Length) wosize = pencsize + padsize fullsize = wosize + 2 - rng.generate(toa(buffer, PlainAuthMessageEIP8Length, padsize)) - - if encrypt: - if len(output) < fullsize: - return err(AuthError.BufferOverrun) - - copyMem(addr buffer[0], addr payload[0], len(payload)) - - let wosizeBE = uint16(wosize).toBytesBE() - output[0..<2] = wosizeBE - if eciesEncrypt(rng, toa(buffer, 0, len(payload) + padsize), - toa(output, 2, wosize), pubkey, - toa(output, 0, 2)).isErr: - return err(AuthError.EciesError) - outlen = fullsize - else: - let plainsize = len(payload) + padsize - if len(output) < plainsize: - return err(AuthError.BufferOverrun) - copyMem(addr output[0], addr buffer[0], plainsize) - outlen = plainsize - - ok() - -proc ackMessagePreEIP8(h: var Handshake, - rng: var HmacDrbgContext, - output: var openArray[byte], - outlen: var int, - flag: byte = 0, - encrypt: bool = true): AuthResult[void] = - ## Create plain pre-EIP8 authentication ack message. - var buffer: array[PlainAckMessageV4Length, byte] - outlen = 0 - let header = cast[ptr AckMessageV4](addr buffer[0]) - header.pubkey = h.ephemeral.pubkey.toRaw() - header.nonce = h.responderNonce - header.flag = flag - if encrypt: - if len(output) < AckMessageV4Length: - return err(AuthError.BufferOverrun) - if eciesEncrypt(rng, buffer, output, h.remoteHPubkey).isErr: - return err(AuthError.EciesError) - outlen = AckMessageV4Length - else: - if len(output) < PlainAckMessageV4Length: - return err(AuthError.BufferOverrun) - copyMem(addr output[0], addr buffer[0], PlainAckMessageV4Length) - outlen = PlainAckMessageV4Length - - ok() - -proc ackMessageEIP8(h: var Handshake, - rng: var HmacDrbgContext, - output: var openArray[byte], - outlen: var int, - flag: byte = 0, - encrypt: bool = true): AuthResult[void] = - ## Create EIP8 authentication ack message. 
- var - buffer: array[PlainAckMessageMaxEIP8, byte] - padsize: array[1, byte] - doAssert(EIP8 in h.flags) - var payload = rlp.encodeList(h.ephemeral.pubkey.toRaw(), - h.responderNonce, - [byte(h.version)]) - doAssert(len(payload) == PlainAckMessageEIP8Length) - outlen = 0 - let pencsize = eciesEncryptedLength(len(payload)) - while true: - generate(rng, padsize) - if int(padsize[0]) > (AckMessageV4Length - (pencsize + 2)): - break - # It is possible to make packet size constant by uncommenting this line - # padsize = 0 - let wosize = pencsize + int(padsize[0]) - let fullsize = wosize + 2 - if int(padsize[0]) > 0: - rng.generate(toa(buffer, PlainAckMessageEIP8Length, int(padsize[0]))) - - copyMem(addr buffer[0], addr payload[0], len(payload)) - if encrypt: - if len(output) < fullsize: - return err(AuthError.BufferOverrun) - output[0..<2] = uint16(wosize).toBytesBE() - if eciesEncrypt(rng, toa(buffer, 0, len(payload) + int(padsize[0])), - toa(output, 2, wosize), h.remoteHPubkey, - toa(output, 0, 2)).isErr: - return err(AuthError.EciesError) - outlen = fullsize - else: - let plainsize = len(payload) + int(padsize[0]) - if len(output) < plainsize: - return err(AuthError.BufferOverrun) - copyMem(addr output[0], addr buffer[0], plainsize) - outlen = plainsize - - ok() - -template authSize*(h: Handshake, encrypt: bool = true): int = - ## Get number of bytes needed to store AuthMessage. - if EIP8 in h.flags: - if encrypt: (AuthMessageMaxEIP8) else: (PlainAuthMessageMaxEIP8) - else: - if encrypt: (AuthMessageV4Length) else: (PlainAuthMessageV4Length) - -template ackSize*(h: Handshake, encrypt: bool = true): int = - ## Get number of bytes needed to store AckMessage. - if EIP8 in h.flags: - if encrypt: (AckMessageMaxEIP8) else: (PlainAckMessageMaxEIP8) - else: - if encrypt: (AckMessageV4Length) else: (PlainAckMessageV4Length) - -proc authMessage*(h: var Handshake, rng: var HmacDrbgContext, - pubkey: PublicKey, - output: var openArray[byte], - outlen: var int, flag: byte = 0, - encrypt: bool = true): AuthResult[void] = - ## Create new AuthMessage for specified `pubkey` and store it inside - ## of `output`, size of generated AuthMessage will stored in `outlen`. - if EIP8 in h.flags: - authMessageEIP8(h, rng, pubkey, output, outlen, flag, encrypt) - else: - authMessagePreEIP8(h, rng, pubkey, output, outlen, flag, encrypt) - -proc ackMessage*(h: var Handshake, rng: var HmacDrbgContext, - output: var openArray[byte], - outlen: var int, flag: byte = 0, - encrypt: bool = true): AuthResult[void] = - ## Create new AckMessage and store it inside of `output`, size of generated - ## AckMessage will stored in `outlen`. - if EIP8 in h.flags: - ackMessageEIP8(h, rng, output, outlen, flag, encrypt) - else: - ackMessagePreEIP8(h, rng, output, outlen, flag, encrypt) - -proc decodeAuthMessageV4(h: var Handshake, m: openArray[byte]): AuthResult[void] = - ## Decodes V4 AuthMessage. - var - buffer: array[PlainAuthMessageV4Length, byte] - - doAssert(Responder in h.flags) - if eciesDecrypt(m, buffer, h.host.seckey).isErr: - return err(EciesError) - - let - header = cast[ptr AuthMessageV4](addr buffer[0]) - pubkey = ? PublicKey.fromRaw(header.pubkey).mapErrTo(InvalidPubKey) - signature = ? 
Signature.fromRaw(header.signature).mapErrTo(SignatureError) + doAssert fullsize <= len(output), "We checked against max possible length above" var secret = ecdhSharedSecret(h.host.seckey, pubkey) - secret.data = secret.data xor header.nonce + secret.data = secret.data xor h.initiatorNonce - var recovered = recover(signature, SkMessage(secret.data)) + let signature = sign(h.ephemeral.seckey, SkMessage(secret.data)) secret.clear() - h.remoteEPubkey = ? recovered.mapErrTo(SignatureError) - h.initiatorNonce = header.nonce h.remoteHPubkey = pubkey + var payload = + rlp.encodeList(signature.toRaw(), h.host.pubkey.toRaw(), h.initiatorNonce, Vsn) + doAssert(len(payload) == PlainAuthMessageEIP8Length) - ok() + var buffer {.noinit.}: array[PlainAuthMessageMaxEIP8, byte] + copyMem(addr buffer[0], addr payload[0], len(payload)) + rng.generate(toa(buffer, PlainAuthMessageEIP8Length, padsize)) -proc decodeAuthMessageEIP8(h: var Handshake, m: openArray[byte]): AuthResult[void] = + let wosizeBE = uint16(wosize).toBytesBE() + output[0 ..< 2] = wosizeBE + if eciesEncrypt( + rng, + toa(buffer, 0, len(payload) + padsize), + toa(output, 2, wosize), + pubkey, + toa(output, 0, 2), + ).isErr: + return err(AuthError.EciesError) + + ok(fullsize) + +proc ackMessage*( + h: var Handshake, rng: var HmacDrbgContext, output: var openArray[byte] +): AuthResult[int] = + ## Create EIP8 authentication ack message - returns length of encoded message + ## The output should be a buffer of AckMessageMaxEIP8 bytes at least. + if len(output) < AckMessageMaxEIP8: + return err(AuthError.BufferOverrun) + + var padsize = int(rng.generate(byte)) + while padsize > (MaxPadLenEIP8 - MinPadLenEIP8): + padsize = int(rng.generate(byte)) + padsize += MinPadLenEIP8 + + let + pencsize = eciesEncryptedLength(PlainAckMessageEIP8Length) + wosize = pencsize + padsize + fullsize = wosize + 2 + + doAssert fullsize <= len(output), "We checked against max possible length above" + + var + buffer: array[PlainAckMessageMaxEIP8, byte] + payload = rlp.encodeList(h.ephemeral.pubkey.toRaw(), h.responderNonce, Vsn) + doAssert(len(payload) == PlainAckMessageEIP8Length) + + copyMem(addr buffer[0], addr payload[0], PlainAckMessageEIP8Length) + rng.generate(toa(buffer, PlainAckMessageEIP8Length, padsize)) + + output[0 ..< MsgLenLenEIP8] = uint16(wosize).toBytesBE() + + if eciesEncrypt( + rng, + toa(buffer, 0, PlainAckMessageEIP8Length + padsize), + toa(output, MsgLenLenEIP8, wosize), + h.remoteHPubkey, + toa(output, 0, MsgLenLenEIP8), + ).isErr: + return err(AuthError.EciesError) + ok(fullsize) + +func decodeMsgLen(input: openArray[byte]): AuthResult[int] = + if input.len < 2: + return err(AuthError.IncompleteError) + ok(int(uint16.fromBytesBE(input)) + 2) + +func decodeAuthMsgLen*(h: Handshake, input: openArray[byte]): AuthResult[int] = + let len = ?decodeMsgLen(input) + if len < AuthMessageEIP8Length: + return err(AuthError.IncompleteError) + ok(len) + +func decodeAckMsgLen*(h: Handshake, input: openArray[byte]): AuthResult[int] = + let len = ?decodeMsgLen(input) + if len < AckMessageEIP8Length: + return err(AuthError.IncompleteError) + ok(len) + +proc decodeAuthMessage*(h: var Handshake, m: openArray[byte]): AuthResult[void] = ## Decodes EIP-8 AuthMessage. 
- let size = uint16.fromBytesBE(m) - h.expectedLength = 2 + int(size) + let + expectedLength = ?h.decodeAuthMsgLen(m) + size = expectedLength - MsgLenLenEIP8 # Check if the prefixed size is => than the minimum - if h.expectedLength < AuthMessageEIP8Length: + if expectedLength < AuthMessageEIP8Length: return err(AuthError.IncompleteError) - if h.expectedLength > len(m): + if expectedLength > len(m): return err(AuthError.IncompleteError) - var buffer = newSeq[byte](eciesDecryptedLength(int(size))) + var buffer = newSeq[byte](eciesDecryptedLength(size)) if eciesDecrypt( - toa(m, 2, int(size)), buffer, h.host.seckey, toa(m, 0, 2)).isErr: + toa(m, MsgLenLenEIP8, int(size)), buffer, h.host.seckey, toa(m, 0, MsgLenLenEIP8) + ).isErr: return err(AuthError.EciesError) + try: var reader = rlpFromBytes(buffer) if not reader.isList() or reader.listLen() < 4: @@ -412,11 +278,9 @@ proc decodeAuthMessageEIP8(h: var Handshake, m: openArray[byte]): AuthResult[voi signatureBr = reader.listElem(0).toBytes() pubkeyBr = reader.listElem(1).toBytes() nonceBr = reader.listElem(2).toBytes() - versionBr = reader.listElem(3).toBytes() - let - signature = ? Signature.fromRaw(signatureBr).mapErrTo(SignatureError) - pubkey = ? PublicKey.fromRaw(pubkeyBr).mapErrTo(InvalidPubKey) + signature = ?Signature.fromRaw(signatureBr).mapErrTo(SignatureError) + pubkey = ?PublicKey.fromRaw(pubkeyBr).mapErrTo(InvalidPubKey) nonce = toArray(KeyLength, nonceBr) var secret = ecdhSharedSecret(h.host.seckey, pubkey) @@ -425,100 +289,52 @@ proc decodeAuthMessageEIP8(h: var Handshake, m: openArray[byte]): AuthResult[voi let recovered = recover(signature, SkMessage(secret.data)) secret.clear() - h.remoteEPubkey = ? recovered.mapErrTo(SignatureError) + h.remoteEPubkey = ?recovered.mapErrTo(SignatureError) h.initiatorNonce = nonce h.remoteHPubkey = pubkey - h.version = versionBr[0] ok() except CatchableError: err(AuthError.RlpError) -proc decodeAckMessageEIP8*(h: var Handshake, m: openArray[byte]): AuthResult[void] = +proc decodeAckMessage*(h: var Handshake, m: openArray[byte]): AuthResult[void] = ## Decodes EIP-8 AckMessage. - let size = uint16.fromBytesBE(m) - h.expectedLength = 2 + int(size) + let + expectedLength = ?h.decodeAckMsgLen(m) + size = expectedLength - MsgLenLenEIP8 # Check if the prefixed size is => than the minimum - if h.expectedLength < AckMessageEIP8Length: + if expectedLength > len(m): return err(AuthError.IncompleteError) - if h.expectedLength > len(m): - return err(AuthError.IncompleteError) - - var buffer = newSeq[byte](eciesDecryptedLength(int(size))) + var buffer = newSeq[byte](eciesDecryptedLength(size)) if eciesDecrypt( - toa(m, 2, int(size)), buffer, h.host.seckey, toa(m, 0, 2)).isErr: + toa(m, MsgLenLenEIP8, size), buffer, h.host.seckey, toa(m, 0, MsgLenLenEIP8) + ).isErr: return err(AuthError.EciesError) try: var reader = rlpFromBytes(buffer) + # The last element, the version, is ignored if not reader.isList() or reader.listLen() < 3: return err(AuthError.InvalidAck) if reader.listElem(0).blobLen != RawPublicKeySize: return err(AuthError.InvalidAck) if reader.listElem(1).blobLen != KeyLength: return err(AuthError.InvalidAck) - if reader.listElem(2).blobLen != 1: - return err(AuthError.InvalidAck) + let pubkeyBr = reader.listElem(0).toBytes() nonceBr = reader.listElem(1).toBytes() - versionBr = reader.listElem(2).toBytes() - h.remoteEPubkey = ? 
PublicKey.fromRaw(pubkeyBr).mapErrTo(InvalidPubKey) + h.remoteEPubkey = ?PublicKey.fromRaw(pubkeyBr).mapErrTo(InvalidPubKey) h.responderNonce = toArray(KeyLength, nonceBr) - h.version = versionBr[0] ok() except CatchableError: err(AuthError.RlpError) -proc decodeAckMessageV4(h: var Handshake, m: openArray[byte]): AuthResult[void] = - ## Decodes V4 AckMessage. - var - buffer: array[PlainAckMessageV4Length, byte] - doAssert(Initiator in h.flags) - - if eciesDecrypt(m, buffer, h.host.seckey).isErr: - return err(AuthError.EciesError) - var header = cast[ptr AckMessageV4](addr buffer[0]) - - h.remoteEPubkey = ? PublicKey.fromRaw(header.pubkey).mapErrTo(InvalidPubKey) - h.responderNonce = header.nonce - - ok() - -proc decodeAuthMessage*(h: var Handshake, input: openArray[byte]): AuthResult[void] = - ## Decodes AuthMessage from `input`. - # Using the smallest min. message length of the two types - if len(input) < AuthMessageEIP8Length: - return err(AuthError.IncompleteError) - - if len(input) == AuthMessageV4Length: - let res = h.decodeAuthMessageV4(input) - if res.isOk(): return res - - let res = h.decodeAuthMessageEIP8(input) - if res.isOk(): - h.flags.incl(EIP8) - res - -proc decodeAckMessage*(h: var Handshake, input: openArray[byte]): AuthResult[void] = - ## Decodes AckMessage from `input`. - # Using the smallest min. message length of the two types - if len(input) < AckMessageV4Length: - return err(AuthError.IncompleteError) - - if len(input) == AckMessageV4Length: - let res = h.decodeAckMessageV4(input) - if res.isOk(): return res - - let res = h.decodeAckMessageEIP8(input) - if res.isOk(): h.flags.incl(EIP8) - res - proc getSecrets*( - h: Handshake, authmsg: openArray[byte], - ackmsg: openArray[byte]): ConnectionSecret = + h: Handshake, authmsg: openArray[byte], ackmsg: openArray[byte] +): ConnectionSecret = ## Derive secrets from handshake `h` using encrypted AuthMessage `authmsg` and ## encrypted AckMessage `ackmsg`. var diff --git a/eth/p2p/discovery.nim b/eth/p2p/discovery.nim index e089dfb..aa0e722 100644 --- a/eth/p2p/discovery.nim +++ b/eth/p2p/discovery.nim @@ -9,24 +9,27 @@ import std/[times, net], - chronos, stint, nimcrypto/keccak, chronicles, - stew/objects, results, + chronos, + stint, + nimcrypto/keccak, + chronicles, + stew/objects, + results, ../rlp, ../common/keys, "."/[kademlia, enode] -export - Node, results +export Node, results logScope: topics = "eth p2p discovery" const # UDP packet constants. - MAC_SIZE = 256 div 8 # 32 - SIG_SIZE = 520 div 8 # 65 - HEAD_SIZE = MAC_SIZE + SIG_SIZE # 97 - EXPIRATION = 60 # let messages expire after N seconds + MAC_SIZE = 256 div 8 # 32 + SIG_SIZE = 520 div 8 # 65 + HEAD_SIZE = MAC_SIZE + SIG_SIZE # 97 + EXPIRATION = 60 # let messages expire after N seconds PROTO_VERSION = 4 type @@ -55,9 +58,14 @@ proc append*(w: var RlpWriter, a: IpAddress) = of IpAddressFamily.IPv4: w.append(a.address_v4) -proc append(w: var RlpWriter, p: Port) = w.append(p.uint) -proc append(w: var RlpWriter, pk: PublicKey) = w.append(pk.toRaw()) -proc append(w: var RlpWriter, h: MDigest[256]) = w.append(h.data) +proc append(w: var RlpWriter, p: Port) = + w.append(p.uint) + +proc append(w: var RlpWriter, pk: PublicKey) = + w.append(pk.toRaw()) + +proc append(w: var RlpWriter, h: MDigest[256]) = + w.append(h.data) proc pack(cmdId: CommandId, payload: openArray[byte], pk: PrivateKey): seq[byte] = ## Create and sign a UDP message to be sent to a remote node. 
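Reviewer note: the next hunk rewrites `pack` to assemble the discv4 datagram in place as mac(32) || signature(65) || packet-type(1) || packet-data, signing over type‖data and hashing over signature‖type‖data, instead of concatenating sequences. Below is a minimal, dependency-free Nim sketch of that layout only, using the same MAC_SIZE/SIG_SIZE/HEAD_SIZE values as the constants above; `splitPacket` is an illustrative helper and not part of this change.

const
  MAC_SIZE = 32                    # 256 div 8, keccak256 hash prefix
  SIG_SIZE = 65                    # 520 div 8, raw secp256k1 signature
  HEAD_SIZE = MAC_SIZE + SIG_SIZE  # 97

proc splitPacket(msg: openArray[byte]):
    tuple[mac, sig, typeAndData: seq[byte]] =
  ## Slice a packed datagram into the three regions that `validateMsgHash`,
  ## `recoverMsgPublicKey` and `unpack` in this module operate on.
  doAssert msg.len > HEAD_SIZE, "need header plus at least the packet-type byte"
  (mac: @(msg.toOpenArray(0, MAC_SIZE - 1)),
   sig: @(msg.toOpenArray(MAC_SIZE, HEAD_SIZE - 1)),
   typeAndData: @(msg.toOpenArray(HEAD_SIZE, msg.high)))

# usage sketch: let (mac, sig, rest) = splitPacket(packedMsg)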
@@ -65,11 +73,13 @@ proc pack(cmdId: CommandId, payload: openArray[byte], pk: PrivateKey): seq[byte] ## See https://github.com/ethereum/devp2p/blob/master/rlpx.md#node-discovery for information on ## how UDP packets are structured. - # TODO: There is a lot of unneeded allocations here - let encodedData = @[cmdId.byte] & @payload - let signature = @(pk.sign(encodedData).toRaw()) - let msgHash = keccak256.digest(signature & encodedData) - result = @(msgHash.data) & signature & encodedData + result = newSeq[byte](HEAD_SIZE + 1 + payload.len) + result[HEAD_SIZE] = cmdId.byte + result[HEAD_SIZE + 1 ..< result.len] = payload + result[MAC_SIZE ..< MAC_SIZE + SIG_SIZE] = + pk.sign(result.toOpenArray(HEAD_SIZE, result.high)).toRaw() + result[0 ..< MAC_SIZE] = + keccak256.digest(result.toOpenArray(MAC_SIZE, result.high)).data proc validateMsgHash(msg: openArray[byte]): DiscResult[MDigest[256]] = if msg.len > HEAD_SIZE: @@ -85,20 +95,20 @@ proc validateMsgHash(msg: openArray[byte]): DiscResult[MDigest[256]] = proc recoverMsgPublicKey(msg: openArray[byte]): DiscResult[PublicKey] = if msg.len <= HEAD_SIZE: return err("disc: can't get public key") - let sig = ? Signature.fromRaw(msg.toOpenArray(MAC_SIZE, HEAD_SIZE)) + let sig = ?Signature.fromRaw(msg.toOpenArray(MAC_SIZE, HEAD_SIZE)) recover(sig, msg.toOpenArray(HEAD_SIZE, msg.high)) -proc unpack(msg: openArray[byte]): tuple[cmdId: CommandId, payload: seq[byte]] - {.raises: [DiscProtocolError].} = +proc unpack( + msg: openArray[byte] +): tuple[cmdId: CommandId, payload: seq[byte]] {.raises: [DiscProtocolError].} = # Check against possible RangeDefect - if msg[HEAD_SIZE].int < CommandId.low.ord or - msg[HEAD_SIZE].int > CommandId.high.ord: + if msg[HEAD_SIZE].int < CommandId.low.ord or msg[HEAD_SIZE].int > CommandId.high.ord: raise newException(DiscProtocolError, "Unsupported packet id") - result = (cmdId: msg[HEAD_SIZE].CommandId, payload: msg[HEAD_SIZE + 1 .. ^1]) + (cmdId: msg[HEAD_SIZE].CommandId, payload: msg[HEAD_SIZE + 1 .. ^1]) -proc expiration(): uint32 = - result = uint32(epochTime() + EXPIRATION) +proc expiration(): uint64 = + uint64(getTime().toUnix() + EXPIRATION) # Wire protocol @@ -110,15 +120,16 @@ proc send(d: DiscoveryProtocol, n: Node, data: seq[byte]) = when defined(chronicles_log_level): try: # readError will raise FutureError - debug "Discovery send failed", msg = f.readError.msg + debug "Discovery send failed", + msg = f.readError.msg, address = $n.node.address except FutureError as exc: - error "Failed to get discovery send future error", msg=exc.msg + error "Failed to get discovery send future error", msg = exc.msg f.addCallback cb proc sendPing*(d: DiscoveryProtocol, n: Node): seq[byte] = - let payload = rlp.encode((PROTO_VERSION.uint, d.address, n.node.address, - expiration())) + let payload = + rlp.encode((PROTO_VERSION.uint, d.address, n.node.address, expiration())) let msg = pack(cmdPing, payload, d.privKey) result = msg[0 ..< MAC_SIZE] trace ">>> ping ", n @@ -135,7 +146,7 @@ proc sendFindNode*(d: DiscoveryProtocol, n: Node, targetNodeId: NodeId) = data[32 .. 
^1] = targetNodeId.toByteArrayBE() let payload = rlp.encode((data, expiration())) let msg = pack(cmdFindNode, payload, d.privKey) - trace ">>> find_node to ", n#, ": ", msg.toHex() + trace ">>> find_node to ", n #, ": ", msg.toHex() d.send(n, msg) proc sendNeighbours*(d: DiscoveryProtocol, node: Node, neighbours: seq[Node]) = @@ -152,18 +163,23 @@ proc sendNeighbours*(d: DiscoveryProtocol, node: Node, neighbours: seq[Node]) = nodes.setLen(0) for i, n in neighbours: - nodes.add((n.node.address.ip, n.node.address.udpPort, - n.node.address.tcpPort, n.node.pubkey)) + nodes.add( + (n.node.address.ip, n.node.address.udpPort, n.node.address.tcpPort, n.node.pubkey) + ) if nodes.len == MAX_NEIGHBOURS_PER_PACKET: flush() - if nodes.len != 0: flush() + if nodes.len != 0: + flush() proc newDiscoveryProtocol*( - privKey: PrivateKey, address: Address, + privKey: PrivateKey, + address: Address, bootstrapNodes: openArray[ENode], - bindPort: Port, bindIp = IPv4_any(), - rng = newRng()): DiscoveryProtocol = + bindPort: Port, + bindIp = IPv6_any(), + rng = newRng(), +): DiscoveryProtocol = let localNode = newNode(privKey.toPublicKey(), address) discovery = DiscoveryProtocol( @@ -171,27 +187,32 @@ proc newDiscoveryProtocol*( address: address, localNode: localNode, bindIp: bindIp, - bindPort: bindPort) + bindPort: bindPort, + ) kademlia = newKademliaProtocol(localNode, discovery, rng = rng) discovery.kademlia = kademlia - for n in bootstrapNodes: discovery.bootstrapNodes.add(newNode(n)) + for n in bootstrapNodes: + discovery.bootstrapNodes.add(newNode(n)) discovery -proc recvPing(d: DiscoveryProtocol, node: Node, msgHash: MDigest[256]) - {.raises: [ValueError].} = +proc recvPing( + d: DiscoveryProtocol, node: Node, msgHash: MDigest[256] +) {.raises: [ValueError].} = d.kademlia.recvPing(node, msgHash) -proc recvPong(d: DiscoveryProtocol, node: Node, payload: seq[byte]) - {.raises: [RlpError].} = +proc recvPong( + d: DiscoveryProtocol, node: Node, payload: seq[byte] +) {.raises: [RlpError].} = let rlp = rlpFromBytes(payload) let tok = rlp.listElem(1).toBytes() d.kademlia.recvPong(node, tok) -proc recvNeighbours(d: DiscoveryProtocol, node: Node, payload: seq[byte]) - {.raises: [RlpError].} = +proc recvNeighbours( + d: DiscoveryProtocol, node: Node, payload: seq[byte] +) {.raises: [RlpError].} = let rlp = rlpFromBytes(payload) let neighboursList = rlp.listElem(0) let sz = neighboursList.listLen() @@ -203,11 +224,9 @@ proc recvNeighbours(d: DiscoveryProtocol, node: Node, payload: seq[byte]) var ip: IpAddress case ipBlob.len of 4: - ip = IpAddress( - family: IpAddressFamily.IPv4, address_v4: toArray(4, ipBlob)) + ip = IpAddress(family: IpAddressFamily.IPv4, address_v4: toArray(4, ipBlob)) of 16: - ip = IpAddress( - family: IpAddressFamily.IPv6, address_v6: toArray(16, ipBlob)) + ip = IpAddress(family: IpAddressFamily.IPv6, address_v6: toArray(16, ipBlob)) else: error "Wrong ip address length!" 
continue @@ -222,8 +241,9 @@ proc recvNeighbours(d: DiscoveryProtocol, node: Node, payload: seq[byte]) neighbours.add(newNode(pk[], Address(ip: ip, udpPort: udpPort, tcpPort: tcpPort))) d.kademlia.recvNeighbours(node, neighbours) -proc recvFindNode(d: DiscoveryProtocol, node: Node, payload: openArray[byte]) - {.raises: [RlpError, ValueError].} = +proc recvFindNode( + d: DiscoveryProtocol, node: Node, payload: openArray[byte] +) {.raises: [RlpError, ValueError].} = let rlp = rlpFromBytes(payload) trace "<<< find_node from ", node let rng = rlp.listElem(0).toBytes @@ -234,8 +254,9 @@ proc recvFindNode(d: DiscoveryProtocol, node: Node, payload: openArray[byte]) else: trace "Invalid target public key received" -proc expirationValid(cmdId: CommandId, rlpEncodedPayload: openArray[byte]): - bool {.raises: [DiscProtocolError, RlpError].} = +proc expirationValid( + cmdId: CommandId, rlpEncodedPayload: openArray[byte] +): bool {.raises: [DiscProtocolError, RlpError].} = ## Can only raise `DiscProtocolError` and all of `RlpError` # Check if there is a payload if rlpEncodedPayload.len <= 0: @@ -250,8 +271,9 @@ proc expirationValid(cmdId: CommandId, rlpEncodedPayload: openArray[byte]): else: raise newException(DiscProtocolError, "Invalid RLP list for this packet id") -proc receive*(d: DiscoveryProtocol, a: Address, msg: openArray[byte]) - {.raises: [DiscProtocolError, RlpError, ValueError].} = +proc receive*( + d: DiscoveryProtocol, a: Address, msg: openArray[byte] +) {.raises: [DiscProtocolError, RlpError, ValueError].} = # Note: export only needed for testing let msgHash = validateMsgHash(msg) if msgHash.isOk(): @@ -280,17 +302,20 @@ proc receive*(d: DiscoveryProtocol, a: Address, msg: openArray[byte]) else: notice "Wrong msg mac from ", a -proc processClient(transp: DatagramTransport, raddr: TransportAddress): - Future[void] {.async: (raises: []).} = +proc processClient( + transp: DatagramTransport, raddr: TransportAddress +): Future[void] {.async: (raises: []).} = var proto = getUserData[DiscoveryProtocol](transp) - let buf = try: transp.getMessage() - except TransportOsError as e: - # This is likely to be local network connection issues. - warn "Transport getMessage", exception = e.name, msg = e.msg - return - except TransportError as exc: - debug "getMessage error", msg = exc.msg - return + let buf = + try: + transp.getMessage() + except TransportOsError as e: + # This is likely to be local network connection issues. 
+ warn "Transport getMessage", exception = e.name, msg = e.msg + return + except TransportError as exc: + debug "getMessage error", msg = exc.msg + return try: let a = Address(ip: raddr.address, udpPort: raddr.port, tcpPort: raddr.port) proto.receive(a, buf) @@ -326,26 +351,30 @@ proc randomNodes*(d: DiscoveryProtocol, count: int): seq[Node] = d.kademlia.randomNodes(count) when isMainModule: - import logging, stew/byteutils - - const LOCAL_BOOTNODES = [ - "enode://6456719e7267e061161c88720287a77b80718d2a3a4ff5daeba614d029dc77601b75e32190aed1c9b0b9ccb6fac3bcf000f48e54079fa79e339c25d8e9724226@127.0.0.1:30301" - ] - - addHandler(newConsoleLogger()) + import stew/byteutils, ./bootnodes block: - let m = hexToSeqByte"79664bff52ee17327b4a2d8f97d8fb32c9244d719e5038eb4f6b64da19ca6d271d659c3ad9ad7861a928ca85f8d8debfbe6b7ade26ad778f2ae2ba712567fcbd55bc09eb3e74a893d6b180370b266f6aaf3fe58a0ad95f7435bf3ddf1db940d20102f2cb842edbd4d182944382765da0ab56fb9e64a85a597e6bb27c656b4f1afb7e06b0fd4e41ccde6dba69a3c4a150845aaa4de2" + let m = + hexToSeqByte"79664bff52ee17327b4a2d8f97d8fb32c9244d719e5038eb4f6b64da19ca6d271d659c3ad9ad7861a928ca85f8d8debfbe6b7ade26ad778f2ae2ba712567fcbd55bc09eb3e74a893d6b180370b266f6aaf3fe58a0ad95f7435bf3ddf1db940d20102f2cb842edbd4d182944382765da0ab56fb9e64a85a597e6bb27c656b4f1afb7e06b0fd4e41ccde6dba69a3c4a150845aaa4de2" discard validateMsgHash(m).expect("valid hash") var remotePubkey = recoverMsgPublicKey(m).expect("valid key") let (cmdId, payload) = unpack(m) - doAssert(payload == hexToSeqByte"f2cb842edbd4d182944382765da0ab56fb9e64a85a597e6bb27c656b4f1afb7e06b0fd4e41ccde6dba69a3c4a150845aaa4de2") + doAssert( + payload == + hexToSeqByte"f2cb842edbd4d182944382765da0ab56fb9e64a85a597e6bb27c656b4f1afb7e06b0fd4e41ccde6dba69a3c4a150845aaa4de2" + ) doAssert(cmdId == cmdPong) - doAssert(remotePubkey == PublicKey.fromHex( - "78de8a0916848093c73790ead81d1928bec737d565119932b98c6b100d944b7a95e94f847f689fc723399d2e31129d182f7ef3863f2b4c820abbf3ab2722344d")[]) + doAssert( + remotePubkey == + PublicKey.fromHex( + "78de8a0916848093c73790ead81d1928bec737d565119932b98c6b100d944b7a95e94f847f689fc723399d2e31129d182f7ef3863f2b4c820abbf3ab2722344d" + )[] + ) - let privKey = PrivateKey.fromHex("a2b50376a79b1a8c8a3296485572bdfbf54708bb46d3c25d73d2723aaaf6a617")[] + let privKey = PrivateKey.fromHex( + "a2b50376a79b1a8c8a3296485572bdfbf54708bb46d3c25d73d2723aaaf6a617" + )[] # echo privKey @@ -355,14 +384,14 @@ when isMainModule: # let (remotePubkey, cmdId, payload) = unpack(m) # doAssert(remotePubkey.raw_key.toHex == privKey.public_key.raw_key.toHex) - var bootnodes = newSeq[ENode]() - for item in LOCAL_BOOTNODES: - bootnodes.add(ENode.fromString(item)[]) + var nodes = newSeq[ENode]() + for item in MainnetBootnodes: + nodes.add(ENode.fromString(item)[]) let listenPort = Port(30310) var address = Address(udpPort: listenPort, tcpPort: listenPort) address.ip.family = IpAddressFamily.IPv4 - let discovery = newDiscoveryProtocol(privkey, address, bootnodes, listenPort) + let discovery = newDiscoveryProtocol(privKey, address, nodes, bindPort = listenPort) echo discovery.localNode.node.pubkey echo "this_node.id: ", discovery.localNode.id.toHex() @@ -372,5 +401,6 @@ when isMainModule: proc test() {.async.} = {.gcsafe.}: await discovery.bootstrap() - + for node in discovery.randomNodes(discovery.kademlia.nodesDiscovered): + echo node waitFor test() diff --git a/eth/p2p/discoveryv5/routing_table.nim b/eth/p2p/discoveryv5/routing_table.nim index 08b202d..7385c08 100644 --- a/eth/p2p/discoveryv5/routing_table.nim +++ 
b/eth/p2p/discoveryv5/routing_table.nim @@ -545,7 +545,8 @@ proc randomNodes*(r: RoutingTable, maxAmount: int, # while it will take less total time compared to e.g. an (async) # randomLookup, the time might be wasted as all nodes are possibly seen # already. - while len(seen) < maxAmount: + # We check against the number of nodes to avoid an infinite loop in case of a filter. + while len(result) < maxAmount and len(seen) < sz: let bucket = r.rng[].sample(r.buckets) if bucket.nodes.len != 0: let node = r.rng[].sample(bucket.nodes) diff --git a/eth/p2p/kademlia.nim b/eth/p2p/kademlia.nim index ad77f64..e3d49d7 100644 --- a/eth/p2p/kademlia.nim +++ b/eth/p2p/kademlia.nim @@ -49,7 +49,6 @@ type istart, iend: UInt256 nodes: seq[Node] replacementCache: seq[Node] - lastUpdated: float # epochTime CommandId* = enum cmdPing = 1 @@ -216,7 +215,6 @@ proc add(k: KBucket, n: Node): Node = ## If the bucket is full, we add the node to the bucket's replacement cache and return the ## node at the head of the list (i.e. the least recently seen), which should be evicted if it ## fails to respond to a ping. - k.lastUpdated = epochTime() let nodeIdx = k.nodes.find(n) if nodeIdx != -1: k.nodes.delete(nodeIdx) diff --git a/eth/p2p/p2p_backends_helpers.nim b/eth/p2p/p2p_backends_helpers.nim index bc0597e..b04b9b0 100644 --- a/eth/p2p/p2p_backends_helpers.nim +++ b/eth/p2p/p2p_backends_helpers.nim @@ -16,7 +16,7 @@ let protocolManager = ProtocolManager() proc registerProtocol*(proto: ProtocolInfo) {.gcsafe.} = {.gcsafe.}: proto.index = protocolManager.protocols.len - if proto.name == "p2p": + if proto.capability.name == "p2p": doAssert(proto.index == 0) protocolManager.protocols.add proto diff --git a/eth/p2p/p2p_protocol_dsl.nim b/eth/p2p/p2p_protocol_dsl.nim index ef3644f..65eb450 100644 --- a/eth/p2p/p2p_protocol_dsl.nim +++ b/eth/p2p/p2p_protocol_dsl.nim @@ -25,7 +25,7 @@ import std/[options, sequtils, macrocache], results, - stew/shims/macros, chronos, faststreams/outputs + stew/shims/macros, chronos type MessageKind* = enum @@ -35,7 +35,7 @@ type msgResponse Message* = ref object - id*: Opt[uint64] + id*: uint64 ident*: NimNode kind*: MessageKind procDef*: NimNode @@ -351,15 +351,17 @@ proc init*(T: type P2PProtocol, backendFactory: BackendFactory, if not result.backend.afterProtocolInit.isNil: result.backend.afterProtocolInit(result) -proc augmentUserHandler(p: P2PProtocol, userHandlerProc: NimNode, msgId = Opt.none(uint64)) = +proc augmentUserHandler(p: P2PProtocol, userHandlerProc: NimNode, canRaise: bool, msgId = Opt.none(uint64)) = ## This procs adds a set of common helpers available in all messages handlers ## (e.g. `perProtocolMsgId`, `peer.state`, etc). 
- userHandlerProc.addPragma ident"gcsafe" - # we only take the pragma - let dummy = quote do: - proc dummy(): Future[void] {.async: (raises: [EthP2PError]).} + let dummy = if canRaise: + quote do: + proc dummy(): Future[void] {.async: (raises: [CancelledError, EthP2PError]).} + else: + quote do: + proc dummy(): Future[void] {.async: (raises: []).} if p.isRlpx: userHandlerProc.addPragma dummy.pragma[0] @@ -402,27 +404,16 @@ proc augmentUserHandler(p: P2PProtocol, userHandlerProc: NimNode, msgId = Opt.no template networkState(`peerVar`: `PeerType`): `NetworkStateType` {.used.} = `NetworkStateType`(`getNetworkState`(`peerVar`.network, `protocolInfo`)) -proc addExceptionHandler(userHandlerProc: NimNode) = - let bodyTemp = userHandlerProc.body - userHandlerProc.body = quote do: - try: - `bodyTemp` - except CancelledError as exc: - raise newException(EthP2PError, exc.msg) - except CatchableError as exc: - raise newException(EthP2PError, exc.msg) - proc addPreludeDefs(userHandlerProc: NimNode, definitions: NimNode) = userHandlerProc.body[0].add definitions -proc eventHandlerToProc(p: P2PProtocol, doBlock: NimNode, handlerName: string): NimNode = +proc eventHandlerToProc(p: P2PProtocol, doBlock: NimNode, handlerName: string, canRaise: bool): NimNode = ## Turns a "named" do block to a regular async proc ## (e.g. onPeerConnected do ...) result = newTree(nnkProcDef) doBlock.copyChildrenTo(result) result.name = ident(p.name & handlerName) # genSym(nskProc, p.name & handlerName) - p.augmentUserHandler result - result.addExceptionHandler() + p.augmentUserHandler result, canRaise proc addTimeoutParam(procDef: NimNode, defaultValue: int64) = var @@ -477,7 +468,7 @@ proc newMsg(protocol: P2PProtocol, kind: MessageKind, msgId: uint64, recBody = newTree(nnkDistinctTy, recName) result = Message(protocol: protocol, - id: Opt.some(msgId), + id: msgId, ident: msgIdent, kind: kind, procDef: procDef, @@ -489,7 +480,7 @@ proc newMsg(protocol: P2PProtocol, kind: MessageKind, msgId: uint64, if procDef.body.kind != nnkEmpty: var userHandler = copy procDef - protocol.augmentUserHandler userHandler, Opt.some(msgId) + protocol.augmentUserHandler userHandler, true, Opt.some(msgId) userHandler.name = ident(msgName & "UserHandler") # Request and Response handlers get an extra `reqId` parameter if the @@ -518,7 +509,6 @@ proc newMsg(protocol: P2PProtocol, kind: MessageKind, msgId: uint64, of msgResponse: userHandler.applyDecorator protocol.incomingResponseDecorator else: discard - userHandler.addExceptionHandler() result.userHandler = userHandler protocol.outRecvProcs.add result.userHandler @@ -543,7 +533,7 @@ proc addMsg(p: P2PProtocol, msgId: uint64, procDef: NimNode) = let responseIdent = ident($procDef.name & "Response") response = Message(protocol: p, - id: Opt.none(uint64), + id: msgId, ident: responseIdent, kind: msgResponse, recName: returnType, @@ -589,7 +579,10 @@ proc createSendProc*(msg: Message, name = if nameSuffix.len == 0: msg.identWithExportMarker else: ident($msg.ident & nameSuffix) - pragmas = if procType == nnkProcDef: newTree(nnkPragma, ident"gcsafe") + dummy = quote do: + proc dummy(): Future[void] {.async: (raises: [CancelledError, EthP2PError], raw: true).} + + pragmas = if procType == nnkProcDef: dummy.pragma else: newEmptyNode() var def = newNimNode(procType).add( @@ -641,7 +634,7 @@ proc createSendProc*(msg: Message, of msgNotification: discard - def[3][0] = if procType == nnkMacroDef: + def[3][0] = if procType in [nnkMacroDef, nnkTemplateDef]: ident "untyped" elif msg.kind == msgRequest and not 
isRawSender: Fut(msg.requestResultType) @@ -699,77 +692,6 @@ proc writeParamsAsRecord*(params: openArray[NimNode], var `writer` = init(WriterType(`Format`), `outputStream`) writeValue(`writer`, `param`) -proc useStandardBody*(sendProc: SendProc, - preSerializationStep: proc(stream: NimNode): NimNode, - postSerializationStep: proc(stream: NimNode): NimNode, - sendCallGenerator: proc (peer, bytes: NimNode): NimNode) = - let - msg = sendProc.msg - msgBytes = ident "msgBytes" - recipient = sendProc.peerParam - sendCall = sendCallGenerator(recipient, msgBytes) - - if sendProc.msgParams.len == 0: - sendProc.setBody quote do: - var `msgBytes`: seq[byte] - `sendCall` - return - - let - outputStream = ident "outputStream" - - msgRecName = msg.recName - Format = msg.protocol.backend.SerializationFormat - - preSerialization = if preSerializationStep.isNil: newStmtList() - else: preSerializationStep(outputStream) - - serialization = writeParamsAsRecord(sendProc.msgParams, - outputStream, Format, msgRecName) - - postSerialization = if postSerializationStep.isNil: newStmtList() - else: postSerializationStep(outputStream) - - tracing = when not tracingEnabled: - newStmtList() - else: - logSentMsgFields(recipient, - msg.protocol.protocolInfo, - $msg.ident, - sendProc.msgParams) - - sendProc.setBody quote do: - mixin init, WriterType, beginRecord, endRecord, getOutput - - var `outputStream` = memoryOutput() - `preSerialization` - `serialization` - `postSerialization` - `tracing` - let `msgBytes` = getOutput(`outputStream`) - `sendCall` - -proc correctSerializerProcParams(params: NimNode) = - # A serializer proc is just like a send proc, but: - # 1. it has a void return type - params[0] = ident "void" - # 2. The peer params is replaced with OutputStream - params[1] = newIdentDefs(streamVar, bindSym "OutputStream") - # 3. 
The timeout param is removed - params.del(params.len - 1) - -proc createSerializer*(msg: Message, procType = nnkProcDef): NimNode = - var serializer = msg.createSendProc(procType, nameSuffix = "Serializer") - correctSerializerProcParams serializer.def.params - - serializer.setBody writeParamsAsRecord( - serializer.msgParams, - streamVar, - msg.protocol.backend.SerializationFormat, - msg.recName) - - return serializer.def - proc defineThunk*(msg: Message, thunk: NimNode) = let protocol = msg.protocol @@ -822,9 +744,9 @@ proc netInit*(p: P2PProtocol): NimNode = p.backend.NetworkType, p.NetworkStateType) -proc createHandshakeTemplate*(msg: Message, - rawSendProc, handshakeImpl, - nextMsg: NimNode): SendProc = +proc createHandshakeTemplate*( + msg: Message, rawSendProc, handshakeImpl, nextMsg: NimNode +): SendProc = let handshakeExchanger = msg.createSendProc(procType = nnkTemplateDef) forwardCall = newCall(rawSendProc).appendAllInputParams(handshakeExchanger.def) @@ -834,19 +756,13 @@ proc createHandshakeTemplate*(msg: Message, forwardCall[1] = peerVar forwardCall.del(forwardCall.len - 1) - let peerVar = genSym(nskLet ,"peer") + let peerVar = genSym(nskLet, "peer") handshakeExchanger.setBody quote do: - try: - let `peerVar` = `peerValue` - let sendingFuture = `forwardCall` - `handshakeImpl`(`peerVar`, - sendingFuture, - `nextMsg`(`peerVar`, `msgRecName`), - `timeoutVar`) - except PeerDisconnected as exc: - raise newException(EthP2PError, exc.msg) - except P2PInternalError as exc: - raise newException(EthP2PError, exc.msg) + let `peerVar` = `peerValue` + let sendingFuture = `forwardCall` + `handshakeImpl`[`msgRecName`]( + `peerVar`, sendingFuture, `nextMsg`(`peerVar`, `msgRecName`), `timeoutVar` + ) return handshakeExchanger @@ -915,10 +831,10 @@ proc processProtocolBody*(p: P2PProtocol, protocolBody: NimNode) = inc nextId elif eqIdent(n[0], "onPeerConnected"): - p.onPeerConnected = p.eventHandlerToProc(n[1], "PeerConnected") + p.onPeerConnected = p.eventHandlerToProc(n[1], "PeerConnected", true) elif eqIdent(n[0], "onPeerDisconnected"): - p.onPeerDisconnected = p.eventHandlerToProc(n[1], "PeerDisconnected") + p.onPeerDisconnected = p.eventHandlerToProc(n[1], "PeerDisconnected", false) else: error(repr(n) & " is not a recognized call in P2P protocol definitions", n) @@ -965,11 +881,8 @@ proc genTypeSection*(p: P2PProtocol): NimNode = if msg.procDef == nil: continue - # FIXME: Can `msg.id` be missing, at all? - doAssert msg.id.isSome() - let - msgId = msg.id.value + msgId = msg.id msgName = msg.ident msgRecName = msg.recName msgStrongRecName = msg.strongRecName @@ -1019,7 +932,7 @@ proc genCode*(p: P2PProtocol): NimNode = regBody.add newCall(p.backend.registerProtocol, protocolVar) result.add quote do: - proc `protocolReg`() {.raises: [RlpError].} = + proc `protocolReg`() = let `protocolVar` = `protocolInit` `regBody` `protocolReg`() @@ -1054,12 +967,11 @@ macro emitForSingleBackend( result = p.genCode() - when defined(p2pProtocolDebug): - try: - result.storeMacroResult true - except IOError: - # IO error so the generated nim code might not be stored, don't sweat it. - discard + try: + result.storeMacroResult true + except IOError: + # IO error so the generated nim code might not be stored, don't sweat it. 
+ discard macro emitForAllBackends(backendSyms: typed, options: untyped, body: untyped): untyped = let name = $(options[0]) diff --git a/eth/p2p/private/p2p_types.nim b/eth/p2p/private/p2p_types.nim index abf7f58..7518bde 100644 --- a/eth/p2p/private/p2p_types.nim +++ b/eth/p2p/private/p2p_types.nim @@ -15,12 +15,9 @@ import chronos, results, ".."/../[rlp], ../../common/[base, keys], - ".."/[enode, kademlia, discovery, rlpxcrypt] + ".."/[enode, kademlia, discovery, rlpxtransport] -export base.NetworkId - -const - useSnappy* = defined(useSnappy) +export base.NetworkId, rlpxtransport, kademlia type EthereumNode* = ref object @@ -39,8 +36,6 @@ type listeningServer*: StreamServer protocolStates*: seq[RootRef] discovery*: DiscoveryProtocol - when useSnappy: - protocolVersion*: uint64 rng*: ref HmacDrbgContext Peer* = ref object @@ -48,16 +43,15 @@ type network*: EthereumNode # Private fields: - transport*: StreamTransport + transport*: RlpxTransport dispatcher*: Dispatcher lastReqId*: Opt[uint64] - secretsState*: SecretState connectionState*: ConnectionState protocolStates*: seq[RootRef] outstandingRequests*: seq[Deque[OutstandingRequest]] # per `msgId` table awaitedMessages*: seq[FutureBase] # per `msgId` table - when useSnappy: - snappyEnabled*: bool + snappyEnabled*: bool + clientId*: string SeenNode* = object nodeId*: NodeId @@ -111,8 +105,7 @@ type protocols*: seq[ProtocolInfo] ProtocolInfo* = ref object - name*: string - version*: uint64 + capability*: Capability messages*: seq[MessageInfo] index*: int # the position of the protocol in the # ordered list of supported protocols @@ -120,8 +113,9 @@ type # Private fields: peerStateInitializer*: PeerStateInitializer networkStateInitializer*: NetworkStateInitializer - handshake*: HandshakeStep - disconnectHandler*: DisconnectionHandler + + onPeerConnected*: OnPeerConnectedHandler + onPeerDisconnected*: OnPeerDisconnectedHandler MessageInfo* = ref object id*: uint64 # this is a `msgId` (as opposed to a `reqId`) @@ -132,6 +126,7 @@ type printer*: MessageContentPrinter requestResolver*: RequestResolver nextMsgResolver*: NextMsgResolver + failResolver*: FailResolver Dispatcher* = ref object # private # The dispatcher stores the mapping of negotiated message IDs between @@ -157,13 +152,12 @@ type OutstandingRequest* = object id*: uint64 # a `reqId` that may be used for response future*: FutureBase - timeoutAt*: Moment # Private types: MessageHandlerDecorator* = proc(msgId: uint64, n: NimNode): NimNode - ThunkProc* = proc(x: Peer, msgId: uint64, data: Rlp): Future[void] - {.gcsafe, async: (raises: [RlpError, EthP2PError]).} + ThunkProc* = proc(x: Peer, data: Rlp): Future[void] + {.async: (raises: [CancelledError, EthP2PError]).} MessageContentPrinter* = proc(msg: pointer): string {.gcsafe, raises: [].} @@ -174,17 +168,20 @@ type NextMsgResolver* = proc(msgData: Rlp, future: FutureBase) {.gcsafe, raises: [RlpError].} + FailResolver* = proc(reason: DisconnectionReason, future: FutureBase) + {.gcsafe, raises: [].} + PeerStateInitializer* = proc(peer: Peer): RootRef {.gcsafe, raises: [].} NetworkStateInitializer* = proc(network: EthereumNode): RootRef {.gcsafe, raises: [].} - HandshakeStep* = proc(peer: Peer): Future[void] - {.gcsafe, async: (raises: [EthP2PError]).} + OnPeerConnectedHandler* = proc(peer: Peer): Future[void] + {.async: (raises: [CancelledError, EthP2PError]).} - DisconnectionHandler* = proc(peer: Peer, reason: DisconnectionReason): - Future[void] {.gcsafe, async: (raises: [EthP2PError]).} + OnPeerDisconnectedHandler* = proc(peer: Peer, 
reason: DisconnectionReason): + Future[void] {.async: (raises: []).} ConnectionState* = enum None, @@ -209,12 +206,14 @@ type ClientQuitting = 0x08, UnexpectedIdentity = 0x09, SelfConnection = 0x0A, - MessageTimeout = 0x0B, + PingTimeout = 0x0B, SubprotocolReason = 0x10 Address = enode.Address proc `$`*(peer: Peer): string = $peer.remote +proc `$`*(v: Capability): string = v.name & "/" & $v.version + proc toENode*(v: EthereumNode): ENode = ENode(pubkey: v.keys.pubkey, address: v.address) diff --git a/eth/p2p/rlpx.nim b/eth/p2p/rlpx.nim index c078169..a6bf0f9 100644 --- a/eth/p2p/rlpx.nim +++ b/eth/p2p/rlpx.nim @@ -9,10 +9,11 @@ # according to those terms. ## This module implements the `RLPx` Transport Protocol defined at -## `RLPx `_. +## `RLPx `_ +## in its EIP-8 version. ## -## Use NIM command line optipn `-d:p2pProtocolDebug` for dumping the -## generated driver code (just to have it stored somewhere lest one forgets.) +## This modules implements version 5 of the p2p protocol as defined by EIP-706 - +## earlier versions are not supported. ## ## Both, the message ID and the request/response ID are now unsigned. This goes ## along with the RLPx specs (see above) and the sub-protocol specs at @@ -25,35 +26,47 @@ {.push raises: [].} import - std/[algorithm, deques, options, typetraits, os], - stew/shims/macros, chronicles, nimcrypto/utils, chronos, metrics, - ".."/[rlp, common, async_utils], - ./private/p2p_types, "."/[kademlia, auth, rlpxcrypt, enode, p2p_protocol_dsl] + std/[algorithm, deques, options, os, sequtils, strutils, typetraits], + stew/byteutils, + stew/shims/macros, + chronicles, + chronos, + metrics, + snappy, + ../rlp, + ./private/p2p_types, + ./[kademlia, auth, rlpxcrypt, enode, p2p_protocol_dsl] -# TODO: This doesn't get enabled currently in any of the builds, so we send a -# devp2p protocol handshake message with version. Need to check if some peers -# drop us because of this. -when useSnappy: - import snappy - const devp2pSnappyVersion* = 5 +const + devp2pSnappyVersion* = 5 + ## EIP-706 version of devp2p, with snappy compression - no support offered + ## for earlier versions + maxMsgSize = 1024 * 1024 * 16 + ## The maximum message size is normally limited by the 24-bit length field in + ## the message header but in the case of snappy, we need to protect against + ## decompression bombs: + ## https://eips.ethereum.org/EIPS/eip-706#avoiding-dos-attacks + + connectionTimeout = 10.seconds + + msgIdHello = byte 0 + msgIdDisconnect = byte 1 + msgIdPing = byte 2 + msgIdPong = byte 3 # TODO: chronicles re-export here is added for the error # "undeclared identifier: 'activeChroniclesStream'", when the code using p2p # does not import chronicles. Need to resolve this properly. 
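
# --- Illustrative sketch, not part of the patch ---
# The constants introduced above describe the EIP-706 wire format: a message is
# an uncompressed RLP-encoded message id followed by a snappy-compressed
# payload, and decompression is capped at maxMsgSize to guard against
# decompression bombs. The helper names and import paths below are assumptions
# made only so this stands alone; the real encode/decode paths are sendMsg and
# recvMsg further down in this patch.
import snappy, eth/rlp

const maxFrameSize = 1024 * 1024 * 16 # mirrors maxMsgSize above

proc encodeFrame(msgId: uint64, payload: seq[byte]): seq[byte] =
  # message id first (never compressed), then the snappy-compressed payload
  result.add rlp.encodeInt(msgId).data()
  result.add snappy.encode(payload)

proc decodeFrame(frame: seq[byte]): tuple[msgId: uint64, payload: seq[byte]] =
  var r = rlpFromBytes(frame)
  result.msgId = r.read(uint64)
  if r.hasData():
    # snappy.decode returns an empty seq if the data is corrupt or would
    # expand beyond the given limit
    result.payload =
      snappy.decode(frame.toOpenArray(r.position, frame.high), maxFrameSize)
# --- end sketch ---
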
-export - options, p2pProtocol, rlp, chronicles, metrics +export options, p2pProtocol, rlp, chronicles, metrics -declarePublicGauge rlpx_connected_peers, - "Number of connected peers in the pool" +declarePublicGauge rlpx_connected_peers, "Number of connected peers in the pool" -declarePublicCounter rlpx_connect_success, - "Number of successfull rlpx connects" +declarePublicCounter rlpx_connect_success, "Number of successfull rlpx connects" declarePublicCounter rlpx_connect_failure, "Number of rlpx connects that failed", labels = ["reason"] -declarePublicCounter rlpx_accept_success, - "Number of successful rlpx accepted peers" +declarePublicCounter rlpx_accept_success, "Number of successful rlpx accepted peers" declarePublicCounter rlpx_accept_failure, "Number of rlpx accept attempts that failed", labels = ["reason"] @@ -76,10 +89,9 @@ type DisconnectionReasonList = object value: DisconnectionReason - Address = enode.Address - -proc read(rlp: var Rlp; T: type DisconnectionReasonList): T - {.gcsafe, raises: [RlpError].} = +proc read( + rlp: var Rlp, T: type DisconnectionReasonList +): T {.gcsafe, raises: [RlpError].} = ## Rlp mixin: `DisconnectionReasonList` parser if rlp.isList: @@ -88,14 +100,12 @@ proc read(rlp: var Rlp; T: type DisconnectionReasonList): T # exactly one item. if rlp.rawData.len < 3: # avoids looping through all items when parsing for an overlarge array - return DisconnectionReasonList( - value: rlp.read(array[1,DisconnectionReason])[0]) + return DisconnectionReasonList(value: rlp.read(array[1, DisconnectionReason])[0]) # Also accepted: a single byte reason code. Is is typically used # by variants of the reference implementation `Geth` elif rlp.blobLen <= 1: - return DisconnectionReasonList( - value: rlp.read(DisconnectionReason)) + return DisconnectionReasonList(value: rlp.read(DisconnectionReason)) # Also accepted: a blob of a list (aka object) of reason code. It is # used by `bor`, a `geth` fork @@ -103,37 +113,33 @@ proc read(rlp: var Rlp; T: type DisconnectionReasonList): T var subList = rlp.toBytes.rlpFromBytes if subList.isList: # Ditto, see above. - return DisconnectionReasonList( - value: subList.read(array[1,DisconnectionReason])[0]) + return + DisconnectionReasonList(value: subList.read(array[1, DisconnectionReason])[0]) raise newException(RlpTypeMismatch, "Single entry list expected") - -const - devp2pVersion* = 4 - maxMsgSize = 1024 * 1024 * 10 - HandshakeTimeout = MessageTimeout - include p2p_tracing when tracingEnabled: - import - eth/common/eth_types_json_serialization + import eth/common/eth_types_json_serialization export # XXX: This is a work-around for a Nim issue. 
# See a more detailed comment in p2p_tracing.nim - init, writeValue, getOutput + init, + writeValue, + getOutput -proc init*[MsgName](T: type ResponderWithId[MsgName], - peer: Peer, reqId: uint64): T = +proc init*[MsgName](T: type ResponderWithId[MsgName], peer: Peer, reqId: uint64): T = T(peer: peer, reqId: reqId) proc init*[MsgName](T: type ResponderWithoutId[MsgName], peer: Peer): T = T(peer) -chronicles.formatIt(Peer): $(it.remote) -chronicles.formatIt(Opt[uint64]): (if it.isSome(): $it.value else: "-1") +chronicles.formatIt(Peer): + $(it.remote) +chronicles.formatIt(Opt[uint64]): + (if it.isSome(): $it.value else: "-1") include p2p_backends_helpers @@ -144,35 +150,9 @@ proc requestResolver[MsgType](msg: pointer, future: FutureBase) {.gcsafe.} = f.complete some(cast[ptr MsgType](msg)[]) else: f.complete none(MsgType) - else: - # This future was already resolved, but let's do some sanity checks - # here. The only reasonable explanation is that the request should - # have timed out. - if msg != nil: - try: - if f.read.isSome: - doAssert false, "trying to resolve a request twice" - else: - doAssert false, "trying to resolve a timed out request with a value" - except CatchableError as e: - debug "Exception in requestResolver()", err = e.msg, errName = e.name - else: - try: - if not f.read.isSome: - doAssert false, "a request timed out twice" - # This can except when the future still completes with an error. - # E.g. the `sendMsg` fails because of an already closed transport or a - # broken pipe - except TransportOsError as e: - # E.g. broken pipe - trace "TransportOsError during request", err = e.msg, errName = e.name - except TransportError: - trace "Transport got closed during request" - except CatchableError as e: - debug "Exception in requestResolver()", err = e.msg, errName = e.name proc linkSendFailureToReqFuture[S, R](sendFut: Future[S], resFut: Future[R]) = - sendFut.addCallback() do (arg: pointer): + sendFut.addCallback do(arg: pointer): # Avoiding potentially double future completions if not resFut.finished: if sendFut.failed: @@ -184,28 +164,34 @@ proc messagePrinter[MsgType](msg: pointer): string {.gcsafe.} = # tremendously (for reasons not yet known) # result = $(cast[ptr MsgType](msg)[]) -proc disconnect*(peer: Peer, reason: DisconnectionReason, - notifyOtherPeer = false) {.async: (raises:[]).} +proc disconnect*( + peer: Peer, reason: DisconnectionReason, notifyOtherPeer = false +) {.async: (raises: []).} +# TODO Rework the disconnect-and-raise flow to not do both raising +# and disconnection - this results in convoluted control flow and redundant +# disconnect calls template raisePeerDisconnected(msg: string, r: DisconnectionReason) = var e = newException(PeerDisconnected, msg) e.reason = r raise e -proc disconnectAndRaise(peer: Peer, - reason: DisconnectionReason, - msg: string) {.async: - (raises: [PeerDisconnected]).} = - let r = reason - await peer.disconnect(r) - raisePeerDisconnected(msg, r) +proc disconnectAndRaise( + peer: Peer, reason: DisconnectionReason, msg: string +) {.async: (raises: [PeerDisconnected]).} = + if reason == BreachOfProtocol: + warn "TODO Raising protocol breach", + remote = peer.remote, clientId = peer.clientId, msg + await peer.disconnect(reason) + raisePeerDisconnected(msg, reason) -proc handshakeImpl[T](peer: Peer, - sendFut: Future[void], - responseFut: Future[T], - timeout: Duration): Future[T] {.async: - (raises: [PeerDisconnected, P2PInternalError]).} = - sendFut.addCallback do (arg: pointer) {.gcsafe.}: +proc handshakeImpl[T]( + peer: 
Peer, + sendFut: Future[void], + responseFut: auto, # Future[T].Raising([CancelledError, EthP2PError]), + timeout: Duration, +): Future[T] {.async: (raises: [CancelledError, EthP2PError]).} = + sendFut.addCallback do(arg: pointer) {.gcsafe.}: if sendFut.failed: debug "Handshake message not delivered", peer @@ -219,35 +205,23 @@ proc handshakeImpl[T](peer: Peer, # understanding what error occured where. # And also, incoming and outgoing disconnect errors should be seperated, # probably by seperating the actual disconnect call to begin with. - await disconnectAndRaise(peer, HandshakeTimeout, - "Protocol handshake was not received in time.") - except CatchableError as exc: - raise newException(P2PInternalError, exc.msg) + await disconnectAndRaise(peer, TcpError, T.name() & " was not received in time.") # Dispatcher # -proc `==`(lhs, rhs: Dispatcher): bool = - lhs.activeProtocols == rhs.activeProtocols - proc describeProtocols(d: Dispatcher): string = - result = "" - for protocol in d.activeProtocols: - if result.len != 0: result.add(',') - for c in protocol.name: result.add(c) + d.activeProtocols.mapIt($it.capability).join(",") proc numProtocols(d: Dispatcher): int = d.activeProtocols.len -proc getDispatcher(node: EthereumNode, - otherPeerCapabilities: openArray[Capability]): Dispatcher = - # TODO: sub-optimal solution until progress is made here: - # https://github.com/nim-lang/Nim/issues/7457 - # We should be able to find an existing dispatcher without allocating a new one - - new result - newSeq(result.protocolOffsets, protocolCount()) - result.protocolOffsets.fill Opt.none(uint64) +proc getDispatcher( + node: EthereumNode, otherPeerCapabilities: openArray[Capability] +): Opt[Dispatcher] = + let dispatcher = Dispatcher() + newSeq(dispatcher.protocolOffsets, protocolCount()) + dispatcher.protocolOffsets.fill Opt.none(uint64) var nextUserMsgId = 0x10u64 @@ -255,9 +229,8 @@ proc getDispatcher(node: EthereumNode, let idx = localProtocol.index block findMatchingProtocol: for remoteCapability in otherPeerCapabilities: - if localProtocol.name == remoteCapability.name and - localProtocol.version == remoteCapability.version: - result.protocolOffsets[idx] = Opt.some(nextUserMsgId) + if localProtocol.capability == remoteCapability: + dispatcher.protocolOffsets[idx] = Opt.some(nextUserMsgId) nextUserMsgId += localProtocol.messages.len.uint64 break findMatchingProtocol @@ -265,85 +238,96 @@ proc getDispatcher(node: EthereumNode, for i in 0 ..< src.len: dest[index + i] = src[i] - result.messages = newSeq[MessageInfo](nextUserMsgId) - devp2pInfo.messages.copyTo(result.messages, 0) + dispatcher.messages = newSeq[MessageInfo](nextUserMsgId) + devp2pInfo.messages.copyTo(dispatcher.messages, 0) for localProtocol in node.protocols: let idx = localProtocol.index - if result.protocolOffsets[idx].isSome: - result.activeProtocols.add localProtocol - localProtocol.messages.copyTo(result.messages, - result.protocolOffsets[idx].value.int) + if dispatcher.protocolOffsets[idx].isSome: + dispatcher.activeProtocols.add localProtocol + localProtocol.messages.copyTo( + dispatcher.messages, dispatcher.protocolOffsets[idx].value.int + ) + + if dispatcher.numProtocols == 0: + Opt.none(Dispatcher) + else: + Opt.some(dispatcher) proc getMsgName*(peer: Peer, msgId: uint64): string = - if not peer.dispatcher.isNil and - msgId < peer.dispatcher.messages.len.uint64 and - not peer.dispatcher.messages[msgId].isNil: + if not peer.dispatcher.isNil and msgId < peer.dispatcher.messages.len.uint64 and + not 
peer.dispatcher.messages[msgId].isNil: return peer.dispatcher.messages[msgId].name else: - return case msgId - of 0: "hello" - of 1: "disconnect" - of 2: "ping" - of 3: "pong" - else: $msgId - -proc getMsgMetadata*(peer: Peer, msgId: uint64): (ProtocolInfo, MessageInfo) = - doAssert msgId >= 0 - - let dpInfo = devp2pInfo() - if msgId <= dpInfo.messages[^1].id: - return (dpInfo, dpInfo.messages[msgId]) - - if msgId < peer.dispatcher.messages.len.uint64: - let numProtocol = protocolCount() - for i in 0 ..< numProtocol: - let protocol = getProtocol(i) - let offset = peer.dispatcher.protocolOffsets[i] - if offset.isSome and - offset.value + protocol.messages[^1].id >= msgId: - return (protocol, peer.dispatcher.messages[msgId]) + return + case msgId + of msgIdHello: + "hello" + of msgIdDisconnect: + "disconnect" + of msgIdPing: + "ping" + of msgIdPong: + "pong" + else: + $msgId # Protocol info objects # -proc initProtocol(name: string, version: uint64, - peerInit: PeerStateInitializer, - networkInit: NetworkStateInitializer): ProtocolInfo = +proc initProtocol( + name: string, + version: uint64, + peerInit: PeerStateInitializer, + networkInit: NetworkStateInitializer, +): ProtocolInfo = ProtocolInfo( - name : name, - version : version, + capability: Capability(name: name, version: version), messages: @[], peerStateInitializer: peerInit, - networkStateInitializer: networkInit + networkStateInitializer: networkInit, ) -proc setEventHandlers(p: ProtocolInfo, - handshake: HandshakeStep, - disconnectHandler: DisconnectionHandler) = - p.handshake = handshake - p.disconnectHandler = disconnectHandler - -func asCapability*(p: ProtocolInfo): Capability = - result.name = p.name - result.version = p.version +proc setEventHandlers( + p: ProtocolInfo, + onPeerConnected: OnPeerConnectedHandler, + onPeerDisconnected: OnPeerDisconnectedHandler, +) = + p.onPeerConnected = onPeerConnected + p.onPeerDisconnected = onPeerDisconnected proc cmp*(lhs, rhs: ProtocolInfo): int = - return cmp(lhs.name, rhs.name) + let c = cmp(lhs.capability.name, rhs.capability.name) + if c == 0: + # Highest version first! 
+ -cmp(lhs.capability.version, rhs.capability.version) + else: + c -proc nextMsgResolver[MsgType](msgData: Rlp, future: FutureBase) - {.gcsafe, raises: [RlpError].} = +proc nextMsgResolver[MsgType]( + msgData: Rlp, future: FutureBase +) {.gcsafe, raises: [RlpError].} = var reader = msgData - Future[MsgType](future).complete reader.readRecordType(MsgType, - MsgType.rlpFieldsCount > 1) + Future[MsgType](future).complete reader.readRecordType( + MsgType, MsgType.rlpFieldsCount > 1 + ) -proc registerMsg(protocol: ProtocolInfo, - msgId: uint64, - name: string, - thunk: ThunkProc, - printer: MessageContentPrinter, - requestResolver: RequestResolver, - nextMsgResolver: NextMsgResolver) = +proc failResolver[MsgType](reason: DisconnectionReason, future: FutureBase) = + Future[MsgType](future).fail( + (ref PeerDisconnected)(msg: "Peer disconnected during handshake", reason: reason), + warn = false, + ) + +proc registerMsg( + protocol: ProtocolInfo, + msgId: uint64, + name: string, + thunk: ThunkProc, + printer: MessageContentPrinter, + requestResolver: RequestResolver, + nextMsgResolver: NextMsgResolver, + failResolver: FailResolver, +) = if protocol.messages.len.uint64 <= msgId: protocol.messages.setLen(msgId + 1) protocol.messages[msgId] = MessageInfo( @@ -352,7 +336,9 @@ proc registerMsg(protocol: ProtocolInfo, thunk: thunk, printer: printer, requestResolver: requestResolver, - nextMsgResolver: nextMsgResolver) + nextMsgResolver: nextMsgResolver, + failResolver: failResolver, + ) # Message composition and encryption # @@ -362,9 +348,14 @@ proc perPeerMsgIdImpl(peer: Peer, proto: ProtocolInfo, msgId: uint64): uint64 = if not peer.dispatcher.isNil: result += peer.dispatcher.protocolOffsets[proto.index].value -template getPeer(peer: Peer): auto = peer -template getPeer(responder: ResponderWithId): auto = responder.peer -template getPeer(responder: ResponderWithoutId): auto = Peer(responder) +template getPeer(peer: Peer): auto = + peer + +template getPeer(responder: ResponderWithId): auto = + responder.peer + +template getPeer(responder: ResponderWithoutId): auto = + Peer(responder) proc supports*(peer: Peer, proto: ProtocolInfo): bool = peer.dispatcher.protocolOffsets[proto.index].isSome @@ -376,61 +367,147 @@ proc supports*(peer: Peer, Protocol: type): bool = template perPeerMsgId(peer: Peer, MsgType: type): uint64 = perPeerMsgIdImpl(peer, MsgType.msgProtocol.protocolInfo, MsgType.msgId) -proc invokeThunk*(peer: Peer, msgId: uint64, msgData: Rlp): Future[void] - {.async: (raises: [rlp.RlpError, EthP2PError]).} = - template invalidIdError: untyped = - raise newException(UnsupportedMessageError, - "RLPx message with an invalid id " & $msgId & - " on a connection supporting " & peer.dispatcher.describeProtocols) +proc invokeThunk*( + peer: Peer, msgId: uint64, msgData: Rlp +): Future[void] {.async: (raises: [CancelledError, EthP2PError]).} = + template invalidIdError(): untyped = + raise newException( + UnsupportedMessageError, + "RLPx message with an invalid id " & $msgId & " on a connection supporting " & + peer.dispatcher.describeProtocols, + ) - # msgId can be negative as it has int as type and gets decoded from rlp - if msgId >= peer.dispatcher.messages.len.uint64: invalidIdError() - if peer.dispatcher.messages[msgId].isNil: invalidIdError() + if msgId >= peer.dispatcher.messages.len.uint64 or + peer.dispatcher.messages[msgId].isNil: + invalidIdError() + let msgInfo = peer.dispatcher.messages[msgId] - let thunk = peer.dispatcher.messages[msgId].thunk - if thunk == nil: invalidIdError() + doAssert 
peer.dispatcher.messages.len == peer.awaitedMessages.len, + "Should have been set up in peer constructor" - await thunk(peer, msgId, msgData) + # Check if the peer is "expecting" this message as part of a handshake + if peer.awaitedMessages[msgId] != nil: + let awaited = move(peer.awaitedMessages[msgId]) + peer.awaitedMessages[msgId] = nil + + try: + msgInfo.nextMsgResolver(msgData, awaited) + except rlp.RlpError: + await peer.disconnectAndRaise( + BreachOfProtocol, "Could not decode rlp for " & $msgId + ) + else: + await msgInfo.thunk(peer, msgData) template compressMsg(peer: Peer, data: seq[byte]): seq[byte] = - when useSnappy: - if peer.snappyEnabled: - snappy.encode(data) - else: data + if peer.snappyEnabled: + snappy.encode(data) else: data -proc sendMsg*(peer: Peer, data: seq[byte]) {.async.} = - var cipherText = encryptMsg(peer.compressMsg(data), peer.secretsState) +proc recvMsg( + peer: Peer +): Future[tuple[msgId: uint64, msgRlp: Rlp]] {. + async: (raises: [CancelledError, EthP2PError]) +.} = + var msgBody: seq[byte] try: - var res = await peer.transport.write(cipherText) - if res != len(cipherText): - # This is ECONNRESET or EPIPE case when remote peer disconnected. - await peer.disconnect(TcpError) - discard - except CatchableError as e: - await peer.disconnect(TcpError) - raise e + msgBody = await peer.transport.recvMsg() -proc send*[Msg](peer: Peer, msg: Msg): Future[void] = + trace "Received message", + remote = peer.remote, + clientId = peer.clientId, + data = toHex(msgBody.toOpenArray(0, min(255, msgBody.high))) + + # TODO we _really_ need an rlp decoder that doesn't require this many + # copies of each message... + var tmp = rlpFromBytes(msgBody) + let msgId = tmp.read(uint64) + + if peer.snappyEnabled and tmp.hasData(): + let decoded = + snappy.decode(msgBody.toOpenArray(tmp.position, msgBody.high), maxMsgSize) + if decoded.len == 0: + if msgId == 0x01 and msgBody.len > 1 and msgBody.len < 16 and msgBody[1] == 0xc1: + # Nethermind sends its TooManyPeers uncompressed but we want to be nice! 
+ # https://github.com/NethermindEth/nethermind/issues/7726 + debug "Trying to decode disconnect uncompressed", + remote = peer.remote, clientId = peer.clientId, data = toHex(msgBody) + else: + await peer.disconnectAndRaise( + BreachOfProtocol, "Could not decompress snappy data" + ) + else: + trace "Decoded message", + remote = peer.remote, + clientId = peer.clientId, + decoded = toHex(decoded.toOpenArray(0, min(255, decoded.high))) + tmp = rlpFromBytes(decoded) + + return (msgId, tmp) + except TransportError as exc: + await peer.disconnectAndRaise(TcpError, exc.msg) + except RlpxTransportError as exc: + await peer.disconnectAndRaise(BreachOfProtocol, exc.msg) + except RlpError as exc: + # TODO remove this warning before using in production + warn "TODO: RLP decoding failed for msgId", + remote = peer.remote, + clientId = peer.clientId, + err = exc.msg, + rawData = toHex(msgBody) + + await peer.disconnectAndRaise(BreachOfProtocol, "Could not decode msgId") + +proc encodeMsg(msg: auto): seq[byte] = + var rlpWriter = initRlpWriter() + rlpWriter.appendRecordType(msg, typeof(msg).rlpFieldsCount > 1) + rlpWriter.finish + +proc sendMsg( + peer: Peer, msgId: uint64, payload: seq[byte] +): Future[void] {.async: (raises: [CancelledError, EthP2PError]).} = + try: + let + msgIdBytes = rlp.encodeInt(msgId) + payloadBytes = peer.compressMsg(payload) + + var msg = newSeqOfCap[byte](msgIdBytes.data.len + payloadBytes.len) + msg.add msgIdBytes.data() + msg.add payloadBytes + + trace "Sending message", + remote = peer.remote, + clientId = peer.clientId, + msgId, + data = toHex(msg.toOpenArray(0, min(255, msg.high))), + payload = toHex(payload.toOpenArray(0, min(255, payload.high))) + + await peer.transport.sendMsg(msg) + except TransportError as exc: + await peer.disconnectAndRaise(TcpError, exc.msg) + except RlpxTransportError as exc: + await peer.disconnectAndRaise(BreachOfProtocol, exc.msg) + +proc send*[Msg]( + peer: Peer, msg: Msg +): Future[void] {.async: (raises: [CancelledError, EthP2PError], raw: true).} = logSentMsg(peer, msg) - var rlpWriter = initRlpWriter() - rlpWriter.append perPeerMsgId(peer, Msg) - rlpWriter.appendRecordType(msg, Msg.rlpFieldsCount > 1) - peer.sendMsg rlpWriter.finish + peer.sendMsg perPeerMsgId(peer, Msg), encodeMsg(msg) -proc registerRequest(peer: Peer, - timeout: Duration, - responseFuture: FutureBase, - responseMsgId: uint64): uint64 = - result = if peer.lastReqId.isNone: 0u64 else: peer.lastReqId.value + 1u64 +proc registerRequest( + peer: Peer, timeout: Duration, responseFuture: FutureBase, responseMsgId: uint64 +): uint64 = + result = + if peer.lastReqId.isNone: + 0u64 + else: + peer.lastReqId.value + 1u64 peer.lastReqId = Opt.some(result) let timeoutAt = Moment.fromNow(timeout) - let req = OutstandingRequest(id: result, - future: responseFuture, - timeoutAt: timeoutAt) + let req = OutstandingRequest(id: result, future: responseFuture) peer.outstandingRequests[responseMsgId].addLast req doAssert(not peer.dispatcher.isNil) @@ -448,16 +525,15 @@ proc resolveResponseFuture(peer: Peer, msgId: uint64, msg: pointer) = ## Optional arguments for macro helpers seem easier to handle with ## polymorphic functions (than a `Opt[]` prototype argument.) 
## + let msgInfo = peer.dispatcher.messages[msgId] + logScope: - msg = peer.dispatcher.messages[msgId].name - msgContents = peer.dispatcher.messages[msgId].printer(msg) + msg = msgInfo.name + msgContents = msgInfo.printer(msg) receivedReqId = -1 remotePeer = peer.remote - template resolve(future) = - (peer.dispatcher.messages[msgId].requestResolver)(msg, future) - - template outstandingReqs: auto = + template outstandingReqs(): auto = peer.outstandingRequests[msgId] block: # no request ID @@ -476,27 +552,26 @@ proc resolveResponseFuture(peer: Peer, msgId: uint64, msg: pointer) = # issues for such items potentially from another random peer. var expiredRequests = 0 for req in outstandingReqs: - if not req.future.finished: break + if not req.future.finished: + break inc expiredRequests outstandingReqs.shrink(fromFirst = expiredRequests) if outstandingReqs.len > 0: let oldestReq = outstandingReqs.popFirst - resolve oldestReq.future + msgInfo.requestResolver(msg, oldestReq.future) else: trace "late or dup RPLx reply ignored", msgId proc resolveResponseFuture(peer: Peer, msgId: uint64, msg: pointer, reqId: uint64) = ## Variant of `resolveResponseFuture()` for request ID argument. + let msgInfo = peer.dispatcher.messages[msgId] logScope: - msg = peer.dispatcher.messages[msgId].name - msgContents = peer.dispatcher.messages[msgId].printer(msg) + msg = msgInfo.name + msgContents = msgInfo.printer(msg) receivedReqId = reqId remotePeer = peer.remote - template resolve(future) = - (peer.dispatcher.messages[msgId].requestResolver)(msg, future) - - template outstandingReqs: auto = + template outstandingReqs(): auto = peer.outstandingRequests[msgId] block: # have request ID @@ -514,10 +589,10 @@ proc resolveResponseFuture(peer: Peer, msgId: uint64, msg: pointer, reqId: uint6 var idx = 0 while idx < outstandingReqs.len: - template req: auto = outstandingReqs()[idx] + template req(): auto = + outstandingReqs()[idx] if req.future.finished: - doAssert req.timeoutAt <= Moment.now() # Here we'll remove the expired request by swapping # it with the last one in the deque (if necessary): if idx != outstandingReqs.len - 1: @@ -530,7 +605,7 @@ proc resolveResponseFuture(peer: Peer, msgId: uint64, msg: pointer, reqId: uint6 return if req.id == reqId: - resolve req.future + msgInfo.requestResolver msg, req.future # Here we'll remove the found request by swapping # it with the last one in the deque (if necessary): if idx != outstandingReqs.len - 1: @@ -543,81 +618,9 @@ proc resolveResponseFuture(peer: Peer, msgId: uint64, msg: pointer, reqId: uint6 trace "late or dup RPLx reply ignored" - -proc recvMsg*(peer: Peer): Future[tuple[msgId: uint64, msgData: Rlp]] {.async.} = - ## This procs awaits the next complete RLPx message in the TCP stream - - var headerBytes: array[32, byte] - await peer.transport.readExactly(addr headerBytes[0], 32) - - var msgSize: int - var msgHeader: RlpxHeader - if decryptHeaderAndGetMsgSize(peer.secretsState, - headerBytes, msgSize, msgHeader).isErr(): - await peer.disconnectAndRaise(BreachOfProtocol, - "Cannot decrypt RLPx frame header") - - if msgSize > maxMsgSize: - await peer.disconnectAndRaise(BreachOfProtocol, - "RLPx message exceeds maximum size") - - let remainingBytes = encryptedLength(msgSize) - 32 - # TODO: Migrate this to a thread-local seq - # JACEK: - # or pass it in, allowing the caller to choose - they'll likely be in a - # better position to decide if buffer should be reused or not. 
this will - # also be useful for chunked messages where part of the buffer may have - # been processed and needs filling in - var encryptedBytes = newSeq[byte](remainingBytes) - await peer.transport.readExactly(addr encryptedBytes[0], len(encryptedBytes)) - - let decryptedMaxLength = decryptedLength(msgSize) - var - decryptedBytes = newSeq[byte](decryptedMaxLength) - decryptedBytesCount = 0 - - if decryptBody(peer.secretsState, encryptedBytes, msgSize, - decryptedBytes, decryptedBytesCount).isErr(): - await peer.disconnectAndRaise(BreachOfProtocol, - "Cannot decrypt RLPx frame body") - - decryptedBytes.setLen(decryptedBytesCount) - - when useSnappy: - if peer.snappyEnabled: - decryptedBytes = snappy.decode(decryptedBytes, maxMsgSize) - if decryptedBytes.len == 0: - await peer.disconnectAndRaise(BreachOfProtocol, - "Snappy uncompress encountered malformed data") - - # Check embedded header-data for start of an obsoleted chunked message. - # Note that the check should come *before* the `msgId` is read. For - # instance, if this is a malformed packet, then the `msgId` might be - # random which in turn might try to access a `peer.dispatcher.messages[]` - # slot with a `nil` entry. - # - # The current RLPx requirements need both tuuple entries be zero, see - # github.com/ethereum/devp2p/blob/master/rlpx.md#framing - # - if (msgHeader[4] and 127) != 0 or # capability-id, now required to be zero - (msgHeader[5] and 127) != 0: # context-id, now required to be zero - await peer.disconnectAndRaise( - BreachOfProtocol, "Rejected obsoleted chunked message header") - - var rlp = rlpFromBytes(decryptedBytes) - - var msgId: uint32 - try: - # uint32 as this seems more than big enough for the amount of msgIds - msgId = rlp.read(uint32) - result = (msgId.uint64, rlp) - except RlpError: - await peer.disconnectAndRaise(BreachOfProtocol, - "Cannot read RLPx message id") - - -proc checkedRlpRead(peer: Peer, r: var Rlp, MsgType: type): - auto {.raises: [RlpError].} = +proc checkedRlpRead( + peer: Peer, r: var Rlp, MsgType: type +): auto {.raises: [RlpError].} = when defined(release): return r.read(MsgType) else: @@ -625,41 +628,14 @@ proc checkedRlpRead(peer: Peer, r: var Rlp, MsgType: type): return r.read(MsgType) except rlp.RlpError as e: debug "Failed rlp.read", - peer = peer, - dataType = MsgType.name, - err = e.msg, - errName = e.name - #, rlpData = r.inspect -- don't use (might crash) + peer = peer, dataType = MsgType.name, err = e.msg, errName = e.name + #, rlpData = r.inspect -- don't use (might crash) raise e -proc waitSingleMsg(peer: Peer, MsgType: type): Future[MsgType] {.async.} = - let wantedId = peer.perPeerMsgId(MsgType) - while true: - var (nextMsgId, nextMsgData) = await peer.recvMsg() - - if nextMsgId == wantedId: - try: - result = checkedRlpRead(peer, nextMsgData, MsgType) - logReceivedMsg(peer, result) - return - except rlp.RlpError: - await peer.disconnectAndRaise(BreachOfProtocol, - "Invalid RLPx message body") - - elif nextMsgId == 1: # p2p.disconnect - # TODO: can still raise RlpError here...? - let reasonList = nextMsgData.read(DisconnectionReasonList) - let reason = reasonList.value - await peer.disconnect(reason) - trace "disconnect message received in waitSingleMsg", reason, peer - raisePeerDisconnected("Unexpected disconnect", reason) - else: - debug "Dropped RLPX message", - msg = peer.dispatcher.messages[nextMsgId].name - # TODO: This is breach of protocol? 
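
# --- Illustrative sketch, not part of the patch ---
# The waitSingleMsg loop removed above is replaced by per-message-id "awaited
# message" slots: nextMsg (below) registers a future under the message id it
# wants, and the receive path (invokeThunk above) completes that future when
# the message arrives, falling back to the regular handler otherwise. The
# names in this standalone sketch are illustrative only.
import chronos

type Handler = proc(payload: seq[byte]) {.gcsafe, raises: [].}

var
  awaited: array[16, Future[seq[byte]]] # one optional waiter per message id
  handlers: array[16, Handler]          # regular dispatch table

proc deliver(msgId: int, payload: seq[byte]) =
  if awaited[msgId] != nil:
    let fut = awaited[msgId]
    awaited[msgId] = nil
    fut.complete(payload)      # wake up whoever registered interest
  elif handlers[msgId] != nil:
    handlers[msgId](payload)   # normal message handling

proc nextMsgSketch(msgId: int): Future[seq[byte]] =
  # register interest in a single future message of this id and await it
  result = newFuture[seq[byte]]("nextMsgSketch")
  awaited[msgId] = result
# --- end sketch ---
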
- -proc nextMsg*(peer: Peer, MsgType: type): Future[MsgType] = +proc nextMsg*( + peer: Peer, MsgType: type +): Future[MsgType] {.async: (raises: [CancelledError, EthP2PError], raw: true).} = ## This procs awaits a specific RLPx message. ## Any messages received while waiting will be dispatched to their ## respective handlers. The designated message handler will also run @@ -667,74 +643,24 @@ proc nextMsg*(peer: Peer, MsgType: type): Future[MsgType] = let wantedId = peer.perPeerMsgId(MsgType) let f = peer.awaitedMessages[wantedId] if not f.isNil: - return Future[MsgType](f) + return Future[MsgType].Raising([CancelledError, EthP2PError])(f) initFuture result peer.awaitedMessages[wantedId] = result -# Known fatal errors are handled inside dispatchMessages. -# Errors we are currently unaware of are caught in the dispatchMessages -# callback. There they will be logged if CatchableError and quit on Defect. -# Non fatal errors such as the current CatchableError could be moved and -# handled a layer lower for clarity (and consistency), as also the actual -# message handler code as the TODO mentions already. -proc dispatchMessages*(peer: Peer) {.async.} = - while peer.connectionState notin {Disconnecting, Disconnected}: - var msgId: uint64 - var msgData: Rlp - try: - (msgId, msgData) = await peer.recvMsg() - except TransportError: - # Note: This will also catch TransportIncompleteError. TransportError will - # here usually occur when a read is attempted when the transport is - # already closed. TransportIncompleteError when the transport is closed - # during read. - case peer.connectionState - of Connected: - # Dropped connection, still need to cleanup the peer. - # This could be seen as bad behaving peer. - trace "Dropped connection", peer - await peer.disconnect(ClientQuitting, false) - return - of Disconnecting, Disconnected: - # Graceful disconnect, can still cause TransportIncompleteError as it - # could be that this loop was waiting at recvMsg(). - return - else: - # Connection dropped while `Connecting` (in rlpxConnect/rlpxAccept). - return - except PeerDisconnected: - return +proc dispatchMessages*(peer: Peer) {.async: (raises: []).} = + try: + while peer.connectionState notin {Disconnecting, Disconnected}: + var (msgId, msgData) = await peer.recvMsg() - try: await peer.invokeThunk(msgId, msgData) - except RlpError as e: - debug "RlpError, ending dispatchMessages loop", peer, - msg = peer.getMsgName(msgId), err = e.msg, errName = e.name - await peer.disconnect(BreachOfProtocol, true) - return - except EthP2PError as e: - debug "Error while handling RLPx message", peer, - msg = peer.getMsgName(msgId), err = e.msg, errName = e.name - - # TODO: Hmm, this can be safely moved into the message handler thunk. - # The documentation will need to be updated, explaining the fact that - # nextMsg will be resolved only if the message handler has executed - # successfully. - if msgId < peer.awaitedMessages.len.uint64 and - peer.awaitedMessages[msgId] != nil: - let msgInfo = peer.dispatcher.messages[msgId] - try: - (msgInfo.nextMsgResolver)(msgData, peer.awaitedMessages[msgId]) - except CatchableError as e: - # TODO: Handling errors here must be investigated more carefully. - # They also are supposed to be handled at the call-site where - # `nextMsg` is used. 
- debug "nextMsg resolver failed, ending dispatchMessages loop", peer, - msg = peer.getMsgName(msgId), err = e.msg - await peer.disconnect(BreachOfProtocol, true) - return - peer.awaitedMessages[msgId] = nil + except EthP2PError: + # TODO Is this needed? Most such exceptions are raised with an accompanying + # disconnect already .. ClientQuitting isn't a great error but as good + # as any since it will have no effect if the disconnect already happened + await peer.disconnect(ClientQuitting) + except CancelledError: + await peer.disconnect(ClientQuitting) proc p2pProtocolBackendImpl*(protocol: P2PProtocol): Backend = let @@ -752,6 +678,7 @@ proc p2pProtocolBackendImpl*(protocol: P2PProtocol): Backend = messagePrinter = bindSym "messagePrinter" nextMsgResolver = bindSym "nextMsgResolver" + failResolver = bindSym "failResolver" registerRequest = bindSym "registerRequest" requestResolver = bindSym "requestResolver" resolveResponseFuture = bindSym "resolveResponseFuture" @@ -769,7 +696,8 @@ proc p2pProtocolBackendImpl*(protocol: P2PProtocol): Backend = isSubprotocol = protocol.rlpxName != "p2p" - if protocol.rlpxName.len == 0: protocol.rlpxName = protocol.name + if protocol.rlpxName.len == 0: + protocol.rlpxName = protocol.name # By convention, all Ethereum protocol names have at least 3 characters. doAssert protocol.rlpxName.len >= 3 @@ -779,26 +707,27 @@ proc p2pProtocolBackendImpl*(protocol: P2PProtocol): Backend = result.setEventHandlers = bindSym "setEventHandlers" result.PeerType = Peer result.NetworkType = EthereumNode - result.ResponderType = if protocol.useRequestIds: ResponderWithId - else: ResponderWithoutId - - result.implementMsg = proc (msg: Message) = - # FIXME: Or is it already assured that `msgId` is available? - doAssert msg.id.isSome + result.ResponderType = + if protocol.useRequestIds: ResponderWithId else: ResponderWithoutId + result.implementMsg = proc(msg: Message) = var - msgIdValue = msg.id.value + msgIdValue = msg.id msgIdent = msg.ident msgName = $msgIdent msgRecName = msg.recName - responseMsgId = if msg.response.isNil: Opt.none(uint64) else: msg.response.id + responseMsgId = + if msg.response.isNil: + Opt.none(uint64) + else: + Opt.some(msg.response.id) hasReqId = msg.hasReqId protocol = msg.protocol # variables used in the sending procs peerOrResponder = ident"peerOrResponder" rlpWriter = ident"writer" - perPeerMsgIdVar = ident"perPeerMsgId" + perPeerMsgIdVar = ident"perPeerMsgId" # variables used in the receiving procs receivedRlp = ident"rlp" @@ -819,17 +748,15 @@ proc p2pProtocolBackendImpl*(protocol: P2PProtocol): Backend = doAssert responseMsgId.isSome let reqToResponseOffset = responseMsgId.value - msgIdValue - let responseMsgId = quote do: `perPeerMsgIdVar` + `reqToResponseOffset` + let responseMsgId = quote: + `perPeerMsgIdVar` + `reqToResponseOffset` # Each request is registered so we can resolve it when the response - # arrives. There are two types of protocols: LES-like protocols use - # explicit `reqId` sent over the wire, while the ETH wire protocol - # assumes there is one outstanding request at a time (if there are - # multiple requests we'll resolve them in FIFO order). - let registerRequestCall = newCall(registerRequest, peerVar, - timeoutVar, - resultIdent, - responseMsgId) + # arrives. There are two types of protocols: newer protocols use + # explicit `reqId` sent over the wire, while old versions of the ETH wire + # protocol assume response order matches requests. 
+ let registerRequestCall = + newCall(registerRequest, peerVar, timeoutVar, resultIdent, responseMsgId) if hasReqId: appendParams.add quote do: initFuture `resultIdent` @@ -839,12 +766,11 @@ proc p2pProtocolBackendImpl*(protocol: P2PProtocol): Backend = appendParams.add quote do: initFuture `resultIdent` discard `registerRequestCall` - of msgResponse: if hasReqId: paramsToWrite.add newDotExpr(peerOrResponder, reqIdVar) - - of msgHandshake, msgNotification: discard + of msgHandshake, msgNotification: + discard for param, paramType in msg.procDef.typedParams(skip = 1): # This is a fragment of the sending proc that @@ -858,8 +784,11 @@ proc p2pProtocolBackendImpl*(protocol: P2PProtocol): Backend = let paramCount = paramsToWrite.len - readParamsPrelude = if paramCount > 1: newCall(tryEnterList, receivedRlp) - else: newStmtList() + readParamsPrelude = + if paramCount > 1: + newCall(tryEnterList, receivedRlp) + else: + newStmtList() when tracingEnabled: readParams.add newCall(bindSym"logReceivedMsg", peerVar, receivedMsg) @@ -868,77 +797,95 @@ proc p2pProtocolBackendImpl*(protocol: P2PProtocol): Backend = if msg.kind != msgResponse: newStmtList() elif hasReqId: - newCall(resolveResponseFuture, - peerVar, - newCall(perPeerMsgId, peerVar, msgRecName), - newCall("addr", receivedMsg), - reqIdVar) + newCall( + resolveResponseFuture, + peerVar, + newCall(perPeerMsgId, peerVar, msgRecName), + newCall("addr", receivedMsg), + reqIdVar, + ) else: - newCall(resolveResponseFuture, - peerVar, - newCall(perPeerMsgId, peerVar, msgRecName), - newCall("addr", receivedMsg)) + newCall( + resolveResponseFuture, + peerVar, + newCall(perPeerMsgId, peerVar, msgRecName), + newCall("addr", receivedMsg), + ) var userHandlerParams = @[peerVar] - if hasReqId: userHandlerParams.add reqIdVar + if hasReqId: + userHandlerParams.add reqIdVar let awaitUserHandler = msg.genAwaitUserHandler(receivedMsg, userHandlerParams) thunkName = ident(msgName & "Thunk") msg.defineThunk quote do: - proc `thunkName`(`peerVar`: `Peer`, _: uint64, data: Rlp) - # Fun error if you just use `RlpError` instead of `rlp.RlpError`: - # "Error: type expected, but got symbol 'RlpError' of kind 'EnumField'" - {.async: (raises: [rlp.RlpError, EthP2PError]).} = + proc `thunkName`( + `peerVar`: `Peer`, data: Rlp + ) {.async: (raises: [CancelledError, EthP2PError]).} = var `receivedRlp` = data - var `receivedMsg` {.noinit.}: `msgRecName` - `readParamsPrelude` - `readParams` - `awaitUserHandler` - `callResolvedResponseFuture` + var `receivedMsg`: `msgRecName` + try: + `readParamsPrelude` + `readParams` + `awaitUserHandler` + `callResolvedResponseFuture` + except rlp.RlpError as exc: + # TODO this is a pre-release warning - we should turn this into an + # actual BreachOfProtocol disconnect down the line + warn "TODO: RLP decoding failed for incoming message", + msg = name(`msgRecName`), + remote = `peerVar`.remote, + clientId = `peerVar`.clientId, + err = exc.msg + + await `peerVar`.disconnectAndRaise( + BreachOfProtocol, "Invalid RLP in parameter list for " & $(`msgRecName`) + ) var sendProc = msg.createSendProc(isRawSender = (msg.kind == msgHandshake)) sendProc.def.params[1][0] = peerOrResponder let msgBytes = ident"msgBytes" - finalizeRequest = quote do: + finalizeRequest = quote: let `msgBytes` = `finish`(`rlpWriter`) - var sendCall = newCall(sendMsg, peerVar, msgBytes) - let senderEpilogue = if msg.kind == msgRequest: - # In RLPx requests, the returned future was allocated here and passed - # to `registerRequest`. 
It's already assigned to the result variable - # of the proc, so we just wait for the sending operation to complete - # and we return in a normal way. (the waiting is done, so we can catch - # any possible errors). - quote: `linkSendFailureToReqFuture`(`sendCall`, `resultIdent`) - else: - # In normal RLPx messages, we are returning the future returned by the - # `sendMsg` call. - quote: return `sendCall` + perPeerMsgIdValue = + if isSubprotocol: + newCall(perPeerMsgIdImpl, peerVar, protocol.protocolInfo, newLit(msgIdValue)) + else: + newLit(msgIdValue) - let perPeerMsgIdValue = if isSubprotocol: - newCall(perPeerMsgIdImpl, peerVar, protocol.protocolInfo, newLit(msgIdValue)) - else: - newLit(msgIdValue) + var sendCall = newCall(sendMsg, peerVar, perPeerMsgIdVar, msgBytes) + let senderEpilogue = + if msg.kind == msgRequest: + # In RLPx requests, the returned future was allocated here and passed + # to `registerRequest`. It's already assigned to the result variable + # of the proc, so we just wait for the sending operation to complete + # and we return in a normal way. (the waiting is done, so we can catch + # any possible errors). + quote: + `linkSendFailureToReqFuture`(`sendCall`, `resultIdent`) + else: + # In normal RLPx messages, we are returning the future returned by the + # `sendMsg` call. + quote: + return `sendCall` if paramCount > 1: # In case there are more than 1 parameter, # the params must be wrapped in a list: - appendParams = newStmtList( - newCall(startList, rlpWriter, newLit(paramCount)), - appendParams) + appendParams = + newStmtList(newCall(startList, rlpWriter, newLit(paramCount)), appendParams) for param in paramsToWrite: appendParams.add newCall(append, rlpWriter, param) - let initWriter = quote do: + let initWriter = quote: var `rlpWriter` = `initRlpWriter`() - const `perProtocolMsgIdVar` {.used.} = `msgIdValue` let `perPeerMsgIdVar` = `perPeerMsgIdValue` - `append`(`rlpWriter`, `perPeerMsgIdVar`) when tracingEnabled: appendParams.add logSentMsgFields(peerVar, protocol, msgId, paramsToWrite) @@ -955,32 +902,59 @@ proc p2pProtocolBackendImpl*(protocol: P2PProtocol): Backend = discard msg.createHandshakeTemplate(sendProc.def.name, handshakeImpl, nextMsg) protocol.outProcRegistrations.add( - newCall(registerMsg, - protocolVar, - newLit(msgIdValue), - newLit(msgName), - thunkName, - newTree(nnkBracketExpr, messagePrinter, msgRecName), - newTree(nnkBracketExpr, requestResolver, msgRecName), - newTree(nnkBracketExpr, nextMsgResolver, msgRecName))) + newCall( + registerMsg, + protocolVar, + newLit(msgIdValue), + newLit(msgName), + thunkName, + newTree(nnkBracketExpr, messagePrinter, msgRecName), + newTree(nnkBracketExpr, requestResolver, msgRecName), + newTree(nnkBracketExpr, nextMsgResolver, msgRecName), + newTree(nnkBracketExpr, failResolver, msgRecName), + ) + ) - result.implementProtocolInit = proc (protocol: P2PProtocol): NimNode = - return newCall(initProtocol, - newLit(protocol.rlpxName), - newLit(protocol.version), - protocol.peerInit, protocol.netInit) + result.implementProtocolInit = proc(protocol: P2PProtocol): NimNode = + return newCall( + initProtocol, + newLit(protocol.rlpxName), + newLit(protocol.version), + protocol.peerInit, + protocol.netInit, + ) +p2pProtocol DevP2P(version = devp2pSnappyVersion, rlpxName = "p2p"): + proc hello( + peer: Peer, + version: uint64, + clientId: string, + capabilities: seq[Capability], + listenPort: uint, + nodeId: array[RawPublicKeySize, byte], + ) = + # The first hello message gets processed during the initial handshake - this 
+ # version is used for any subsequent messages -p2pProtocol DevP2P(version = 5, rlpxName = "p2p"): - proc hello(peer: Peer, - version: uint64, - clientId: string, - capabilities: seq[Capability], - listenPort: uint, - nodeId: array[RawPublicKeySize, byte]) + # TODO investigate and turn warning into protocol breach + warn "TODO Multiple hello messages received", + remote = peer.remote, clientId = clientId + # await peer.disconnectAndRaise(BreachOfProtocol, "Multiple hello messages") proc sendDisconnectMsg(peer: Peer, reason: DisconnectionReasonList) = - trace "disconnect message received", reason=reason.value, peer + ## Notify other peer that we're about to disconnect them for the given + ## reason + if reason.value == BreachOfProtocol: + # TODO This is a temporary log message at warning level to aid in + # debugging in pre-release versions - it should be removed before + # release + # TODO Nethermind sends BreachOfProtocol on network id mismatch: + # https://github.com/NethermindEth/nethermind/issues/7727 + if not peer.clientId.startsWith("Nethermind"): + warn "TODO Peer sent BreachOfProtocol error!", + remote = peer.remote, clientId = peer.clientId + else: + trace "disconnect message received", reason = reason.value, peer await peer.disconnect(reason.value, false) # Adding an empty RLP list as the spec defines. @@ -1009,47 +983,68 @@ proc removePeer(network: EthereumNode, peer: Peer) = if observer.protocol.isNil or peer.supports(observer.protocol): observer.onPeerDisconnected(peer) -proc callDisconnectHandlers(peer: Peer, reason: DisconnectionReason): - Future[void] {.async: (raises: []).} = - var futures = newSeqOfCap[Future[void]](protocolCount()) - - for protocol in peer.dispatcher.activeProtocols: - if protocol.disconnectHandler != nil: - futures.add((protocol.disconnectHandler)(peer, reason)) +proc callDisconnectHandlers( + peer: Peer, reason: DisconnectionReason +): Future[void] {.async: (raises: []).} = + let futures = peer.dispatcher.activeProtocols + .filterIt(it.onPeerDisconnected != nil) + .mapIt(it.onPeerDisconnected(peer, reason)) await noCancel allFutures(futures) - for f in futures: - doAssert(f.finished()) - if f.failed(): - trace "Disconnection handler ended with an error", err = f.error.msg - -proc disconnect*(peer: Peer, reason: DisconnectionReason, - notifyOtherPeer = false) {.async: (raises: []).} = +proc disconnect*( + peer: Peer, reason: DisconnectionReason, notifyOtherPeer = false +) {.async: (raises: []).} = + if reason == BreachOfProtocol: + # TODO remove warning after all protocol breaches have been investigated + # TODO https://github.com/NethermindEth/nethermind/issues/7727 + if not peer.clientId.startsWith("Nethermind"): + warn "TODO disconnecting peer because of protocol breach", + remote = peer.remote, clientId = peer.clientId if peer.connectionState notin {Disconnecting, Disconnected}: + if peer.connectionState == Connected: + # Only log peers that successfully completed the full connection setup - + # the others should have been logged already + debug "Peer disconnected", remote = peer.remote, clientId = peer.clientId, reason + peer.connectionState = Disconnecting + # Do this first so sub-protocols have time to clean up and stop sending # before this node closes transport to remote peer if not peer.dispatcher.isNil: + # Notify all pending handshake handlers that a disconnection happened + for msgId, fut in peer.awaitedMessages.mpairs: + if fut != nil: + var tmp = fut + fut = nil + peer.dispatcher.messages[msgId].failResolver(reason, tmp) + + for msgId, 
reqs in peer.outstandingRequests.mpairs(): + while reqs.len > 0: + let req = reqs.popFirst() + # Same as when they timeout + peer.dispatcher.messages[msgId].requestResolver(nil, req.future) + # In case of `CatchableError` in any of the handlers, this will be logged. # Other handlers will still execute. # In case of `Defect` in any of the handlers, program will quit. await callDisconnectHandlers(peer, reason) if notifyOtherPeer and not peer.transport.closed: - - proc waitAndClose(peer: Peer, time: Duration) {.async.} = - await sleepAsync(time) - await peer.transport.closeWait() + proc waitAndClose( + transport: RlpxTransport, time: Duration + ) {.async: (raises: []).} = + await noCancel sleepAsync(time) + await noCancel peer.transport.closeWait() try: await peer.sendDisconnectMsg(DisconnectionReasonList(value: reason)) except CatchableError as e: - trace "Failed to deliver disconnect message", peer, - err = e.msg, errName = e.name + trace "Failed to deliver disconnect message", + peer, err = e.msg, errName = e.name # Give the peer a chance to disconnect - traceAsyncErrors peer.waitAndClose(2.seconds) + asyncSpawn peer.transport.waitAndClose(2.seconds) elif not peer.transport.closed: peer.transport.close() @@ -1057,19 +1052,13 @@ proc disconnect*(peer: Peer, reason: DisconnectionReason, peer.connectionState = Disconnected removePeer(peer.network, peer) -func validatePubKeyInHello(msg: DevP2P.hello, pubKey: PublicKey): bool = - let pk = PublicKey.fromRaw(msg.nodeId) - pk.isOk and pk[] == pubKey - -func checkUselessPeer(peer: Peer) {.raises: [UselessPeerError].} = - if peer.dispatcher.numProtocols == 0: - # XXX: Send disconnect + UselessPeer - raise newException(UselessPeerError, "Useless peer") - -proc initPeerState*(peer: Peer, capabilities: openArray[Capability]) - {.raises: [UselessPeerError].} = - peer.dispatcher = getDispatcher(peer.network, capabilities) - checkUselessPeer(peer) +proc initPeerState*( + peer: Peer, capabilities: openArray[Capability] +) {.raises: [UselessPeerError].} = + peer.dispatcher = getDispatcher(peer.network, capabilities).valueOr: + raise (ref UselessPeerError)( + msg: "No capabilities in common: " & capabilities.mapIt($it).join(",") + ) # The dispatcher has determined our message ID sequence. # For each message ID, we allocate a potential slot for @@ -1085,7 +1074,10 @@ proc initPeerState*(peer: Peer, capabilities: openArray[Capability]) peer.lastReqId = Opt.some(0u64) peer.initProtocolStates peer.dispatcher.activeProtocols -proc postHelloSteps(peer: Peer, h: DevP2P.hello) {.async.} = +proc postHelloSteps( + peer: Peer, h: DevP2P.hello +) {.async: (raises: [CancelledError, EthP2PError]).} = + peer.clientId = h.clientId initPeerState(peer, h.capabilities) # Please note that the ordering of operations here is important! @@ -1094,239 +1086,192 @@ proc postHelloSteps(peer: Peer, h: DevP2P.hello) {.async.} = # chance to send any initial packages they might require over # the network and to yield on their `nextMsg` waits. # - var subProtocolsHandshakes = newSeqOfCap[Future[void]](protocolCount()) - for protocol in peer.dispatcher.activeProtocols: - if protocol.handshake != nil: - subProtocolsHandshakes.add((protocol.handshake)(peer)) + + let handshakes = peer.dispatcher.activeProtocols + .filterIt(it.onPeerConnected != nil) + .mapIt(it.onPeerConnected(peer)) # The `dispatchMessages` loop must be started after this. 
# Otherwise, we risk that some of the handshake packets sent by # the other peer may arrive too early and be processed before # the handshake code got a change to wait for them. # - var messageProcessingLoop = peer.dispatchMessages() - - let cb = proc(p: pointer) {.gcsafe.} = - if messageProcessingLoop.failed: - debug "Ending dispatchMessages loop", peer, - err = messageProcessingLoop.error.msg - traceAsyncErrors peer.disconnect(ClientQuitting) - - messageProcessingLoop.addCallback(cb) + let messageProcessingLoop = peer.dispatchMessages() # The handshake may involve multiple async steps, so we wait # here for all of them to finish. # - await allFutures(subProtocolsHandshakes) + await allFutures(handshakes) - for handshake in subProtocolsHandshakes: - doAssert(handshake.finished()) - if handshake.failed(): - raise handshake.error + for handshake in handshakes: + if not handshake.completed(): + await handshake # raises correct error without actually waiting # This is needed as a peer might have already disconnected. In this case # we need to raise so that rlpxConnect/rlpxAccept fails. # Disconnect is done only to run the disconnect handlers. TODO: improve this # also TODO: Should we discern the type of error? if messageProcessingLoop.finished: - await peer.disconnectAndRaise(ClientQuitting, - "messageProcessingLoop ended while connecting") + await peer.disconnectAndRaise( + ClientQuitting, "messageProcessingLoop ended while connecting" + ) peer.connectionState = Connected -template `^`(arr): auto = - # passes a stack array with a matching `arrLen` - # variable as an open array - arr.toOpenArray(0, `arr Len` - 1) +template setSnappySupport(peer: Peer, hello: DevP2P.hello) = + peer.snappyEnabled = hello.version >= devp2pSnappyVersion.uint64 -proc initSecretState(p: Peer, hs: Handshake, authMsg, ackMsg: openArray[byte]) = - var secrets = hs.getSecrets(authMsg, ackMsg) - initSecretState(secrets, p.secretsState) - burnMem(secrets) +type RlpxError* = enum + TransportConnectError + RlpxHandshakeTransportError + RlpxHandshakeError + ProtocolError + P2PHandshakeError + P2PTransportError + InvalidIdentityError + UselessRlpxPeerError + PeerDisconnectedError + TooManyPeersError -template setSnappySupport(peer: Peer, node: EthereumNode, handshake: Handshake) = - when useSnappy: - peer.snappyEnabled = node.protocolVersion >= devp2pSnappyVersion.uint64 and - handshake.version >= devp2pSnappyVersion.uint64 +proc helloHandshake( + node: EthereumNode, peer: Peer +): Future[DevP2P.hello] {.async: (raises: [CancelledError, EthP2PError]).} = + ## Negotiate common capabilities using the p2p `hello` message -template getVersion(handshake: Handshake): uint64 = - when useSnappy: - handshake.version - else: - devp2pVersion + # https://github.com/ethereum/devp2p/blob/5713591d0366da78a913a811c7502d9ca91d29a8/rlpx.md#hello-0x00 -template baseProtocolVersion(node: EthereumNode): untyped = - when useSnappy: - node.protocolVersion - else: - devp2pVersion + await peer.send( + DevP2P.hello( + version: devp2pSnappyVersion, + clientId: node.clientId, + capabilities: node.capabilities, + listenPort: 0, # obsolete + nodeId: node.keys.pubkey.toRaw(), + ) + ) -template baseProtocolVersion(peer: Peer): uint64 = - when useSnappy: - if peer.snappyEnabled: devp2pSnappyVersion - else: devp2pVersion - else: - devp2pVersion + # The first message received must be a hello or a disconnect + var (msgId, msgData) = await peer.recvMsg() -type - RlpxError* = enum - TransportConnectError, - RlpxHandshakeTransportError, - RlpxHandshakeError, - 
ProtocolError, - P2PHandshakeError, - P2PTransportError, - InvalidIdentityError, - UselessRlpxPeerError, - PeerDisconnectedError, - TooManyPeersError + try: + case msgId + of msgIdHello: + # Implementations must ignore any additional list elements in Hello + # because they may be used by a future version. + let response = msgData.read(DevP2P.hello) + trace "Received Hello", version = response.version, id = response.clientId -proc rlpxConnect*(node: EthereumNode, remote: Node): - Future[Result[Peer, RlpxError]] {.async.} = - # TODO: Should we not set some timeouts on the `connect` and `readExactly`s? - # Or should we have a general timeout on the whole rlpxConnect where it gets - # called? - # Now, some parts could potential hang until a tcp timeout is hit? + if response.nodeId != peer.transport.pubkey.toRaw: + await peer.disconnectAndRaise( + BreachOfProtocol, "nodeId in hello does not match RLPx transport identity" + ) + + return response + of msgIdDisconnect: # Disconnection requested by peer + # TODO distinguish their reason from ours + let reason = msgData.read(DisconnectionReasonList).value + await peer.disconnectAndRaise( + reason, "Peer disconnecting during hello: " & $reason + ) + else: + # No other messages may be sent until a Hello is received. + await peer.disconnectAndRaise(BreachOfProtocol, "Expected hello, got " & $msgId) + except RlpError: + await peer.disconnectAndRaise(BreachOfProtocol, "Could not decode hello RLP") + +proc rlpxConnect*( + node: EthereumNode, remote: Node +): Future[Result[Peer, RlpxError]] {.async: (raises: [CancelledError]).} = + # TODO move logging elsewhere - the aim is to have exactly _one_ debug log per + # connection attempt (success or failure) to not spam the logs initTracing(devp2pInfo, node.protocols) + logScope: + remote + trace "Connecting to peer" + + let + peer = Peer(remote: remote, network: node) + deadline = sleepAsync(connectionTimeout) - let peer = Peer(remote: remote, network: node) - let ta = initTAddress(remote.node.address.ip, remote.node.address.tcpPort) var error = true defer: + deadline.cancelSoon() # Harmless if finished + if error: # TODO: Not sure if I like this much - if not isNil(peer.transport): - if not peer.transport.closed: - peer.transport.close() + if peer.transport != nil: + peer.transport.close() peer.transport = try: - await connect(ta) - except TransportError: + let ta = initTAddress(remote.node.address.ip, remote.node.address.tcpPort) + await RlpxTransport.connect(node.rng, node.keys, ta, remote.node.pubkey).wait( + deadline + ) + except AsyncTimeoutError: + debug "Connect timeout" return err(TransportConnectError) - except CatchableError as e: - # Aside from TransportOsError, seems raw CatchableError can also occur? - trace "TCP connect with peer failed", err = $e.name, errMsg = $e.msg + except RlpxTransportError as exc: + debug "Connect RlpxTransport error", err = exc.msg + return err(ProtocolError) + except TransportError as exc: + debug "Connect transport error", err = exc.msg return err(TransportConnectError) - # RLPx initial handshake - var - handshake = Handshake.init( - node.rng[], node.keys, {Initiator, EIP8}, node.baseProtocolVersion) - authMsg: array[AuthMessageMaxEIP8, byte] - authMsgLen = 0 - # TODO: Rework this so we won't have to pass an array as parameter? 
- authMessage( - handshake, node.rng[], remote.node.pubkey, authMsg, authMsgLen).tryGet() - - let writeRes = - try: - await peer.transport.write(addr authMsg[0], authMsgLen) - except TransportError: - return err(RlpxHandshakeTransportError) - except CatchableError as e: # TODO: Only TransportErrors can occur? - raiseAssert($e.name & " " & $e.msg) - if writeRes != authMsgLen: - return err(RlpxHandshakeTransportError) - - let initialSize = handshake.expectedLength - var ackMsg = newSeqOfCap[byte](1024) - ackMsg.setLen(initialSize) - - try: - await peer.transport.readExactly(addr ackMsg[0], len(ackMsg)) - except TransportError: - return err(RlpxHandshakeTransportError) - except CatchableError as e: - raiseAssert($e.name & " " & $e.msg) - - let res = handshake.decodeAckMessage(ackMsg) - if res.isErr and res.error == AuthError.IncompleteError: - ackMsg.setLen(handshake.expectedLength) - try: - await peer.transport.readExactly(addr ackMsg[initialSize], - len(ackMsg) - initialSize) - except TransportError: - return err(RlpxHandshakeTransportError) - except CatchableError as e: # TODO: Only TransportErrors can occur? - raiseAssert($e.name & " " & $e.msg) - - # TODO: Bullet 1 of https://github.com/status-im/nim-eth/issues/559 - let res = handshake.decodeAckMessage(ackMsg) - if res.isErr(): - trace "rlpxConnect handshake error", error = res.error - return err(RlpxHandshakeError) - - peer.setSnappySupport(node, handshake) - peer.initSecretState(handshake, ^authMsg, ackMsg) - logConnectedPeer peer # RLPx p2p capability handshake: After the initial handshake, both sides of # the connection must send either Hello or a Disconnect message. - let - sendHelloFut = peer.hello( - handshake.getVersion(), - node.clientId, - node.capabilities, - uint(node.address.tcpPort), - node.keys.pubkey.toRaw()) - - receiveHelloFut = peer.waitSingleMsg(DevP2P.hello) - - response = - try: - await peer.handshakeImpl( - sendHelloFut, - receiveHelloFut, - 10.seconds) - except RlpError: - return err(ProtocolError) - except PeerDisconnected: + let response = + try: + await node.helloHandshake(peer).wait(deadline) + except AsyncTimeoutError: + debug "Connect handshake timeout" + return err(P2PHandshakeError) + except PeerDisconnected as exc: + debug "Connect handshake disconnection", err = exc.msg, reason = exc.reason + case exc.reason + of TooManyPeers: + return err(TooManyPeersError) + else: return err(PeerDisconnectedError) - # TODO: Strange compiler error - # case e.reason: - # of HandshakeTimeout: - # # Yeah, a bit odd but in this case PeerDisconnected comes from a - # # timeout on the P2P Hello message. 
TODO: Clean-up that handshakeImpl - # return err(P2PHandshakeError) - # of TooManyPeers: - # return err(TooManyPeersError) - # else: - # return err(PeerDisconnectedError) - except TransportError: - return err(P2PTransportError) - except P2PInternalError: - return err(P2PHandshakeError) - except CatchableError as e: - raiseAssert($e.name & " " & $e.msg) + except UselessPeerError as exc: + debug "Useless peer during handshake", err = exc.msg + return err(UselessRlpxPeerError) + except EthP2PError as exc: + debug "Connect handshake error", err = exc.msg + return err(PeerDisconnectedError) - if not validatePubKeyInHello(response, remote.node.pubkey): - trace "Wrong devp2p identity in Hello message" - return err(InvalidIdentityError) + if response.version < devp2pSnappyVersion: + await peer.disconnect(IncompatibleProtocolVersion, notifyOtherPeer = true) + debug "Peer using obsolete devp2p version", + version = response.version, clientId = response.clientId + return err(UselessRlpxPeerError) - trace "DevP2P handshake completed", peer = remote, + peer.setSnappySupport(response) + + logScope: clientId = response.clientId + trace "DevP2P handshake completed" + try: await postHelloSteps(peer, response) - except RlpError: - return err(ProtocolError) - except PeerDisconnected as e: - case e.reason: + except PeerDisconnected as exc: + debug "Disconnect finishing hello", + remote, clientId = response.clientId, err = exc.msg, reason = exc.reason + case exc.reason of TooManyPeers: return err(TooManyPeersError) else: return err(PeerDisconnectedError) - except UselessPeerError: + except UselessPeerError as exc: + debug "Useless peer finishing hello", err = exc.msg return err(UselessRlpxPeerError) - except TransportError: - return err(P2PTransportError) - except EthP2PError: + except EthP2PError as exc: + debug "P2P error finishing hello", err = exc.msg return err(ProtocolError) - except CatchableError as e: - raiseAssert($e.name & " " & $e.msg) - debug "Peer fully connected", peer = remote, clientId = response.clientId + debug "Peer connected", capabilities = response.capabilities error = false @@ -1334,154 +1279,125 @@ proc rlpxConnect*(node: EthereumNode, remote: Node): # TODO: rework rlpxAccept similar to rlpxConnect. proc rlpxAccept*( - node: EthereumNode, transport: StreamTransport): Future[Peer] {.async: (raises: []).} = + node: EthereumNode, stream: StreamTransport +): Future[Peer] {.async: (raises: [CancelledError, EthP2PError]).} = + # TODO move logging elsewhere - the aim is to have exactly _one_ debug log per + # connection attempt (success or failure) to not spam the logs initTracing(devp2pInfo, node.protocols) - let peer = Peer(transport: transport, network: node) - var handshake = Handshake.init(node.rng[], node.keys, {auth.Responder}) - var ok = false - try: - let initialSize = handshake.expectedLength - var authMsg = newSeqOfCap[byte](1024) + let + peer = Peer(network: node) + deadline = sleepAsync(connectionTimeout) - authMsg.setLen(initialSize) - # TODO: Should we not set some timeouts on these `readExactly`s? 
- await transport.readExactly(addr authMsg[0], len(authMsg)) - var ret = handshake.decodeAuthMessage(authMsg) - if ret.isErr and ret.error == AuthError.IncompleteError: - # Eip8 auth message is possible, but not likely - authMsg.setLen(handshake.expectedLength) - await transport.readExactly(addr authMsg[initialSize], - len(authMsg) - initialSize) - ret = handshake.decodeAuthMessage(authMsg) + var error = true + defer: + deadline.cancelSoon() - if ret.isErr(): - # It is likely that errors on the handshake Auth is just garbage arriving - # on the TCP port as it is the first data on the incoming connection, - # hence log them as trace. - trace "rlpxAccept handshake error", error = ret.error - if not isNil(peer.transport): - peer.transport.close() + if error: + stream.close() - rlpx_accept_failure.inc() - rlpx_accept_failure.inc(labelValues = ["handshake_error"]) + let remoteAddress = + try: + stream.remoteAddress() + except TransportError as exc: + debug "Could not get remote address", err = exc.msg return nil - ret.get() + trace "Incoming connection", remoteAddress = $remoteAddress - peer.setSnappySupport(node, handshake) - handshake.version = uint8(peer.baseProtocolVersion) + peer.transport = + try: + await RlpxTransport.accept(node.rng, node.keys, stream).wait(deadline) + except AsyncTimeoutError: + debug "Accept timeout", remoteAddress = $remoteAddress + rlpx_accept_failure.inc(labelValues = ["timeout"]) + return nil + except RlpxTransportError as exc: + debug "Accept RlpxTransport error", remoteAddress = $remoteAddress, err = exc.msg + rlpx_accept_failure.inc(labelValues = [$BreachOfProtocol]) + return nil + except TransportError as exc: + debug "Accept transport error", remoteAddress = $remoteAddress, err = exc.msg + rlpx_accept_failure.inc(labelValues = [$TcpError]) + return nil - var ackMsg: array[AckMessageMaxEIP8, byte] - var ackMsgLen: int - handshake.ackMessage(node.rng[], ackMsg, ackMsgLen).tryGet() - var res = await transport.write(addr ackMsg[0], ackMsgLen) - if res != ackMsgLen: - raisePeerDisconnected("Unexpected disconnect while authenticating", - TcpError) + let + # The ports in this address are not necessarily the ports that the peer is + # actually listening on, so we cannot use this information to connect to + # the peer in the future! 
+ ip = + try: + remoteAddress.address + except ValueError: + raiseAssert "only tcp sockets supported" + address = Address(ip: ip, tcpPort: remoteAddress.port, udpPort: remoteAddress.port) - peer.initSecretState(handshake, authMsg, ^ackMsg) + peer.remote = newNode(ENode(pubkey: peer.transport.pubkey, address: address)) - let listenPort = transport.localAddress().port + logAcceptedPeer peer - logAcceptedPeer peer + logScope: + remote = peer.remote - var sendHelloFut = peer.hello( - peer.baseProtocolVersion, - node.clientId, - node.capabilities, - listenPort.uint, - node.keys.pubkey.toRaw()) + let response = + try: + await node.helloHandshake(peer).wait(deadline) + except AsyncTimeoutError: + debug "Accept handshake timeout" + rlpx_accept_failure.inc(labelValues = ["timeout"]) + return nil + except PeerDisconnected as exc: + debug "Accped handshake disconnection", err = exc.msg, reason = exc.reason + rlpx_accept_failure.inc(labelValues = [$exc.reason]) + return nil + except EthP2PError as exc: + debug "Accped handshake error", err = exc.msg + rlpx_accept_failure.inc(labelValues = ["error"]) + return nil - var response = await peer.handshakeImpl( - sendHelloFut, - peer.waitSingleMsg(DevP2P.hello), - 10.seconds) - - trace "Received Hello", version=response.version, id=response.clientId - - if not validatePubKeyInHello(response, handshake.remoteHPubkey): - trace "A Remote nodeId is not its public key" # XXX: Do we care? - - let remote = transport.remoteAddress() - let address = Address(ip: remote.address, tcpPort: remote.port, - udpPort: remote.port) - peer.remote = newNode( - ENode(pubkey: handshake.remoteHPubkey, address: address)) - - trace "devp2p handshake completed", peer = peer.remote, - clientId = response.clientId - - # In case there is an outgoing connection started with this peer we give - # precedence to that one and we disconnect here with `AlreadyConnected` - if peer.remote in node.peerPool.connectedNodes or - peer.remote in node.peerPool.connectingNodes: - trace "Duplicate connection in rlpxAccept" - raisePeerDisconnected("Peer already connecting or connected", - AlreadyConnected) - - node.peerPool.connectingNodes.incl(peer.remote) - - await postHelloSteps(peer, response) - ok = true - trace "Peer fully connected", peer = peer.remote, clientId = response.clientId - except PeerDisconnected as e: - case e.reason - of AlreadyConnected, TooManyPeers, MessageTimeout: - trace "RLPx disconnect", reason = e.reason, peer = peer.remote - else: - debug "RLPx disconnect unexpected", reason = e.reason, - msg = e.msg, peer = peer.remote - - rlpx_accept_failure.inc(labelValues = [$e.reason]) - except TransportIncompleteError: - trace "Connection dropped in rlpxAccept", remote = peer.remote - rlpx_accept_failure.inc(labelValues = [$TransportIncompleteError]) - except UselessPeerError: - trace "Disconnecting useless peer", peer = peer.remote - rlpx_accept_failure.inc(labelValues = [$UselessPeerError]) - except RlpTypeMismatch as e: - # Some peers report capabilities with names longer than 3 chars. We ignore - # those for now. Maybe we should allow this though. 
- trace "Rlp error in rlpxAccept", err = e.msg, errName = e.name - rlpx_accept_failure.inc(labelValues = [$RlpTypeMismatch]) - except TransportOsError as e: - if e.code == OSErrorCode(110): - trace "RLPx timeout", err = e.msg, errName = e.name - rlpx_accept_failure.inc(labelValues = ["tcp_timeout"]) - else: - trace "TransportOsError", err = e.msg, errName = e.name - rlpx_accept_failure.inc(labelValues = [$e.name]) - except CatchableError as e: - trace "RLPx error", err = e.msg, errName = e.name - rlpx_accept_failure.inc(labelValues = [$e.name]) - - if not ok: - if not isNil(peer.transport): - peer.transport.close() - - rlpx_accept_failure.inc() + if response.version < devp2pSnappyVersion: + await peer.disconnect(IncompatibleProtocolVersion, notifyOtherPeer = true) + debug "Peer using obsolete devp2p version", + version = response.version, clientId = response.clientId + rlpx_accept_failure.inc(labelValues = [$IncompatibleProtocolVersion]) return nil - else: - rlpx_accept_success.inc() - return peer -when isMainModule: + peer.setSnappySupport(response) - when false: - # The assignments below can be used to investigate if the RLPx procs - # are considered GcSafe. The short answer is that they aren't, because - # they dispatch into user code that might use the GC. - type - GcSafeDispatchMsg = proc (peer: Peer, msgId: uint64, msgData: var Rlp) + logScope: + clientId = response.clientId - GcSafeRecvMsg = proc (peer: Peer): - Future[tuple[msgId: uint64, msgData: Rlp]] {.gcsafe.} + trace "DevP2P handshake completed", response - GcSafeAccept = proc (transport: StreamTransport, myKeys: KeyPair): - Future[Peer] {.gcsafe.} + # In case there is an outgoing connection started with this peer we give + # precedence to that one and we disconnect here with `AlreadyConnected` + if peer.remote in node.peerPool.connectedNodes or + peer.remote in node.peerPool.connectingNodes: + trace "Duplicate connection in rlpxAccept" + rlpx_accept_failure.inc(labelValues = [$AlreadyConnected]) + return nil - var - dispatchMsgPtr = invokeThunk - recvMsgPtr: GcSafeRecvMsg = recvMsg - acceptPtr: GcSafeAccept = rlpxAccept + node.peerPool.connectingNodes.incl(peer.remote) + + try: + await postHelloSteps(peer, response) + except PeerDisconnected as exc: + debug "Disconnect while accepting", reason = exc.reason, err = exc.msg + rlpx_accept_failure.inc(labelValues = [$exc.reason]) + return nil + except UselessPeerError as exc: + debug "Useless peer while accepting", err = exc.msg + + rlpx_accept_failure.inc(labelValues = [$UselessPeer]) + return nil + except EthP2PError as exc: + trace "P2P error during accept", err = exc.msg + rlpx_accept_failure.inc(labelValues = [$exc.name]) + return nil + + debug "Peer accepted", capabilities = response.capabilities + + error = false + rlpx_accept_success.inc() + + return peer diff --git a/eth/p2p/rlpxcrypt.nim b/eth/p2p/rlpxcrypt.nim index aa793be..6bc50e9 100644 --- a/eth/p2p/rlpxcrypt.nim +++ b/eth/p2p/rlpxcrypt.nim @@ -38,7 +38,8 @@ type IncompleteError = "rlpx: data incomplete" IncorrectArgs = "rlpx: incorrect arguments" - RlpxHeader* = array[16, byte] + RlpxEncryptedHeader* = array[RlpHeaderLength + RlpMacLength, byte] + RlpxHeader* = array[RlpHeaderLength, byte] RlpxResult*[T] = Result[T, RlpxError] @@ -61,8 +62,8 @@ proc sxor[T](a: var openArray[T], b: openArray[T]) {.inline.} = proc initSecretState*(secrets: ConnectionSecret, context: var SecretState) = ## Initialized `context` with values from `secrets`. 
- # FIXME: Yes, the encryption is insecure, - # see: https://github.com/ethereum/devp2p/issues/32 + # This scheme is insecure, see: + # https://github.com/ethereum/devp2p/issues/32 # https://github.com/ethereum/py-evm/blob/master/p2p/peer.py#L159-L160 var iv: array[context.aesenc.sizeBlock, byte] context.aesenc.init(secrets.aesKey, iv) @@ -132,8 +133,8 @@ proc encrypt*(c: var SecretState, header: openArray[byte], var frameMac = tmpmac.finish() tmpmac.clear() # return header_ciphertext + header_mac + frame_ciphertext + frame_mac - copyMem(addr output[headerMacPos], addr headerMac.data[0], RlpHeaderLength) - copyMem(addr output[frameMacPos], addr frameMac.data[0], RlpHeaderLength) + copyMem(addr output[headerMacPos], addr headerMac.data[0], RlpMacLength) + copyMem(addr output[frameMacPos], addr frameMac.data[0], RlpMacLength) ok() proc encryptMsg*(msg: openArray[byte], secrets: var SecretState): seq[byte] = @@ -159,21 +160,19 @@ proc encryptMsg*(msg: openArray[byte], secrets: var SecretState): seq[byte] = proc getBodySize*(a: RlpxHeader): int = (int(a[0]) shl 16) or (int(a[1]) shl 8) or int(a[2]) -proc decryptHeader*(c: var SecretState, data: openArray[byte], - output: var openArray[byte]): RlpxResult[void] = +proc decryptHeader*(c: var SecretState, data: openArray[byte]): RlpxResult[RlpxHeader] = ## Decrypts header `data` using SecretState `c` context and store ## result into `output`. ## - ## `header` must be exactly `RlpHeaderLength + RlpMacLength` length. - ## `output` must be at least `RlpHeaderLength` length. + ## `header` must be at least `RlpHeaderLength + RlpMacLength` length. + var tmpmac: keccak256 aes: array[RlpHeaderLength, byte] - if len(data) != RlpHeaderLength + RlpMacLength: + if len(data) < RlpHeaderLength + RlpMacLength: return err(IncompleteError) - if len(output) < RlpHeaderLength: - return err(IncorrectArgs) + # mac_secret = self.ingress_mac.digest()[:HEADER_LEN] tmpmac = c.imac var macsec = tmpmac.finish() @@ -187,33 +186,17 @@ proc decryptHeader*(c: var SecretState, data: openArray[byte], tmpmac = c.imac var expectMac = tmpmac.finish() # if not bytes_eq(expected_header_mac, header_mac): - let headerMacPos = RlpHeaderLength - if not equalMem(cast[pointer](unsafeAddr data[headerMacPos]), - cast[pointer](addr expectMac.data[0]), RlpMacLength): - result = err(IncorrectMac) - else: - # return self.aes_dec.update(header_ciphertext) - c.aesdec.decrypt(toa(data, 0, RlpHeaderLength), output) - result = ok() + if not equalMem(unsafeAddr data[RlpHeaderLength], + addr expectMac.data[0], RlpMacLength): + return err(IncorrectMac) -proc decryptHeaderAndGetMsgSize*(c: var SecretState, - encryptedHeader: openArray[byte], - outSize: var int, - outHeader: var RlpxHeader): RlpxResult[void] = - result = decryptHeader(c, encryptedHeader, outHeader) - if result.isOk(): - outSize = outHeader.getBodySize - -proc decryptHeaderAndGetMsgSize*(c: var SecretState, - encryptedHeader: openArray[byte], - outSize: var int): RlpxResult[void] = - var decryptedHeader: RlpxHeader - result = decryptHeader(c, encryptedHeader, decryptedHeader) - if result.isOk(): - outSize = decryptedHeader.getBodySize + # return self.aes_dec.update(header_ciphertext) + var output: RlpxHeader + c.aesdec.decrypt(toa(data, 0, RlpHeaderLength), output) + ok(output) proc decryptBody*(c: var SecretState, data: openArray[byte], bodysize: int, - output: var openArray[byte], outlen: var int): RlpxResult[void] = + output: var openArray[byte]): RlpxResult[void] = ## Decrypts body `data` using SecretState `c` context and store ## 
result into `output`. ## @@ -224,7 +207,6 @@ proc decryptBody*(c: var SecretState, data: openArray[byte], bodysize: int, var tmpmac: keccak256 aes: array[RlpHeaderLength, byte] - outlen = 0 let rsize = roundup16(bodysize) if len(data) < rsize + RlpMacLength: return err(IncompleteError) @@ -245,8 +227,7 @@ proc decryptBody*(c: var SecretState, data: openArray[byte], bodysize: int, let bodyMacPos = rsize if not equalMem(cast[pointer](unsafeAddr data[bodyMacPos]), cast[pointer](addr expectMac.data[0]), RlpMacLength): - result = err(IncorrectMac) + err(IncorrectMac) else: c.aesdec.decrypt(toa(data, 0, rsize), output) - outlen = bodysize - result = ok() + ok() diff --git a/eth/p2p/rlpxtransport.nim b/eth/p2p/rlpxtransport.nim new file mode 100644 index 0000000..7cb1b27 --- /dev/null +++ b/eth/p2p/rlpxtransport.nim @@ -0,0 +1,243 @@ +# nim-eth +# Copyright (c) 2018-2024 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at +# https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at +# https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except +# according to those terms. + +{.push raises: [], gcsafe.} + +import results, chronos, ../common/keys, ./[auth, rlpxcrypt] + +export results, keys + +type + RlpxTransport* = ref object + stream: StreamTransport + state: SecretState + pubkey*: PublicKey + + RlpxTransportError* = object of CatchableError + +template `^`(arr): auto = + # passes a stack array with a matching `arrLen` variable as an open array + arr.toOpenArray(0, `arr Len` - 1) + +proc initiatorHandshake( + rng: ref HmacDrbgContext, + keys: KeyPair, + stream: StreamTransport, + remotePubkey: PublicKey, +): Future[ConnectionSecret] {. + async: (raises: [CancelledError, TransportError, RlpxTransportError]) +.} = + # https://github.com/ethereum/devp2p/blob/5713591d0366da78a913a811c7502d9ca91d29a8/rlpx.md#initial-handshake + var + handshake = Handshake.init(rng[], keys, {Initiator}) + authMsg: array[AuthMessageMaxEIP8, byte] + + let + authMsgLen = handshake.authMessage(rng[], remotePubkey, authMsg).expect( + "No errors with correctly sized buffer" + ) + + writeRes = await stream.write(addr authMsg[0], authMsgLen) + if writeRes != authMsgLen: + # TOOD raising a chronos error here is a hack - rework using something else + raise (ref TransportIncompleteError)(msg: "Could not write RLPx handshake header") + + var ackMsg = newSeqOfCap[byte](1024) + ackMsg.setLen(MsgLenLenEIP8) + await stream.readExactly(addr ackMsg[0], len(ackMsg)) + + let ackMsgLen = handshake.decodeAckMsgLen(ackMsg).valueOr: + raise + (ref RlpxTransportError)(msg: "Could not decode handshake ack length: " & $error) + + ackMsg.setLen(ackMsgLen) + await stream.readExactly(addr ackMsg[MsgLenLenEIP8], ackMsgLen - MsgLenLenEIP8) + + handshake.decodeAckMessage(ackMsg).isOkOr: + raise (ref RlpxTransportError)(msg: "Could not decode handshake ack: " & $error) + + handshake.getSecrets(^authMsg, ackMsg) + +proc responderHandshake( + rng: ref HmacDrbgContext, keys: KeyPair, stream: StreamTransport +): Future[(ConnectionSecret, PublicKey)] {. 
+ async: (raises: [CancelledError, TransportError, RlpxTransportError]) +.} = + # https://github.com/ethereum/devp2p/blob/5713591d0366da78a913a811c7502d9ca91d29a8/rlpx.md#initial-handshake + var + handshake = Handshake.init(rng[], keys, {auth.Responder}) + authMsg = newSeqOfCap[byte](1024) + + authMsg.setLen(MsgLenLenEIP8) + await stream.readExactly(addr authMsg[0], len(authMsg)) + + let authMsgLen = handshake.decodeAuthMsgLen(authMsg).valueOr: + raise + (ref RlpxTransportError)(msg: "Could not decode handshake auth length: " & $error) + + authMsg.setLen(authMsgLen) + await stream.readExactly(addr authMsg[MsgLenLenEIP8], authMsgLen - MsgLenLenEIP8) + + handshake.decodeAuthMessage(authMsg).isOkOr: + raise (ref RlpxTransportError)( + msg: "Could not decode handshake auth message: " & $error + ) + + var ackMsg: array[AckMessageMaxEIP8, byte] + let ackMsgLen = + handshake.ackMessage(rng[], ackMsg).expect("no errors with correcly sized buffer") + + var res = await stream.write(addr ackMsg[0], ackMsgLen) + if res != ackMsgLen: + # TOOD raising a chronos error here is a hack - rework using something else + raise (ref TransportIncompleteError)(msg: "Could not write RLPx ack message") + + (handshake.getSecrets(authMsg, ^ackMsg), handshake.remoteHPubkey) + +proc connect*( + _: type RlpxTransport, + rng: ref HmacDrbgContext, + keys: KeyPair, + address: TransportAddress, + remotePubkey: PublicKey, +): Future[RlpxTransport] {. + async: (raises: [CancelledError, TransportError, RlpxTransportError]) +.} = + var stream = await connect(address) + + try: + let secrets = await initiatorHandshake(rng, keys, stream, remotePubkey) + var res = RlpxTransport(stream: move(stream), pubkey: remotePubkey) + initSecretState(secrets, res.state) + res + finally: + if stream != nil: + stream.close() + +proc accept*( + _: type RlpxTransport, + rng: ref HmacDrbgContext, + keys: KeyPair, + stream: StreamTransport, +): Future[RlpxTransport] {. + async: (raises: [CancelledError, TransportError, RlpxTransportError]) +.} = + var stream = stream + try: + let (secrets, remotePubkey) = await responderHandshake(rng, keys, stream) + var res = RlpxTransport(stream: move(stream), pubkey: remotePubkey) + initSecretState(secrets, res.state) + res + finally: + if stream != nil: + stream.close() + +proc recvMsg*( + transport: RlpxTransport +): Future[seq[byte]] {. + async: (raises: [CancelledError, TransportError, RlpxTransportError]) +.} = + ## Read an RLPx frame from the given peer + var msgHeaderEnc: RlpxEncryptedHeader + await transport.stream.readExactly(addr msgHeaderEnc[0], msgHeaderEnc.len) + + let msgHeader = decryptHeader(transport.state, msgHeaderEnc).valueOr: + raise (ref RlpxTransportError)(msg: "Cannot decrypt RLPx frame header") + + # The header has more fields than the size, but they are unused / obsolete. 
+ # Although some clients set them, we don't check this in the spirit of EIP-8 + # https://github.com/ethereum/devp2p/blob/5713591d0366da78a913a811c7502d9ca91d29a8/rlpx.md#framing + + let msgSize = msgHeader.getBodySize() + let remainingBytes = encryptedLength(msgSize) - 32 + + var encryptedBytes = newSeq[byte](remainingBytes) + await transport.stream.readExactly(addr encryptedBytes[0], len(encryptedBytes)) + + let decryptedMaxLength = decryptedLength(msgSize) # Padded length + var msgBody = newSeq[byte](decryptedMaxLength) + + if decryptBody(transport.state, encryptedBytes, msgSize, msgBody).isErr(): + raise (ref RlpxTransportError)(msg: "Cannot decrypt message body") + + reset(encryptedBytes) # Release memory (TODO: in-place decryption) + + msgBody.setLen(msgSize) # Remove padding + msgBody + +proc sendMsg*( + transport: RlpxTransport, data: seq[byte] +) {.async: (raises: [CancelledError, TransportError, RlpxTransportError]).} = + let cipherText = encryptMsg(data, transport.state) + var res = await transport.stream.write(cipherText) + if res != len(cipherText): + # TOOD raising a chronos error here is a hack - rework using something else + raise (ref TransportIncompleteError)(msg: "Could not complete writing message") + +proc remoteAddress*( + transport: RlpxTransport +): TransportAddress {.raises: [TransportOsError].} = + transport.stream.remoteAddress() + +proc closed*(transport: RlpxTransport): bool = + transport.stream != nil and transport.stream.closed + +proc close*(transport: RlpxTransport) = + if transport.stream != nil: + transport.stream.close() + +proc closeWait*( + transport: RlpxTransport +): Future[void] {.async: (raises: [], raw: true).} = + transport.stream.closeWait() + +when isMainModule: + # Simple CLI application for negotiating an RLPx connection with a peer + + import stew/byteutils, std/cmdline, std/strutils, eth/rlp + if paramCount() < 3: + echo "rlpxtransport ip port pubkey" + quit 1 + + let + rng = newRng() + kp = KeyPair.random(rng[]) + + echo "Local key: ", toHex(kp.pubkey.toRaw()) + + let client = waitFor RlpxTransport.connect( + rng, + kp, + initTAddress(paramStr(1), parseInt(paramStr(2))), + PublicKey.fromHex(paramStr(3))[], + ) + + proc encodeMsg(msgId: uint64, msg: auto): seq[byte] = + var rlpWriter = initRlpWriter() + rlpWriter.append msgId + rlpWriter.appendRecordType(msg, typeof(msg).rlpFieldsCount > 1) + rlpWriter.finish + + waitFor client.sendMsg( + encodeMsg( + uint64 0, (uint64 4, "nimbus", @[("eth", uint64 68)], uint64 0, kp.pubkey.toRaw()) + ) + ) + + while true: + echo "Reading message" + var data = waitFor client.recvMsg() + var rlp = rlpFromBytes(data) + let msgId = rlp.read(uint64) + if msgId == 0: + echo "Hello: ", + rlp.read((uint64, string, seq[(string, uint64)], uint64, seq[byte])) + else: + echo "Unknown message ", msgId, " ", toHex(data) diff --git a/eth/rlp.nim b/eth/rlp.nim index f01214f..610e767 100644 --- a/eth/rlp.nim +++ b/eth/rlp.nim @@ -448,9 +448,6 @@ func readImpl( else: rlp.bytes.len() - template getUnderlyingType[T](_: Option[T]): untyped = - T - template getUnderlyingType[T](_: Opt[T]): untyped = T @@ -458,16 +455,6 @@ func readImpl( type FieldType {.used.} = type field when hasCustomPragmaFixed(RecordType, fieldName, rlpCustomSerialization): field = rlp.read(result, FieldType) - elif field is Option: - # this works for optional fields at the end of an object/tuple - # if the optional field is followed by a mandatory field, - # custom serialization for a field or for the parent object - # will be better - type UT = 
getUnderlyingType(field) - if rlp.position < payloadEnd: - field = some(rlp.read(UT)) - else: - field = none(UT) elif field is Opt: # this works for optional fields at the end of an object/tuple # if the optional field is followed by a mandatory field, diff --git a/eth/rlp/options.nim b/eth/rlp/options.nim deleted file mode 100644 index f67b454..0000000 --- a/eth/rlp/options.nim +++ /dev/null @@ -1,17 +0,0 @@ -import - std/options, - ../rlp - -proc read*[T](rlp: var Rlp, O: type Option[T]): O {.inline.} = - mixin read - if not rlp.isEmpty: - result = some read(rlp, T) - -proc append*(writer: var RlpWriter, value: Option) = - if value.isSome: - writer.append value.get - else: - writer.append "" - -export - options, rlp diff --git a/eth/rlp/results.nim b/eth/rlp/results.nim new file mode 100644 index 0000000..f2d3a81 --- /dev/null +++ b/eth/rlp/results.nim @@ -0,0 +1,22 @@ +import ../rlp +import writer +import pkg/results + +export + rlp, results + +proc append*[T](w: var RlpWriter, val: Opt[T]) = + mixin append + + if val.isSome: + w.append(val.get()) + else: + w.append("") + +proc read*[T](rlp: var Rlp, val: var Opt[T]) {.raises: [RlpError].} = + mixin read + if rlp.blobLen != 0: + val = Opt.some(rlp.read(T)) + else: + rlp.skipElem + diff --git a/eth/rlp/writer.nim b/eth/rlp/writer.nim index e726ddc..56362c2 100644 --- a/eth/rlp/writer.nim +++ b/eth/rlp/writer.nim @@ -1,6 +1,6 @@ import std/options, - results, + pkg/results, stew/[arraybuf, assign2, bitops2, shims/macros], ./priv/defs @@ -8,7 +8,7 @@ export arraybuf type RlpWriter* = object - pendingLists: seq[tuple[remainingItems, outBytes: int]] + pendingLists: seq[tuple[remainingItems, startPos: int]] output: seq[byte] RlpIntBuf* = ArrayBuf[9, byte] @@ -41,7 +41,7 @@ func writeCount(bytes: var auto, count: int, baseMarker: byte) = origLen = bytes.len lenPrefixBytes = uint64(count).bytesNeeded - bytes.setLen(origLen + int(lenPrefixBytes) + 1) + bytes.setLen(origLen + lenPrefixBytes + 1) bytes[origLen] = baseMarker + (THRESHOLD_LIST_LEN - 1) + byte(lenPrefixBytes) bytes.writeBigEndian(uint64(count), bytes.len - 1, lenPrefixBytes) @@ -60,17 +60,16 @@ proc initRlpWriter*: RlpWriter = # expected to be short-lived, it doesn't hurt to allocate this buffer result.output = newSeqOfCap[byte](2000) -proc decRet(n: var int, delta: int): int = - n -= delta - n - proc maybeClosePendingLists(self: var RlpWriter) = while self.pendingLists.len > 0: let lastListIdx = self.pendingLists.len - 1 - doAssert self.pendingLists[lastListIdx].remainingItems >= 1 - if decRet(self.pendingLists[lastListIdx].remainingItems, 1) == 0: + doAssert self.pendingLists[lastListIdx].remainingItems > 0 + + self.pendingLists[lastListIdx].remainingItems -= 1 + # if one last item is remaining in the list + if self.pendingLists[lastListIdx].remainingItems == 0: # A list have been just finished. It was started in `startList`. - let listStartPos = self.pendingLists[lastListIdx].outBytes + let listStartPos = self.pendingLists[lastListIdx].startPos self.pendingLists.setLen lastListIdx # How many bytes were written since the start? 
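Aside (not part of the patch): a minimal sketch of how the deferred list header kept by `startList`/`maybeClosePendingLists` above is observed from calling code. It assumes only the public `eth/rlp` writer API that already appears elsewhere in this diff (`initRlpWriter`, `startList`, `append`, `finish`); the expected bytes follow the standard RLP rules that `writeCount` implements (short-list header `0xc0 + payload length`).

import eth/rlp

var writer = initRlpWriter()
writer.startList(2)         # no header written yet; (2, startPos) is pushed on pendingLists
writer.append(42'u64)       # 0x2a: a single byte below 0x80 encodes as itself
writer.append("dog")        # 0x83 'd' 'o' 'g'; this append closes the list and the
                            # 0xc5 header is inserted back at startPos
let encoded = writer.finish # returns the completed encoding
doAssert encoded == @[0xc5'u8, 0x2a, 0x83, 0x64, 0x6f, 0x67]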
@@ -104,33 +103,21 @@ proc appendRawBytes*(self: var RlpWriter, bytes: openArray[byte]) = self.output.len - bytes.len, self.output.len - 1), bytes) self.maybeClosePendingLists() -proc appendRawList(self: var RlpWriter, bytes: openArray[byte]) = - self.output.writeCount(bytes.len, LIST_START_MARKER) - self.appendRawBytes(bytes) - proc startList*(self: var RlpWriter, listSize: int) = if listSize == 0: - self.appendRawList([]) + self.output.writeCount(0, LIST_START_MARKER) + self.appendRawBytes([]) else: self.pendingLists.add((listSize, self.output.len)) -proc appendBlob(self: var RlpWriter, data: openArray[byte], startMarker: byte) = +proc appendBlob(self: var RlpWriter, data: openArray[byte]) = if data.len == 1 and byte(data[0]) < BLOB_START_MARKER: self.output.add byte(data[0]) self.maybeClosePendingLists() else: - self.output.writeCount(data.len, startMarker) + self.output.writeCount(data.len, BLOB_START_MARKER) self.appendRawBytes(data) -proc appendImpl(self: var RlpWriter, data: string) = - appendBlob(self, data.toOpenArrayByte(0, data.high), BLOB_START_MARKER) - -proc appendBlob(self: var RlpWriter, data: openArray[byte]) = - appendBlob(self, data, BLOB_START_MARKER) - -proc appendBlob(self: var RlpWriter, data: openArray[char]) = - appendBlob(self, data.toOpenArrayByte(0, data.high), BLOB_START_MARKER) - proc appendInt(self: var RlpWriter, i: SomeUnsignedInt) = # this is created as a separate proc as an extra precaution against # any overloading resolution problems when matching the IntLike concept. @@ -138,64 +125,47 @@ proc appendInt(self: var RlpWriter, i: SomeUnsignedInt) = self.maybeClosePendingLists() + +template appendImpl(self: var RlpWriter, data: openArray[byte]) = + self.appendBlob(data) + +template appendImpl(self: var RlpWriter, data: openArray[char]) = + self.appendBlob(data.toOpenArrayByte(0, data.high)) + +template appendImpl(self: var RlpWriter, data: string) = + self.appendBlob(data.toOpenArrayByte(0, data.high)) + template appendImpl(self: var RlpWriter, i: SomeUnsignedInt) = - appendInt(self, i) + self.appendInt(i) template appendImpl(self: var RlpWriter, e: enum) = - appendImpl(self, int(e)) + # TODO: check for negative enums + self.appendInt(uint64(e)) template appendImpl(self: var RlpWriter, b: bool) = - appendImpl(self, int(b)) + self.appendInt(uint64(b)) -proc appendImpl[T](self: var RlpWriter, listOrBlob: openArray[T]) = +proc appendImpl[T](self: var RlpWriter, list: openArray[T]) = mixin append - # TODO: This append proc should be overloaded by `openArray[byte]` after - # nim bug #7416 is fixed. 
- when T is (byte or char): - self.appendBlob(listOrBlob) - else: - self.startList listOrBlob.len - for i in 0 ..< listOrBlob.len: - self.append listOrBlob[i] + self.startList list.len + for i in 0 ..< list.len: + self.append list[i] -proc hasOptionalFields(T: type): bool = +proc countOptionalFields(T: type): int {.compileTime.} = mixin enumerateRlpFields - proc helper: bool = - var dummy: T - result = false - template detectOptionalField(RT, n, x) {.used.} = - when x is Option or x is Opt: - return true - enumerateRlpFields(dummy, detectOptionalField) - - const res = helper() - return res - -proc optionalFieldsNum(x: openArray[bool]): int = - # count optional fields backward - for i in countdown(x.len-1, 0): - if x[i]: inc result - else: break - -proc checkedOptionalFields(T: type, FC: static[int]): int = - mixin enumerateRlpFields - - var - i = 0 - dummy: T - res: array[FC, bool] + var dummy: T + # closure signature matches the one in object_serialization.nim template op(RT, fN, f) = - res[i] = f is Option or f is Opt - inc i + when f is Option or f is Opt: + inc result + else: # this will count only optional fields at the end + result = 0 enumerateRlpFields(dummy, op) - # ignoring first optional fields - optionalFieldsNum(res) - 1 - proc genPrevFields(obj: NimNode, fd: openArray[FieldDescription], hi, lo: int): NimNode = result = newStmtList() for i in countdown(hi, lo): @@ -230,32 +200,21 @@ macro genOptionalFieldsValidation(obj: untyped, T: type, num: static[int]): unty doAssert obj.blobGasUsed.isSome == obj.excessBlobGas.isSome, "blobGasUsed and excessBlobGas must both be present or absent" -macro countFieldsRuntimeImpl(obj: untyped, T: type, num: static[int]): untyped = - let - Tresolved = getType(T)[1] - fd = recordFields(Tresolved.getImpl) - res = ident("result") - mlen = fd.len - num - - result = newStmtList() - result.add quote do: - `res` = `mlen` - - for i in countdown(fd.high, fd.len-num): - let fieldName = fd[i].name - result.add quote do: - `res` += `obj`.`fieldName`.isSome.ord - proc countFieldsRuntime(obj: object|tuple): int = - # count mandatory fields and non empty optional fields - type ObjType = type obj + mixin enumerateRlpFields - const - fieldsCount = ObjType.rlpFieldsCount - # include first optional fields - cof = checkedOptionalFields(ObjType, fieldsCount) + 1 + var numOptionals: int = 0 - countFieldsRuntimeImpl(obj, ObjType, cof) + template op(RT, fN, f) {.used.} = + when f is Option or f is Opt: + if f.isSome: # if optional and non empty + inc numOptionals + else: # if mandatory field + inc result + numOptionals = 0 # count only optionals at the end (after mandatory) + + enumerateRlpFields(obj, op) + result += numOptionals proc appendRecordType*(self: var RlpWriter, obj: object|tuple, wrapInList = wrapObjsInList) = mixin enumerateRlpFields, append @@ -263,25 +222,22 @@ proc appendRecordType*(self: var RlpWriter, obj: object|tuple, wrapInList = wrap type ObjType = type obj const - hasOptional = hasOptionalFields(ObjType) - fieldsCount = ObjType.rlpFieldsCount + cof = countOptionalFields(ObjType) - when hasOptional: - const - cof = checkedOptionalFields(ObjType, fieldsCount) - when cof > 0: - genOptionalFieldsValidation(obj, ObjType, cof) + when cof > 0: + # ignoring first optional fields + genOptionalFieldsValidation(obj, ObjType, cof - 1) if wrapInList: - when hasOptional: + when cof > 0: self.startList(obj.countFieldsRuntime) else: - self.startList(fieldsCount) + self.startList(ObjType.rlpFieldsCount) template op(RecordType, fieldName, field) {.used.} = when 
hasCustomPragmaFixed(RecordType, fieldName, rlpCustomSerialization): append(self, obj, field) - elif (field is Option or field is Opt) and hasOptional: + elif (field is Option or field is Opt) and cof > 0: # this works for optional fields at the end of an object/tuple # if the optional field is followed by a mandatory field, # custom serialization for a field or for the parent object @@ -293,20 +249,16 @@ proc appendRecordType*(self: var RlpWriter, obj: object|tuple, wrapInList = wrap enumerateRlpFields(obj, op) -proc appendImpl(self: var RlpWriter, data: object) {.inline.} = +template appendImpl(self: var RlpWriter, data: object) = self.appendRecordType(data) -proc appendImpl(self: var RlpWriter, data: tuple) {.inline.} = +template appendImpl(self: var RlpWriter, data: tuple) = self.appendRecordType(data) # We define a single `append` template with a pretty low specificity # score in order to facilitate easier overloading with user types: template append*[T](w: var RlpWriter; data: T) = - when data is (enum|bool): - # TODO detect negative enum values at compile time? - appendImpl(w, uint64(data)) - else: - appendImpl(w, data) + appendImpl(w, data) template append*(w: var RlpWriter; data: SomeSignedInt) = {.error: "Signed integer encoding is not defined for rlp".} diff --git a/tests/common/test_eth_types_rlp.nim b/tests/common/test_eth_types_rlp.nim index f11cbcb..30fd3d8 100644 --- a/tests/common/test_eth_types_rlp.nim +++ b/tests/common/test_eth_types_rlp.nim @@ -246,53 +246,3 @@ template genTestOpt(TT) = genTestOpt(BlockBodyOpt) genTestOpt(EthBlockOpt) - -suite "EIP-7865 tests": - const reqs = [ - Request( - requestType: DepositRequestType, - deposit: DepositRequest( - pubkey : bytes48"0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", - withdrawalCredentials: bytes32"0xBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB", - amount : 1, - signature : bytes96"0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", - index : 3, - ) - ), - Request( - requestType: WithdrawalRequestType, - withdrawal: WithdrawalRequest( - sourceAddress : address"0xDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD", - validatorPubkey: bytes48"0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", - amount : 7, - ) - ), - Request( - requestType: ConsolidationRequestType, - consolidation: ConsolidationRequest( - sourceAddress: address"0xEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE", - sourcePubkey : bytes48"0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", - targetPubkey : bytes48"0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", - ) - ) - ] - - test "rlp roundtrip": - let - body = BlockBody( - withdrawals: Opt.some(@[Withdrawal()]), - requests: Opt.some(@reqs) - ) - - blk = EthBlock( - withdrawals: Opt.some(@[Withdrawal()]), - requests: Opt.some(@reqs) - ) - - encodedBody = rlp.encode(body) - encodedBlock = rlp.encode(blk) - decodedBody = rlp.decode(encodedBody, BlockBody) - decodedBlk = rlp.decode(encodedBlock, EthBlock) - - check decodedBody == body - check decodedBlk == blk diff --git a/tests/p2p/all_tests.nim b/tests/p2p/all_tests.nim index 66b2193..f1261ec 100644 --- a/tests/p2p/all_tests.nim +++ b/tests/p2p/all_tests.nim @@ -6,4 +6,5 @@ import ./test_ecies, ./test_enode, 
./test_rlpx_thunk, + ./test_rlpxtransport, ./test_protocol_handlers \ No newline at end of file diff --git a/tests/p2p/test_auth.nim b/tests/p2p/test_auth.nim index 80c7e2d..041817f 100644 --- a/tests/p2p/test_auth.nim +++ b/tests/p2p/test_auth.nim @@ -14,87 +14,6 @@ import nimcrypto/[utils, keccak], ../../eth/common/keys, ../../eth/p2p/auth -# This was generated by `print` actual auth message generated by -# https://github.com/ethereum/py-evm/blob/master/tests/p2p/test_auth.py -const pyevmAuth = """ - 22034ad2e7545e2b0bf02ecb1e40db478dfbbf7aeecc834aec2523eb2b7e74ee - 77ba40c70a83bfe9f2ab91f0131546dcf92c3ee8282d9907fee093017fd0302d - 0034fdb5419558137e0d44cd13d319afe5629eeccb47fd9dfe55cc6089426e46 - cc762dd8a0636e07a54b31169eba0c7a20a1ac1ef68596f1f283b5c676bae406 - 4abfcce24799d09f67e392632d3ffdc12e3d6430dcb0ea19c318343ffa7aae74 - d4cd26fecb93657d1cd9e9eaf4f8be720b56dd1d39f190c4e1c6b7ec66f077bb - 1100""" - -# This data comes from https://gist.github.com/fjl/3a78780d17c755d22df2 -const data = [ - ("initiator_private_key", - "5e173f6ac3c669587538e7727cf19b782a4f2fda07c1eaa662c593e5e85e3051"), - ("receiver_private_key", - "c45f950382d542169ea207959ee0220ec1491755abe405cd7498d6b16adb6df8"), - ("initiator_ephemeral_private_key", - "19c2185f4f40634926ebed3af09070ca9e029f2edd5fae6253074896205f5f6c"), - ("receiver_ephemeral_private_key", - "d25688cf0ab10afa1a0e2dba7853ed5f1e5bf1c631757ed4e103b593ff3f5620"), - ("auth_plaintext", - """884c36f7ae6b406637c1f61b2f57e1d2cab813d24c6559aaf843c3f48962f32f - 46662c066d39669b7b2e3ba14781477417600e7728399278b1b5d801a519aa57 - 0034fdb5419558137e0d44cd13d319afe5629eeccb47fd9dfe55cc6089426e46 - cc762dd8a0636e07a54b31169eba0c7a20a1ac1ef68596f1f283b5c676bae406 - 4abfcce24799d09f67e392632d3ffdc12e3d6430dcb0ea19c318343ffa7aae74 - d4cd26fecb93657d1cd9e9eaf4f8be720b56dd1d39f190c4e1c6b7ec66f077bb - 1100"""), - ("authresp_plaintext", - """802b052f8b066640bba94a4fc39d63815c377fced6fcb84d27f791c9921ddf3e - 9bf0108e298f490812847109cbd778fae393e80323fd643209841a3b7f110397 - f37ec61d84cea03dcc5e8385db93248584e8af4b4d1c832d8c7453c0089687a7 - 00"""), - ("auth_ciphertext", - """04a0274c5951e32132e7f088c9bdfdc76c9d91f0dc6078e848f8e3361193dbdc - 43b94351ea3d89e4ff33ddcefbc80070498824857f499656c4f79bbd97b6c51a - 514251d69fd1785ef8764bd1d262a883f780964cce6a14ff206daf1206aa073a - 2d35ce2697ebf3514225bef186631b2fd2316a4b7bcdefec8d75a1025ba2c540 - 4a34e7795e1dd4bc01c6113ece07b0df13b69d3ba654a36e35e69ff9d482d88d - 2f0228e7d96fe11dccbb465a1831c7d4ad3a026924b182fc2bdfe016a6944312 - 021da5cc459713b13b86a686cf34d6fe6615020e4acf26bf0d5b7579ba813e77 - 23eb95b3cef9942f01a58bd61baee7c9bdd438956b426a4ffe238e61746a8c93 - d5e10680617c82e48d706ac4953f5e1c4c4f7d013c87d34a06626f498f34576d - c017fdd3d581e83cfd26cf125b6d2bda1f1d56"""), - ("authresp_ciphertext", - """049934a7b2d7f9af8fd9db941d9da281ac9381b5740e1f64f7092f3588d4f87f - 5ce55191a6653e5e80c1c5dd538169aa123e70dc6ffc5af1827e546c0e958e42 - dad355bcc1fcb9cdf2cf47ff524d2ad98cbf275e661bf4cf00960e74b5956b79 - 9771334f426df007350b46049adb21a6e78ab1408d5e6ccde6fb5e69f0f4c92b - b9c725c02f99fa72b9cdc8dd53cff089e0e73317f61cc5abf6152513cb7d833f - 09d2851603919bf0fbe44d79a09245c6e8338eb502083dc84b846f2fee1cc310 - d2cc8b1b9334728f97220bb799376233e113"""), - ("ecdhe_shared_secret", - "e3f407f83fc012470c26a93fdff534100f2c6f736439ce0ca90e9914f7d1c381"), - ("initiator_nonce", - "cd26fecb93657d1cd9e9eaf4f8be720b56dd1d39f190c4e1c6b7ec66f077bb11"), - ("receiver_nonce", - "f37ec61d84cea03dcc5e8385db93248584e8af4b4d1c832d8c7453c0089687a7"), - ("aes_secret", - 
"c0458fa97a5230830e05f4f20b7c755c1d4e54b1ce5cf43260bb191eef4e418d"), - ("mac_secret", - "48c938884d5067a1598272fcddaa4b833cd5e7d92e8228c0ecdfabbe68aef7f1"), - ("token", - "3f9ec2592d1554852b1f54d228f042ed0a9310ea86d038dc2b401ba8cd7fdac4"), - ("initial_egress_MAC", - "09771e93b1a6109e97074cbe2d2b0cf3d3878efafe68f53c41bb60c0ec49097e"), - ("initial_ingress_MAC", - "75823d96e23136c89666ee025fb21a432be906512b3dd4a3049e898adb433847"), - ("initiator_hello_packet", - """6ef23fcf1cec7312df623f9ae701e63b550cdb8517fefd8dd398fc2acd1d935e - 6e0434a2b96769078477637347b7b01924fff9ff1c06df2f804df3b0402bbb9f - 87365b3c6856b45e1e2b6470986813c3816a71bff9d69dd297a5dbd935ab578f - 6e5d7e93e4506a44f307c332d95e8a4b102585fd8ef9fc9e3e055537a5cec2e9"""), - ("receiver_hello_packet", - """6ef23fcf1cec7312df623f9ae701e63be36a1cdd1b19179146019984f3625d4a - 6e0434a2b96769050577657247b7b02bc6c314470eca7e3ef650b98c83e9d7dd - 4830b3f718ff562349aead2530a8d28a8484604f92e5fced2c6183f304344ab0 - e7c301a0c05559f4c25db65e36820b4b909a226171a60ac6cb7beea09376d6d8""") -] - # These test vectors were copied from EIP8 specification # https://github.com/ethereum/EIPs/blob/master/EIPS/eip-8.md const eip8data = [ @@ -110,17 +29,6 @@ const eip8data = [ "7e968bba13b6c50e2c4cd7f241cc0d64d1ac25c7f5952df231ac6a2bda8ee5d6"), ("receiver_nonce", "559aead08264d5795d3909718cdd05abd49572e84fe55590eef31a88a08fdffd"), - ("auth_ciphertext_v4", - """048ca79ad18e4b0659fab4853fe5bc58eb83992980f4c9cc147d2aa31532efd29 - a3d3dc6a3d89eaf913150cfc777ce0ce4af2758bf4810235f6e6ceccfee1acc6b - 22c005e9e3a49d6448610a58e98744ba3ac0399e82692d67c1f58849050b3024e - 21a52c9d3b01d871ff5f210817912773e610443a9ef142e91cdba0bd77b5fdf07 - 69b05671fc35f83d83e4d3b0b000c6b2a1b1bba89e0fc51bf4e460df3105c444f - 14be226458940d6061c296350937ffd5e3acaceeaaefd3c6f74be8e23e0f45163 - cc7ebd76220f0128410fd05250273156d548a414444ae2f7dea4dfca2d43c057a - db701a715bf59f6fb66b2d1d20f2c703f851cbf5ac47396d9ca65b6260bd141ac - 4d53e2de585a73d1750780db4c9ee4cd4d225173a4592ee77e2bd94d0be3691f3 - b406f9bba9b591fc63facc016bfa8"""), ("auth_ciphertext_eip8", """01b304ab7578555167be8154d5cc456f567d5ba302662433674222360f08d5f15 34499d3678b513b0fca474f3a514b18e75683032eb63fccb16c156dc6eb2c0b15 @@ -151,14 +59,6 @@ const eip8data = [ f37407ac044b55be0908ecb94d4ed172ece66fd31bfdadf2b97a8bc690163ee11 f5b575a4b44e36e2bfb2f0fce91676fd64c7773bac6a003f481fddd0bae0a1f31 aa27504e2a533af4cef3b623f4791b2cca6d490"""), - ("authack_ciphertext_v4", - """049f8abcfa9c0dc65b982e98af921bc0ba6e4243169348a236abe9df5f93aa69d - 99cadddaa387662b0ff2c08e9006d5a11a278b1b3331e5aaabf0a32f01281b6f4 - ede0e09a2d5f585b26513cb794d9635a57563921c04a9090b4f14ee42be1a5461 - 049af4ea7a7f49bf4c97a352d39c8d02ee4acc416388c1c66cec761d2bc1c72da - 6ba143477f049c9d2dde846c252c111b904f630ac98e51609b3b1f58168ddca65 - 05b7196532e5f85b259a20c45e1979491683fee108e9660edbf38f3add489ae73 - e3dda2c71bd1497113d5c755e942d1"""), ("authack_ciphertext_eip8", """01ea0451958701280a56482929d3b0757da8f7fbe5286784beead59d95089c217 c9b917788989470b0e330cc6e4fb383c0340ed85fab836ec9fb8a49672712aeab @@ -204,12 +104,6 @@ const eip8data = [ let rng = newRng() -proc testValue(s: string): string = - for item in data: - if item[0] == s: - result = item[1] - break - proc testE8Value(s: string): string = for item in eip8data: if item[0] == s: @@ -217,118 +111,6 @@ proc testE8Value(s: string): string = break suite "Ethereum P2P handshake test suite": - block: - proc newTestHandshake(flags: set[HandshakeFlag]): Handshake = - if Initiator in flags: - let pk = 
PrivateKey.fromHex(testValue("initiator_private_key"))[] - result = Handshake.init(rng[], pk.toKeyPair(), flags) - - let epki = testValue("initiator_ephemeral_private_key") - result.ephemeral = PrivateKey.fromHex(epki)[].toKeyPair() - let nonce = fromHex(stripSpaces(testValue("initiator_nonce"))) - result.initiatorNonce[0..^1] = nonce[0..^1] - elif Responder in flags: - let pk = PrivateKey.fromHex(testValue("receiver_private_key"))[] - result = Handshake.init(rng[], pk.toKeyPair(), flags) - let epkr = testValue("receiver_ephemeral_private_key") - result.ephemeral = PrivateKey.fromHex(epkr)[].toKeyPair() - let nonce = fromHex(stripSpaces(testValue("receiver_nonce"))) - result.responderNonce[0..^1] = nonce[0..^1] - - test "Create plain auth message": - var initiator = newTestHandshake({Initiator}) - var responder = newTestHandshake({Responder}) - var m0 = newSeq[byte](initiator.authSize(false)) - var k0 = 0 - initiator.authMessage( - rng[], responder.host.pubkey, m0, k0, 0, false).expect("auth success") - var expect1 = fromHex(stripSpaces(testValue("auth_plaintext"))) - var expect2 = fromHex(stripSpaces(pyevmAuth)) - check: - m0[65..^1] == expect1[65..^1] - m0[0..^1] == expect2[0..^1] - - test "Auth message decoding": - var initiator = newTestHandshake({Initiator}) - var responder = newTestHandshake({Responder}) - var m0 = newSeq[byte](initiator.authSize()) - var k0 = 0 - let remoteEPubkey0 = initiator.ephemeral.pubkey - let remoteHPubkey0 = initiator.host.pubkey - - initiator.authMessage( - rng[], responder.host.pubkey, m0, k0).expect("auth success") - responder.decodeAuthMessage(m0).expect("decode success") - check: - responder.initiatorNonce[0..^1] == initiator.initiatorNonce[0..^1] - responder.remoteEPubkey == remoteEPubkey0 - responder.remoteHPubkey == remoteHPubkey0 - - test "ACK message expectation": - var initiator = newTestHandshake({Initiator}) - var responder = newTestHandshake({Responder}) - var m0 = newSeq[byte](initiator.authSize()) - var m1 = newSeq[byte](responder.ackSize(false)) - var k0 = 0 - var k1 = 0 - var expect0 = fromHex(stripSpaces(testValue("authresp_plaintext"))) - initiator.authMessage( - rng[], responder.host.pubkey, m0, k0).expect("auth success") - responder.decodeAuthMessage(m0).expect("decode success") - responder.ackMessage(rng[], m1, k1, 0, false).expect("ack success") - check: - m1 == expect0 - responder.initiatorNonce == initiator.initiatorNonce - - test "ACK message decoding": - var initiator = newTestHandshake({Initiator}) - var responder = newTestHandshake({Responder}) - var m0 = newSeq[byte](initiator.authSize()) - var m1 = newSeq[byte](responder.ackSize()) - var k0 = 0 - var k1 = 0 - - initiator.authMessage( - rng[], responder.host.pubkey, m0, k0).expect("auth success") - responder.decodeAuthMessage(m0).expect("decode success") - responder.ackMessage(rng[], m1, k1).expect("ack success") - initiator.decodeAckMessage(m1).expect("decode success") - let remoteEPubkey0 = responder.ephemeral.pubkey - let remoteHPubkey0 = responder.host.pubkey - check: - initiator.remoteEPubkey == remoteEPubkey0 - initiator.remoteHPubkey == remoteHPubkey0 - initiator.responderNonce == responder.responderNonce - - test "Check derived secrets": - var initiator = newTestHandshake({Initiator}) - var responder = newTestHandshake({Responder}) - var authm = fromHex(stripSpaces(testValue("auth_ciphertext"))) - var ackm = fromHex(stripSpaces(testValue("authresp_ciphertext"))) - var taes = fromHex(stripSpaces(testValue("aes_secret"))) - var tmac = 
fromHex(stripSpaces(testValue("mac_secret"))) - var temac = fromHex(stripSpaces(testValue("initial_egress_MAC"))) - var timac = fromHex(stripSpaces(testValue("initial_ingress_MAC"))) - - responder.decodeAuthMessage(authm).expect("decode success") - initiator.decodeAckMessage(ackm).expect("ack success") - var csecInitiator = initiator.getSecrets(authm, ackm) - var csecResponder = responder.getSecrets(authm, ackm) - check: - csecInitiator.aesKey == csecResponder.aesKey - csecInitiator.macKey == csecResponder.macKey - taes[0..^1] == csecInitiator.aesKey[0..^1] - tmac[0..^1] == csecInitiator.macKey[0..^1] - let iemac = csecInitiator.egressMac.finish() - let iimac = csecInitiator.ingressMac.finish() - let remac = csecResponder.egressMac.finish() - let rimac = csecResponder.ingressMac.finish() - check: - iemac.data[0..^1] == temac[0..^1] - iimac.data[0..^1] == timac[0..^1] - remac.data[0..^1] == timac[0..^1] - rimac.data[0..^1] == temac[0..^1] - block: proc newTestHandshake(flags: set[HandshakeFlag]): Handshake = if Initiator in flags: @@ -348,25 +130,6 @@ suite "Ethereum P2P handshake test suite": let nonce = fromHex(stripSpaces(testE8Value("receiver_nonce"))) result.responderNonce[0..^1] = nonce[0..^1] - test "AUTH/ACK v4 test vectors": # auth/ack v4 - var initiator = newTestHandshake({Initiator}) - var responder = newTestHandshake({Responder}) - var m0 = fromHex(stripSpaces(testE8Value("auth_ciphertext_v4"))) - responder.decodeAuthMessage(m0).expect("decode success") - check: - responder.initiatorNonce[0..^1] == initiator.initiatorNonce[0..^1] - let remoteEPubkey0 = initiator.ephemeral.pubkey - let remoteHPubkey0 = initiator.host.pubkey - check: - responder.remoteEPubkey == remoteEPubkey0 - responder.remoteHPubkey == remoteHPubkey0 - var m1 = fromHex(stripSpaces(testE8Value("authack_ciphertext_v4"))) - initiator.decodeAckMessage(m1).expect("decode success") - let remoteEPubkey1 = responder.ephemeral.pubkey - check: - initiator.remoteEPubkey == remoteEPubkey1 - initiator.responderNonce[0..^1] == responder.responderNonce[0..^1] - test "AUTH/ACK EIP-8 test vectors": var initiator = newTestHandshake({Initiator}) var responder = newTestHandshake({Responder}) @@ -390,8 +153,6 @@ suite "Ethereum P2P handshake test suite": var csecInitiator = initiator.getSecrets(m0, m1) var csecResponder = responder.getSecrets(m0, m1) check: - int(initiator.version) == 4 - int(responder.version) == 4 csecInitiator.aesKey == csecResponder.aesKey csecInitiator.macKey == csecResponder.macKey taes[0..^1] == csecInitiator.aesKey[0..^1] @@ -418,25 +179,21 @@ suite "Ethereum P2P handshake test suite": initiator.decodeAckMessage(m1).expect("decode success") let remoteEPubkey1 = responder.ephemeral.pubkey check: - int(initiator.version) == 57 - int(responder.version) == 56 initiator.remoteEPubkey == remoteEPubkey1 initiator.responderNonce[0..^1] == responder.responderNonce[0..^1] test "100 AUTH/ACK EIP-8 handshakes": for i in 1..100: - var initiator = newTestHandshake({Initiator, EIP8}) + var initiator = newTestHandshake({Initiator}) var responder = newTestHandshake({Responder}) - var m0 = newSeq[byte](initiator.authSize()) - var k0 = 0 - var k1 = 0 - initiator.authMessage( - rng[], responder.host.pubkey, m0, k0).expect("auth success") + var m0 = newSeq[byte](AuthMessageMaxEIP8) + let k0 = initiator.authMessage( + rng[], responder.host.pubkey, m0).expect("auth success") m0.setLen(k0) responder.decodeAuthMessage(m0).expect("decode success") - check (EIP8 in responder.flags) == true - var m1 = newSeq[byte](responder.ackSize()) 
- responder.ackMessage(rng[], m1, k1).expect("ack success") + + var m1 = newSeq[byte](AckMessageMaxEIP8) + let k1 = responder.ackMessage(rng[], m1).expect("ack success") m1.setLen(k1) initiator.decodeAckMessage(m1).expect("decode success") var csecInitiator = initiator.getSecrets(m0, m1) @@ -449,15 +206,13 @@ suite "Ethereum P2P handshake test suite": for i in 1..100: var initiator = newTestHandshake({Initiator}) var responder = newTestHandshake({Responder}) - var m0 = newSeq[byte](initiator.authSize()) - var k0 = 0 - var k1 = 0 - initiator.authMessage( - rng[], responder.host.pubkey, m0, k0).expect("auth success") + var m0 = newSeq[byte](AuthMessageMaxEIP8) + let k0 = initiator.authMessage( + rng[], responder.host.pubkey, m0).expect("auth success") m0.setLen(k0) responder.decodeAuthMessage(m0).expect("auth success") - var m1 = newSeq[byte](responder.ackSize()) - responder.ackMessage(rng[], m1, k1).expect("ack success") + var m1 = newSeq[byte](AckMessageMaxEIP8) + let k1 = responder.ackMessage(rng[], m1).expect("ack success") m1.setLen(k1) initiator.decodeAckMessage(m1).expect("ack success") @@ -507,10 +262,10 @@ suite "Ethereum P2P handshake test suite": res.error == AuthError.IncompleteError test "Invalid AckMessage - Minimum input size": - var initiator = newTestHandshake({Initiator, EIP8}) + var initiator = newTestHandshake({Initiator,}) # 1 byte short on minimum size - let m = newSeq[byte](AckMessageV4Length - 1) + let m = newSeq[byte](AckMessageEIP8Length - 1) let res = initiator.decodeAckMessage(m) check: @@ -518,7 +273,7 @@ suite "Ethereum P2P handshake test suite": res.error == AuthError.IncompleteError test "Invalid AckMessage - Minimum size prefix": - var initiator = newTestHandshake({Initiator, EIP8}) + var initiator = newTestHandshake({Initiator}) # Minimum size for EIP8 AckMessage var m = newSeq[byte](AckMessageEIP8Length) @@ -532,7 +287,7 @@ suite "Ethereum P2P handshake test suite": res.error == AuthError.IncompleteError test "Invalid AckMessage - Size prefix bigger than input": - var initiator = newTestHandshake({Initiator, EIP8}) + var initiator = newTestHandshake({Initiator}) # Minimum size for EIP8 AckMessage var m = newSeq[byte](AckMessageEIP8Length) diff --git a/tests/p2p/test_crypt.nim b/tests/p2p/test_crypt.nim index b630a28..46382eb 100644 --- a/tests/p2p/test_crypt.nim +++ b/tests/p2p/test_crypt.nim @@ -11,176 +11,147 @@ import unittest2, - nimcrypto/[utils, sysrand], + nimcrypto/[utils, keccak, sysrand], ../../eth/common/keys, ../../eth/p2p/[auth, rlpxcrypt] -const data = [ - ("initiator_private_key", - "5e173f6ac3c669587538e7727cf19b782a4f2fda07c1eaa662c593e5e85e3051"), - ("receiver_private_key", - "c45f950382d542169ea207959ee0220ec1491755abe405cd7498d6b16adb6df8"), - ("initiator_ephemeral_private_key", - "19c2185f4f40634926ebed3af09070ca9e029f2edd5fae6253074896205f5f6c"), - ("receiver_ephemeral_private_key", - "d25688cf0ab10afa1a0e2dba7853ed5f1e5bf1c631757ed4e103b593ff3f5620"), - ("auth_plaintext", - """884c36f7ae6b406637c1f61b2f57e1d2cab813d24c6559aaf843c3f48962f32f - 46662c066d39669b7b2e3ba14781477417600e7728399278b1b5d801a519aa57 - 0034fdb5419558137e0d44cd13d319afe5629eeccb47fd9dfe55cc6089426e46 - cc762dd8a0636e07a54b31169eba0c7a20a1ac1ef68596f1f283b5c676bae406 - 4abfcce24799d09f67e392632d3ffdc12e3d6430dcb0ea19c318343ffa7aae74 - d4cd26fecb93657d1cd9e9eaf4f8be720b56dd1d39f190c4e1c6b7ec66f077bb - 1100"""), - ("authresp_plaintext", - """802b052f8b066640bba94a4fc39d63815c377fced6fcb84d27f791c9921ddf3e - 
9bf0108e298f490812847109cbd778fae393e80323fd643209841a3b7f110397 - f37ec61d84cea03dcc5e8385db93248584e8af4b4d1c832d8c7453c0089687a7 - 00"""), - ("auth_ciphertext", - """04a0274c5951e32132e7f088c9bdfdc76c9d91f0dc6078e848f8e3361193dbdc - 43b94351ea3d89e4ff33ddcefbc80070498824857f499656c4f79bbd97b6c51a - 514251d69fd1785ef8764bd1d262a883f780964cce6a14ff206daf1206aa073a - 2d35ce2697ebf3514225bef186631b2fd2316a4b7bcdefec8d75a1025ba2c540 - 4a34e7795e1dd4bc01c6113ece07b0df13b69d3ba654a36e35e69ff9d482d88d - 2f0228e7d96fe11dccbb465a1831c7d4ad3a026924b182fc2bdfe016a6944312 - 021da5cc459713b13b86a686cf34d6fe6615020e4acf26bf0d5b7579ba813e77 - 23eb95b3cef9942f01a58bd61baee7c9bdd438956b426a4ffe238e61746a8c93 - d5e10680617c82e48d706ac4953f5e1c4c4f7d013c87d34a06626f498f34576d - c017fdd3d581e83cfd26cf125b6d2bda1f1d56"""), - ("authresp_ciphertext", - """049934a7b2d7f9af8fd9db941d9da281ac9381b5740e1f64f7092f3588d4f87f - 5ce55191a6653e5e80c1c5dd538169aa123e70dc6ffc5af1827e546c0e958e42 - dad355bcc1fcb9cdf2cf47ff524d2ad98cbf275e661bf4cf00960e74b5956b79 - 9771334f426df007350b46049adb21a6e78ab1408d5e6ccde6fb5e69f0f4c92b - b9c725c02f99fa72b9cdc8dd53cff089e0e73317f61cc5abf6152513cb7d833f - 09d2851603919bf0fbe44d79a09245c6e8338eb502083dc84b846f2fee1cc310 - d2cc8b1b9334728f97220bb799376233e113"""), - ("ecdhe_shared_secret", - "e3f407f83fc012470c26a93fdff534100f2c6f736439ce0ca90e9914f7d1c381"), - ("initiator_nonce", - "cd26fecb93657d1cd9e9eaf4f8be720b56dd1d39f190c4e1c6b7ec66f077bb11"), - ("receiver_nonce", - "f37ec61d84cea03dcc5e8385db93248584e8af4b4d1c832d8c7453c0089687a7"), - ("aes_secret", - "c0458fa97a5230830e05f4f20b7c755c1d4e54b1ce5cf43260bb191eef4e418d"), - ("mac_secret", - "48c938884d5067a1598272fcddaa4b833cd5e7d92e8228c0ecdfabbe68aef7f1"), - ("token", - "3f9ec2592d1554852b1f54d228f042ed0a9310ea86d038dc2b401ba8cd7fdac4"), - ("initial_egress_MAC", - "09771e93b1a6109e97074cbe2d2b0cf3d3878efafe68f53c41bb60c0ec49097e"), - ("initial_ingress_MAC", - "75823d96e23136c89666ee025fb21a432be906512b3dd4a3049e898adb433847"), - ("initiator_hello_packet", - """6ef23fcf1cec7312df623f9ae701e63b550cdb8517fefd8dd398fc2acd1d935e - 6e0434a2b96769078477637347b7b01924fff9ff1c06df2f804df3b0402bbb9f - 87365b3c6856b45e1e2b6470986813c3816a71bff9d69dd297a5dbd935ab578f - 6e5d7e93e4506a44f307c332d95e8a4b102585fd8ef9fc9e3e055537a5cec2e9"""), - ("receiver_hello_packet", - """6ef23fcf1cec7312df623f9ae701e63be36a1cdd1b19179146019984f3625d4a - 6e0434a2b96769050577657247b7b02bc6c314470eca7e3ef650b98c83e9d7dd - 4830b3f718ff562349aead2530a8d28a8484604f92e5fced2c6183f304344ab0 - e7c301a0c05559f4c25db65e36820b4b909a226171a60ac6cb7beea09376d6d8""") -] +# EIP-8 test case +# https://github.com/ethereum/EIPs/blob/master/EIPS/eip-8.md#rlpx-handshake + +const + staticKeyA = fromHex("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") + staticKeyB = fromHex("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + ephemeralKeyA = fromHex("869d6ecf5211f1cc60418a13b9d870b22959d0c16f02bec714c960dd2298a32d") + ephemeralKeyB = fromHex("e238eb8e04fee6511ab04c6dd3c89ce097b11f25d584863ac2b6d5b35b1847e4") + nonceA = fromHex("7e968bba13b6c50e2c4cd7f241cc0d64d1ac25c7f5952df231ac6a2bda8ee5d6") + nonceB = fromHex("559aead08264d5795d3909718cdd05abd49572e84fe55590eef31a88a08fdffd") + + auth1 = fromHex(stripSpaces(""" + 048ca79ad18e4b0659fab4853fe5bc58eb83992980f4c9cc147d2aa31532efd29a3d3dc6a3d89eaf + 913150cfc777ce0ce4af2758bf4810235f6e6ceccfee1acc6b22c005e9e3a49d6448610a58e98744 + 
ba3ac0399e82692d67c1f58849050b3024e21a52c9d3b01d871ff5f210817912773e610443a9ef14 + 2e91cdba0bd77b5fdf0769b05671fc35f83d83e4d3b0b000c6b2a1b1bba89e0fc51bf4e460df3105 + c444f14be226458940d6061c296350937ffd5e3acaceeaaefd3c6f74be8e23e0f45163cc7ebd7622 + 0f0128410fd05250273156d548a414444ae2f7dea4dfca2d43c057adb701a715bf59f6fb66b2d1d2 + 0f2c703f851cbf5ac47396d9ca65b6260bd141ac4d53e2de585a73d1750780db4c9ee4cd4d225173 + a4592ee77e2bd94d0be3691f3b406f9bba9b591fc63facc016bfa8""")) + auth2 = fromHex(stripSpaces(""" + 01b304ab7578555167be8154d5cc456f567d5ba302662433674222360f08d5f1534499d3678b513b + 0fca474f3a514b18e75683032eb63fccb16c156dc6eb2c0b1593f0d84ac74f6e475f1b8d56116b84 + 9634a8c458705bf83a626ea0384d4d7341aae591fae42ce6bd5c850bfe0b999a694a49bbbaf3ef6c + da61110601d3b4c02ab6c30437257a6e0117792631a4b47c1d52fc0f8f89caadeb7d02770bf999cc + 147d2df3b62e1ffb2c9d8c125a3984865356266bca11ce7d3a688663a51d82defaa8aad69da39ab6 + d5470e81ec5f2a7a47fb865ff7cca21516f9299a07b1bc63ba56c7a1a892112841ca44b6e0034dee + 70c9adabc15d76a54f443593fafdc3b27af8059703f88928e199cb122362a4b35f62386da7caad09 + c001edaeb5f8a06d2b26fb6cb93c52a9fca51853b68193916982358fe1e5369e249875bb8d0d0ec3 + 6f917bc5e1eafd5896d46bd61ff23f1a863a8a8dcd54c7b109b771c8e61ec9c8908c733c0263440e + 2aa067241aaa433f0bb053c7b31a838504b148f570c0ad62837129e547678c5190341e4f1693956c + 3bf7678318e2d5b5340c9e488eefea198576344afbdf66db5f51204a6961a63ce072c8926c""")) + auth3 = fromHex(stripSpaces(""" + 01b8044c6c312173685d1edd268aa95e1d495474c6959bcdd10067ba4c9013df9e40ff45f5bfd6f7 + 2471f93a91b493f8e00abc4b80f682973de715d77ba3a005a242eb859f9a211d93a347fa64b597bf + 280a6b88e26299cf263b01b8dfdb712278464fd1c25840b995e84d367d743f66c0e54a586725b7bb + f12acca27170ae3283c1073adda4b6d79f27656993aefccf16e0d0409fe07db2dc398a1b7e8ee93b + cd181485fd332f381d6a050fba4c7641a5112ac1b0b61168d20f01b479e19adf7fdbfa0905f63352 + bfc7e23cf3357657455119d879c78d3cf8c8c06375f3f7d4861aa02a122467e069acaf513025ff19 + 6641f6d2810ce493f51bee9c966b15c5043505350392b57645385a18c78f14669cc4d960446c1757 + 1b7c5d725021babbcd786957f3d17089c084907bda22c2b2675b4378b114c601d858802a55345a15 + 116bc61da4193996187ed70d16730e9ae6b3bb8787ebcaea1871d850997ddc08b4f4ea668fbf3740 + 7ac044b55be0908ecb94d4ed172ece66fd31bfdadf2b97a8bc690163ee11f5b575a4b44e36e2bfb2 + f0fce91676fd64c7773bac6a003f481fddd0bae0a1f31aa27504e2a533af4cef3b623f4791b2cca6 + d490""")) + ack1 = fromHex(stripSpaces(""" + 049f8abcfa9c0dc65b982e98af921bc0ba6e4243169348a236abe9df5f93aa69d99cadddaa387662 + b0ff2c08e9006d5a11a278b1b3331e5aaabf0a32f01281b6f4ede0e09a2d5f585b26513cb794d963 + 5a57563921c04a9090b4f14ee42be1a5461049af4ea7a7f49bf4c97a352d39c8d02ee4acc416388c + 1c66cec761d2bc1c72da6ba143477f049c9d2dde846c252c111b904f630ac98e51609b3b1f58168d + dca6505b7196532e5f85b259a20c45e1979491683fee108e9660edbf38f3add489ae73e3dda2c71b + d1497113d5c755e942d1""")) + ack2 = fromHex(stripSpaces(""" + 01ea0451958701280a56482929d3b0757da8f7fbe5286784beead59d95089c217c9b917788989470 + b0e330cc6e4fb383c0340ed85fab836ec9fb8a49672712aeabbdfd1e837c1ff4cace34311cd7f4de + 05d59279e3524ab26ef753a0095637ac88f2b499b9914b5f64e143eae548a1066e14cd2f4bd7f814 + c4652f11b254f8a2d0191e2f5546fae6055694aed14d906df79ad3b407d94692694e259191cde171 + ad542fc588fa2b7333313d82a9f887332f1dfc36cea03f831cb9a23fea05b33deb999e85489e645f + 6aab1872475d488d7bd6c7c120caf28dbfc5d6833888155ed69d34dbdc39c1f299be1057810f34fb + e754d021bfca14dc989753d61c413d261934e1a9c67ee060a25eefb54e81a4d14baff922180c395d + 3f998d70f46f6b58306f969627ae364497e73fc27f6d17ae45a413d322cb8814276be6ddd13b885b + 
201b943213656cde498fa0e9ddc8e0b8f8a53824fbd82254f3e2c17e8eaea009c38b4aa0a3f306e8 + 797db43c25d68e86f262e564086f59a2fc60511c42abfb3057c247a8a8fe4fb3ccbadde17514b7ac + 8000cdb6a912778426260c47f38919a91f25f4b5ffb455d6aaaf150f7e5529c100ce62d6d92826a7 + 1778d809bdf60232ae21ce8a437eca8223f45ac37f6487452ce626f549b3b5fdee26afd2072e4bc7 + 5833c2464c805246155289f4""")) + ack3 = fromHex(stripSpaces(""" + 01f004076e58aae772bb101ab1a8e64e01ee96e64857ce82b1113817c6cdd52c09d26f7b90981cd7 + ae835aeac72e1573b8a0225dd56d157a010846d888dac7464baf53f2ad4e3d584531fa203658fab0 + 3a06c9fd5e35737e417bc28c1cbf5e5dfc666de7090f69c3b29754725f84f75382891c561040ea1d + dc0d8f381ed1b9d0d4ad2a0ec021421d847820d6fa0ba66eaf58175f1b235e851c7e2124069fbc20 + 2888ddb3ac4d56bcbd1b9b7eab59e78f2e2d400905050f4a92dec1c4bdf797b3fc9b2f8e84a482f3 + d800386186712dae00d5c386ec9387a5e9c9a1aca5a573ca91082c7d68421f388e79127a5177d4f8 + 590237364fd348c9611fa39f78dcdceee3f390f07991b7b47e1daa3ebcb6ccc9607811cb17ce51f1 + c8c2c5098dbdd28fca547b3f58c01a424ac05f869f49c6a34672ea2cbbc558428aa1fe48bbfd6115 + 8b1b735a65d99f21e70dbc020bfdface9f724a0d1fb5895db971cc81aa7608baa0920abb0a565c9c + 436e2fd13323428296c86385f2384e408a31e104670df0791d93e743a3a5194ee6b076fb6323ca59 + 3011b7348c16cf58f66b9633906ba54a2ee803187344b394f75dd2e663a57b956cb830dd7a908d4f + 39a2336a61ef9fda549180d4ccde21514d117b6c6fd07a9102b5efe710a32af4eeacae2cb3b1dec0 + 35b9593b48b9d3ca4c13d245d5f04169b0b1""")) + + aesSecret2 = fromHex("80e8632c05fed6fc2a13b0f8d31a3cf645366239170ea067065aba8e28bac487") + macSecret2 = fromHex("2ea74ec5dae199227dff1af715362700e989d889d7a493cb0639691efb8e5f98") let rng = newRng() -proc testValue(s: string): string = - for item in data: - if item[0] == s: - result = item[1] - break - suite "Ethereum RLPx encryption/decryption test suite": proc newTestHandshake(flags: set[HandshakeFlag]): Handshake = if Initiator in flags: - let pk = PrivateKey.fromHex(testValue("initiator_private_key"))[] + let pk = PrivateKey.fromRaw(staticKeyA)[] result = Handshake.init(rng[], pk.toKeyPair(), flags) - let epki = testValue("initiator_ephemeral_private_key") - result.ephemeral = PrivateKey.fromHex(epki)[].toKeyPair() - let nonce = fromHex(stripSpaces(testValue("initiator_nonce"))) - result.initiatorNonce[0..^1] = nonce[0..^1] + result.ephemeral = PrivateKey.fromRaw(ephemeralKeyA)[].toKeyPair() + result.initiatorNonce[0..^1] = nonceA elif Responder in flags: - let pk = PrivateKey.fromHex(testValue("receiver_private_key"))[] + let pk = PrivateKey.fromRaw(staticKeyB)[] result = Handshake.init(rng[], pk.toKeyPair(), flags) - let epkr = testValue("receiver_ephemeral_private_key") - result.ephemeral = PrivateKey.fromHex(epkr)[].toKeyPair() - let nonce = fromHex(stripSpaces(testValue("receiver_nonce"))) - result.responderNonce[0..^1] = nonce[0..^1] + result.ephemeral = PrivateKey.fromRaw(ephemeralKeyB)[].toKeyPair() + result.responderNonce[0..^1] = nonceB - test "Encrypt/Decrypt Hello packet test vectors": + test "Fail on pre-EIP8 messages": var initiator = newTestHandshake({Initiator}) var responder = newTestHandshake({Responder}) - var authm = fromHex(stripSpaces(testValue("auth_ciphertext"))) - var ackm = fromHex(stripSpaces(testValue("authresp_ciphertext"))) - var stateInitiator0, stateInitiator1: SecretState - var stateResponder0, stateResponder1: SecretState - responder.decodeAuthMessage(authm).expect("success") - initiator.decodeAckMessage(ackm).expect("success") + check: responder.decodeAuthMessage(auth1).isErr() + check: initiator.decodeAckMessage(ack1).isErr() - var 
csecInitiator = initiator.getSecrets(authm, ackm) - var csecResponder = responder.getSecrets(authm, ackm) - initSecretState(csecInitiator, stateInitiator0) - initSecretState(csecResponder, stateResponder0) - initSecretState(csecInitiator, stateInitiator1) - initSecretState(csecResponder, stateResponder1) - var packet0 = testValue("initiator_hello_packet") - var initiatorHello = fromHex(stripSpaces(packet0)) - var packet1 = testValue("receiver_hello_packet") - var responderHello = fromHex(stripSpaces(packet1)) - var header: array[RlpHeaderLength, byte] + test "Correct shared EIP-8 secret": + var initiator = newTestHandshake({Initiator}) + var responder = newTestHandshake({Responder}) - block: - check stateResponder0.decryptHeader(toOpenArray(initiatorHello, 0, 31), - header).isOk() - let bodysize = getBodySize(header) - check bodysize == 79 - # we need body size to be rounded to 16 bytes boundary to properly - # encrypt/decrypt it. - var body = newSeq[byte](decryptedLength(bodysize)) - var decrsize = 0 - check: - stateResponder0.decryptBody( - toOpenArray(initiatorHello, 32, len(initiatorHello) - 1), - getBodySize(header), body, decrsize).isOk() - decrsize == 79 - body.setLen(decrsize) - var hello = newSeq[byte](encryptedLength(bodysize)) - check: - stateInitiator1.encrypt(header, body, hello).isOk() - hello == initiatorHello - block: - check stateInitiator0.decryptHeader(toOpenArray(responderHello, 0, 31), - header).isOk() - let bodysize = getBodySize(header) - check bodysize == 79 - # we need body size to be rounded to 16 bytes boundary to properly - # encrypt/decrypt it. - var body = newSeq[byte](decryptedLength(bodysize)) - var decrsize = 0 - check: - stateInitiator0.decryptBody( - toOpenArray(responderHello, 32, len(initiatorHello) - 1), - getBodySize(header), body, decrsize).isOk() - decrsize == 79 - body.setLen(decrsize) - var hello = newSeq[byte](encryptedLength(bodysize)) - check: - stateResponder1.encrypt(header, body, hello).isOk() - hello == responderHello + check: responder.decodeAuthMessage(auth2).isOk() + check: initiator.decodeAckMessage(ack2).isOk() + + var csecResponder = responder.getSecrets(auth2, ack2) + + check: + csecResponder.aesKey == aesSecret2 + csecResponder.macKey == macSecret2 + var tmpMac = csecResponder.ingressMac + tmpMac.update("foo".toOpenArrayByte(0, 2)) + check: + tmpMac.finish().data == fromHex("0c7ec6340062cc46f5e9f1e3cf86f8c8c403c5a0964f5df0ebd34a75ddc86db5") + + test "Can parse auth/ack with extra bytes": + var initiator = newTestHandshake({Initiator}) + var responder = newTestHandshake({Responder}) + check: responder.decodeAuthMessage(auth3).isOk() + check: initiator.decodeAckMessage(ack3).isOk() test "Continuous stream of different lengths (1000 times)": var initiator = newTestHandshake({Initiator}) var responder = newTestHandshake({Responder}) - var m0 = newSeq[byte](initiator.authSize()) - var k0 = 0 - var k1 = 0 - check initiator.authMessage(rng[], responder.host.pubkey, - m0, k0).isOk + var m0 = newSeq[byte](AuthMessageMaxEIP8) + let k0 = initiator.authMessage(rng[], responder.host.pubkey, + m0).expect("correct buf size") m0.setLen(k0) check responder.decodeAuthMessage(m0).isOk - var m1 = newSeq[byte](responder.ackSize()) - check responder.ackMessage(rng[], m1, k1).isOk + var m1 = newSeq[byte](AckMessageMaxEIP8) + let k1 = responder.ackMessage(rng[], m1).expect("correct buf size") m1.setLen(k1) check initiator.decodeAckMessage(m1).isOk @@ -188,11 +159,9 @@ suite "Ethereum RLPx encryption/decryption test suite": var csecResponder = 
responder.getSecrets(m0, m1) var stateInitiator: SecretState var stateResponder: SecretState - var iheader, rheader: array[16, byte] + var iheader: array[16, byte] initSecretState(csecInitiator, stateInitiator) initSecretState(csecResponder, stateResponder) - burnMem(iheader) - burnMem(rheader) for i in 1..1000: # initiator -> responder block: @@ -205,23 +174,21 @@ suite "Ethereum RLPx encryption/decryption test suite": randomBytes(ibody) == len(ibody) stateInitiator.encrypt(iheader, ibody, encrypted).isOk() - stateResponder.decryptHeader(toOpenArray(encrypted, 0, 31), - rheader).isOk() + let rheader = stateResponder.decryptHeader( + toOpenArray(encrypted, 0, 31)).expect("valid data") + var length = getBodySize(rheader) check length == len(ibody) var rbody = newSeq[byte](decryptedLength(length)) - var decrsize = 0 check: stateResponder.decryptBody( toOpenArray(encrypted, 32, len(encrypted) - 1), - length, rbody, decrsize).isOk() - decrsize == length - rbody.setLen(decrsize) + length, rbody).isOk() + rbody.setLen(length) check: iheader == rheader ibody == rbody burnMem(iheader) - burnMem(rheader) # responder -> initiator block: var ibody = newSeq[byte](i * 3) @@ -233,20 +200,17 @@ suite "Ethereum RLPx encryption/decryption test suite": randomBytes(ibody) == len(ibody) stateResponder.encrypt(iheader, ibody, encrypted).isOk() - stateInitiator.decryptHeader(toOpenArray(encrypted, 0, 31), - rheader).isOk() + let rheader = stateInitiator.decryptHeader( + toOpenArray(encrypted, 0, 31)).expect("valid data") var length = getBodySize(rheader) check length == len(ibody) var rbody = newSeq[byte](decryptedLength(length)) - var decrsize = 0 check: stateInitiator.decryptBody( toOpenArray(encrypted, 32, len(encrypted) - 1), - length, rbody, decrsize).isOk() - decrsize == length + length, rbody).isOk() rbody.setLen(length) check: iheader == rheader ibody == rbody burnMem(iheader) - burnMem(rheader) diff --git a/tests/p2p/test_discoveryv5.nim b/tests/p2p/test_discoveryv5.nim index 4115739..8c8e5e1 100644 --- a/tests/p2p/test_discoveryv5.nim +++ b/tests/p2p/test_discoveryv5.nim @@ -417,6 +417,9 @@ suite "Discovery v5 Tests": let discoveredFiltered = lookupNode.randomNodes(10, ("test", @[byte 1,2,3,4])) check discoveredFiltered.len == 1 and discoveredFiltered.contains(targetNode) + let discoveredEmpty = lookupNode.randomNodes(10, + proc(n: Node) : bool = false) + check discoveredEmpty.len == 0 await lookupNode.closeWait() diff --git a/tests/p2p/test_protocol_handlers.nim b/tests/p2p/test_protocol_handlers.nim index bf98685..df446df 100644 --- a/tests/p2p/test_protocol_handlers.nim +++ b/tests/p2p/test_protocol_handlers.nim @@ -31,8 +31,6 @@ p2pProtocol abc(version = 1, onPeerDisconnected do (peer: Peer, reason: DisconnectionReason) {.gcsafe.}: peer.networkState.count -= 1 - if true: - raise newException(CatchableError, "Fake abc exception") p2pProtocol xyz(version = 1, rlpxName = "xyz", @@ -45,8 +43,6 @@ p2pProtocol xyz(version = 1, onPeerDisconnected do (peer: Peer, reason: DisconnectionReason) {.gcsafe.}: peer.networkState.count -= 1 - if true: - raise newException(CatchableError, "Fake xyz exception") peer.state.status = "disconnected" p2pProtocol hah(version = 1, @@ -77,11 +73,10 @@ suite "Testing protocol handlers": await peer.disconnect(SubprotocolReason, true) check: - # we want to check that even though the exceptions in the disconnect - # handlers, each handler still ran + # all disconnection handlers are called node1.protocolState(abc).count == 0 node1.protocolState(xyz).count == 0 - 
peer.state(xyz).status == "connected" + peer.state(xyz).status == "disconnected" asyncTest "Failing connection handler": let rng = newRng() diff --git a/tests/p2p/test_rlpx_thunk.json b/tests/p2p/test_rlpx_thunk.json index 1b3a606..113068c 100644 --- a/tests/p2p/test_rlpx_thunk.json +++ b/tests/p2p/test_rlpx_thunk.json @@ -1,7 +1,7 @@ { "Invalid list when decoding for object": { "payload": "03", - "error": "MalformedRlpError", + "error": "PeerDisconnected", "description": "Object parameters are expected to be encoded in an RLP list" }, "Message id that is not supported": { @@ -21,62 +21,62 @@ }, "No Hash nor Status, but empty list": { "payload": "20c1c0", - "error": "RlpTypeMismatch", + "error": "PeerDisconnected", "description": "Decoding to HashOrStatus expects blob of size 1 or 32" }, "No Hash nor Status, list instead of blob": { "payload": "20c2c1c0", - "error": "RlpTypeMismatch", + "error": "PeerDisconnected", "description": "Decoding to HashOrStatus expects blob of size 1 or 32" }, "No Hash nor Status, blob of 2 bytes": { "payload": "20c4c3820011", - "error": "RlpTypeMismatch", + "error": "PeerDisconnected", "description": "Decoding to HashOrStatus expects blob of size 1 or 32" }, "No Hash nor Status, blob of 33 bytes": { "payload": "20e3e2a100112233445566778899aabbccddeeff00112233445566778899aabbcceeddff33", - "error": "RlpTypeMismatch", + "error": "PeerDisconnected", "description": "Decoding to HashOrStatus expects blob of size 1 or 32" }, "Listing elements when no data": { "payload": "01e1", - "error": "MalformedRlpError", + "error": "PeerDisconnected", "description": "listElem to error on empty list" }, "Listing elements when invalid length": { "payload": "01ffdada", - "error": "MalformedRlpError", + "error": "PeerDisconnected", "description": "listElem to error on invalid size encoding" }, "Listing single element list when having more entries": { "payload": "01c20420", - "error": "RlpTypeMismatch", + "error": "PeerDisconnected", "description": "listElem to assert on not a single entry list" }, "Listing single element list when having empty list": { "payload": "01c0", - "error": "RlpTypeMismatch", + "error": "PeerDisconnected", "description": "listElem to assert on not a single entry list" }, "DisconnectReason: single element list with entry out off enum range": { "payload": "01c111", - "error": "RlpTypeMismatch", + "error": "PeerDisconnected", "description": "Disconnect reason code out of bounds 0..16 (got: 17)" }, "DisconnectReason: single element out off enum range": { "payload": "0111", - "error": "RlpTypeMismatch", + "error": "PeerDisconnected", "description": "Disconnect reason code out of bounds 0..16 (got: 17)" }, "DisconnectReason: single element list with enum hole value": { "payload": "01c10C", - "error": "RlpTypeMismatch", + "error": "PeerDisconnected", "description": "Error on Disconnect reason with enum hole value" }, "DisconnectReason: single element with enum hole value": { "payload": "010C", - "error": "RlpTypeMismatch", + "error": "PeerDisconnected", "description": "Error on Disconnect reason with enum hole value" }, "devp2p hello packet version 22 + additional list elements for EIP-8": { diff --git a/tests/p2p/test_rlpxtransport.nim b/tests/p2p/test_rlpxtransport.nim new file mode 100644 index 0000000..733167a --- /dev/null +++ b/tests/p2p/test_rlpxtransport.nim @@ -0,0 +1,60 @@ +{.used.} + +import + unittest2, + chronos/unittest2/asynctests, + ../../eth/common/keys, + ../../eth/p2p/rlpxtransport + +suite "RLPx transport": + setup: + let + rng = newRng() + 
keys1 = KeyPair.random(rng[]) + keys2 = KeyPair.random(rng[]) + server = createStreamServer(initTAddress("127.0.0.1:0"), {ReuseAddr}) + + teardown: + waitFor server.closeWait() + + asyncTest "Connect/accept": + const msg = @[byte 0, 1, 2, 3] + proc serveClient(server: StreamServer) {.async.} = + let transp = await server.accept() + let a = await RlpxTransport.accept(rng, keys1, transp) + await a.sendMsg(msg) + await a.closeWait() + + let serverFut = server.serveClient() + defer: + await serverFut.wait(1.seconds) + + let client = + await RlpxTransport.connect(rng, keys2, server.localAddress(), keys1.pubkey) + + defer: + await client.closeWait() + let rmsg = await client.recvMsg().wait(1.seconds) + + check: + msg == rmsg + + await serverFut + + asyncTest "Detect invalid pubkey": + proc serveClient(server: StreamServer) {.async.} = + let transp = await server.accept() + discard await RlpxTransport.accept(rng, keys1, transp) + raiseAssert "should fail to accept due to pubkey error" + + let serverFut = server.serveClient() + defer: + expect(RlpxTransportError): + await serverFut.wait(1.seconds) + + let keys3 = KeyPair.random(rng[]) + + # accept side should close connections + expect(TransportError): + discard + await RlpxTransport.connect(rng, keys2, server.localAddress(), keys3.pubkey) diff --git a/tests/rlp/all_tests.nim b/tests/rlp/all_tests.nim index 783b27d..625768c 100644 --- a/tests/rlp/all_tests.nim +++ b/tests/rlp/all_tests.nim @@ -2,4 +2,5 @@ import ./test_api_usage, ./test_json_suite, ./test_empty_string, - ./test_object_serialization + ./test_object_serialization, + ./test_optional_fields diff --git a/tests/rlp/test_api_usage.nim b/tests/rlp/test_api_usage.nim index 80fb998..cf9ad2f 100644 --- a/tests/rlp/test_api_usage.nim +++ b/tests/rlp/test_api_usage.nim @@ -21,8 +21,7 @@ proc test_blockBodyTranscode() = transactions: @[ Transaction(nonce: 1)]), BlockBody( - uncles: @[ - BlockHeader(nonce: BlockNonce([0x20u8,0,0,0,0,0,0,0]))]), + uncles: @[Header(nonce: Bytes8([0x20u8,0,0,0,0,0,0,0]))]), BlockBody(), BlockBody( transactions: @[ diff --git a/tests/rlp/test_optional_fields.nim b/tests/rlp/test_optional_fields.nim new file mode 100644 index 0000000..c234c0c --- /dev/null +++ b/tests/rlp/test_optional_fields.nim @@ -0,0 +1,68 @@ +{.used.} + +import + ../../eth/[rlp, common], + unittest2 + +# Optionals in between mandatory fields for the convenience of +# implementation. According to the spec all optionals appear +# after mandatory fields. 
Moreover, an empty optional field
+# cannot and will not appear before a non-empty optional field.
+
+type ObjectWithOptionals = object
+  a* : uint64
+  b* : uint64
+  c* : Opt[uint64] # should not count this as optional
+  d* : Opt[uint64] # should not count this as optional
+  e* : uint64
+  f* : uint64
+  g* : uint64
+  h* : Opt[uint64] # should not count this as optional
+  i* : Opt[uint64] # should not count this as optional
+  j* : Opt[uint64] # should not count this as optional
+  k* : uint64
+  l* : Opt[uint64] # should count this as an optional
+  m* : Opt[uint64] # should count this as an optional
+  n* : Opt[uint64] # should count this as an optional
+
+var
+  objWithEmptyOptional: ObjectWithOptionals
+  objWithNonEmptyOptional: ObjectWithOptionals
+  objWithNonEmptyTrailingOptionals: ObjectWithOptionals
+  objWithEmptyTrailingOptionals: ObjectWithOptionals
+
+objWithNonEmptyOptional.c = Opt.some(0'u64)
+objWithNonEmptyOptional.d = Opt.some(0'u64)
+objWithNonEmptyOptional.h = Opt.some(0'u64)
+objWithNonEmptyOptional.i = Opt.some(0'u64)
+objWithNonEmptyOptional.j = Opt.some(0'u64)
+objWithNonEmptyOptional.l = Opt.some(0'u64)
+objWithNonEmptyOptional.m = Opt.some(0'u64)
+objWithNonEmptyOptional.n = Opt.some(0'u64)
+
+objWithNonEmptyTrailingOptionals.l = Opt.some(0'u64)
+objWithNonEmptyTrailingOptionals.m = Opt.some(0'u64)
+objWithNonEmptyTrailingOptionals.n = Opt.some(0'u64)
+
+objWithEmptyTrailingOptionals.c = Opt.some(0'u64)
+objWithEmptyTrailingOptionals.d = Opt.some(0'u64)
+objWithEmptyTrailingOptionals.h = Opt.some(0'u64)
+objWithEmptyTrailingOptionals.i = Opt.some(0'u64)
+objWithEmptyTrailingOptionals.j = Opt.some(0'u64)
+
+suite "test optional fields":
+  test "all optionals are empty":
+    let bytes = rlp.encode(objWithEmptyOptional)
+    check: bytes.len == 7 # 6 mandatory fields + prefix byte
+
+  test "all optionals are non empty":
+    let bytes = rlp.encode(objWithNonEmptyOptional)
+    check: bytes.len == 15 # 6 mandatory + 8 optional + prefix
+
+  test "Only trailing optionals are non empty":
+    let bytes = rlp.encode(objWithNonEmptyTrailingOptionals)
+    check: bytes.len == 10 # 6 mandatory + 3 trailing optional + prefix
+
+  test "Only trailing optionals are empty":
+    let bytes = rlp.encode(objWithEmptyTrailingOptionals)
+    check: bytes.len == 12 # 6 mandatory + 5 non trailing + prefix
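
A minimal sketch (illustration only, not part of the patch above) of where the byte counts in test_optional_fields.nim come from, assuming the rules the new tests assert: a zero-valued uint64 encodes as the empty RLP string 0x80, an empty Opt field is omitted from the encoding, and a list whose payload is shorter than 56 bytes gets the prefix 0xc0 plus the payload length, so six zero-valued mandatory fields encode as c6 80 80 80 80 80 80 (7 bytes). The TrailingOpt type and the file name are hypothetical, used only for this illustration; the imports mirror those of test_optional_fields.nim.

# sketch_optional_encoding.nim (hypothetical, not part of the change set)
import ../../eth/[rlp, common]

type TrailingOpt = object
  a*: uint64
  b*: uint64
  c*: Opt[uint64] # trailing optional: omitted from the encoding when empty

# Two zero-valued fields -> payload 0x80 0x80 (2 bytes), list prefix 0xc0 + 2 = 0xc2.
doAssert rlp.encode(TrailingOpt()) == @[0xc2'u8, 0x80, 0x80]

# With the trailing optional set to zero, a third 0x80 item is appended.
doAssert rlp.encode(TrailingOpt(c: Opt.some(0'u64))) == @[0xc3'u8, 0x80, 0x80, 0x80]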