mirror of https://github.com/status-im/nim-eth.git
Moved eth-p2p to eth
This commit is contained in:
parent
afeb2c0b93
commit
e75a00f86e
|
@ -0,0 +1,163 @@
|
||||||
|
#
|
||||||
|
# Ethereum P2P
|
||||||
|
# (c) Copyright 2018
|
||||||
|
# Status Research & Development GmbH
|
||||||
|
#
|
||||||
|
# Licensed under either of
|
||||||
|
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||||
|
# MIT license (LICENSE-MIT)
|
||||||
|
#
|
||||||
|
|
||||||
|
import
|
||||||
|
tables, algorithm, random,
|
||||||
|
asyncdispatch2, asyncdispatch2/timer, chronicles,
|
||||||
|
eth/keys, eth/common/eth_types,
|
||||||
|
eth/p2p/[kademlia, discovery, enode, peer_pool, rlpx],
|
||||||
|
eth/p2p/private/p2p_types
|
||||||
|
|
||||||
|
export
|
||||||
|
p2p_types, rlpx, enode, kademlia
|
||||||
|
|
||||||
|
proc addCapability*(node: var EthereumNode, p: ProtocolInfo) =
  ## Registers protocol `p` with `node`, keeping `protocols` and
  ## `capabilities` sorted by `rlpx.cmp`. Must be called before the node
  ## starts connecting (connectionState is still None).
  assert node.connectionState == ConnectionState.None

  let insertionPos = lowerBound(node.protocols, p, rlpx.cmp)
  node.protocols.insert(p, insertionPos)
  node.capabilities.insert(p.asCapability, insertionPos)

  # Protocols with per-network state get it initialized eagerly.
  if p.networkStateInitializer != nil:
    node.protocolStates[p.index] = p.networkStateInitializer(node)
|
||||||
|
|
||||||
|
template addCapability*(node: var EthereumNode, Protocol: type) =
  ## Convenience overload: registers `Protocol` via its `protocolInfo`.
  addCapability(node, Protocol.protocolInfo)
|
||||||
|
|
||||||
|
proc newEthereumNode*(keys: KeyPair,
                      address: Address,
                      networkId: uint,
                      chain: AbstractChainDB,
                      clientId = "nim-eth-p2p/0.2.0", # TODO: read this value from nimble somehow
                      addAllCapabilities = true,
                      useCompression: bool = false,
                      minPeers = 10): EthereumNode =
  ## Allocates and initializes an `EthereumNode` with its peer pool.
  ## NOTE(review): `chain` is accepted but not stored in this body —
  ## presumably consumed elsewhere; confirm against the rest of the module.
  new result
  result.keys = keys
  result.networkId = networkId
  result.clientId = clientId
  result.protocols.newSeq 0
  result.capabilities.newSeq 0
  result.address = address
  result.connectionState = ConnectionState.None

  # Snappy support is compile-time optional; pick the devp2p version
  # matching the caller's compression preference.
  when useSnappy:
    result.protocolVersion = if useCompression: devp2pSnappyVersion
                             else: devp2pVersion

  result.protocolStates.newSeq allProtocols.len

  result.peerPool = newPeerPool(result, networkId,
                                keys, nil,
                                clientId, address.tcpPort,
                                minPeers = minPeers)

  if addAllCapabilities:
    for p in allProtocols:
      result.addCapability(p)
|
||||||
|
|
||||||
|
proc processIncoming(server: StreamServer,
                     remote: StreamTransport): Future[void] {.async, gcsafe.} =
  ## Accept-handler for incoming RLPx connections: performs the rlpx
  ## handshake and hands the peer to the pool, closing the transport when
  ## the handshake fails.
  var node = getUserData[EthereumNode](server)
  let acceptFut = node.rlpxAccept(remote)
  yield acceptFut
  if not acceptFut.failed:
    let peer = acceptFut.read()
    if node.peerPool != nil:
      if not node.peerPool.addPeer(peer):
        # In case an outgoing connection was added in the meanwhile or a
        # malicious peer opens multiple connections
        debug "Disconnecting peer (incoming)", reason = AlreadyConnected
        await peer.disconnect(AlreadyConnected)
  else:
    remote.close()
|
||||||
|
|
||||||
|
proc listeningAddress*(node: EthereumNode): ENode =
  ## The ENode (pubkey + address) under which this node can be reached.
  initENode(node.keys.pubKey, node.address)
|
||||||
|
|
||||||
|
proc startListening*(node: EthereumNode) =
  ## Starts the RLPx TCP listener, lazily creating the stream server on
  ## the first call. The node itself travels as the server's user data.
  let bindAddr = initTAddress(node.address.ip, node.address.tcpPort)
  if node.listeningServer == nil:
    node.listeningServer = createStreamServer(bindAddr, processIncoming,
                                              {ReuseAddr},
                                              udata = cast[pointer](node))
  node.listeningServer.start()
  info "RLPx listener up", self = node.listeningAddress
|
||||||
|
|
||||||
|
proc connectToNetwork*(node: EthereumNode,
                       bootstrapNodes: seq[ENode],
                       startListening = true,
                       enableDiscovery = true) {.async.} =
  ## Brings the node online: sets up discovery, optionally starts the RLPx
  ## listener, starts the peer pool and waits until at least one peer is
  ## connected.
  assert node.connectionState == ConnectionState.None

  node.connectionState = Connecting
  node.discovery = newDiscoveryProtocol(node.keys.seckey,
                                        node.address,
                                        bootstrapNodes)
  node.peerPool.discovery = node.discovery

  if startListening:
    # Module-qualified to avoid clashing with the `startListening` parameter.
    p2p.startListening(node)

  if enableDiscovery:
    node.discovery.open()
    await node.discovery.bootstrap()
  else:
    info "Discovery disabled"

  node.peerPool.start()

  # Block the caller until the pool has produced a first connection.
  while node.peerPool.connectedNodes.len == 0:
    trace "Waiting for more peers", peers = node.peerPool.connectedNodes.len
    await sleepAsync(500)
|
||||||
|
|
||||||
|
proc stopListening*(node: EthereumNode) =
  ## Stops the RLPx listening server if one was started.
  ## Fix: `startListening` only creates the server lazily (and guards with a
  ## nil check); calling `stopListening` before any `startListening` would
  ## dereference nil. Guard symmetrically instead of crashing.
  if node.listeningServer != nil:
    node.listeningServer.stop()
|
||||||
|
|
||||||
|
iterator peers*(node: EthereumNode): Peer =
  ## Yields every currently connected peer.
  for connected in node.peerPool.peers:
    yield connected
|
||||||
|
|
||||||
|
iterator peers*(node: EthereumNode, Protocol: type): Peer =
  ## Yields every connected peer that supports `Protocol`.
  for connected in node.peerPool.peers(Protocol):
    yield connected
|
||||||
|
|
||||||
|
iterator protocolPeers*(node: EthereumNode, Protocol: type): auto =
  ## Yields the per-protocol state object of each connected peer speaking
  ## `Protocol`.
  mixin state
  for connected in node.peerPool.peers(Protocol):
    yield connected.state(Protocol)
|
||||||
|
|
||||||
|
iterator randomPeers*(node: EthereumNode, maxPeers: int): Peer =
  ## Yields up to `maxPeers` connected peers in random order.
  # TODO: this can be implemented more efficiently

  # XXX: this doesn't compile, why?
  # var peer = toSeq node.peers
  var pool = newSeqOfCap[Peer](node.peerPool.connectedNodes.len)
  for connected in node.peers: pool.add(connected)

  shuffle(pool)
  for idx in 0 ..< min(maxPeers, pool.len):
    yield pool[idx]
|
||||||
|
|
||||||
|
proc randomPeer*(node: EthereumNode): Peer =
  ## Returns a uniformly chosen connected peer.
  ## NOTE(review): with an empty pool `random(0)` divides by zero, and if
  ## the loop falls through the implicit result is nil — confirm callers
  ## only invoke this with at least one connected peer.
  let chosen = random(node.peerPool.connectedNodes.len)
  var idx = 0
  for connected in node.peers:
    if idx == chosen: return connected
    inc idx
|
||||||
|
|
||||||
|
proc randomPeerWith*(node: EthereumNode, Protocol: type): Peer =
  ## Returns a random connected peer that supports `Protocol`, or nil
  ## (the default result) when none does.
  mixin state
  var matching = newSeq[Peer]()
  for candidate in node.peers(Protocol):
    matching.add(candidate)
  if matching.len > 0:
    return matching.rand()
|
||||||
|
|
|
@ -0,0 +1,543 @@
|
||||||
|
#
|
||||||
|
# Ethereum P2P
|
||||||
|
# (c) Copyright 2018
|
||||||
|
# Status Research & Development GmbH
|
||||||
|
#
|
||||||
|
# Licensed under either of
|
||||||
|
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||||
|
# MIT license (LICENSE-MIT)
|
||||||
|
#
|
||||||
|
|
||||||
|
## This module implements Ethereum authentication
|
||||||
|
|
||||||
|
import endians
|
||||||
|
import eth/[keys, rlp], nimcrypto
|
||||||
|
import ecies
|
||||||
|
|
||||||
|
const
  # Sizes of the RLPx handshake messages, both in plain and ECIES-encrypted
  # form, for the legacy V4 format and the EIP-8 format (which carries up to
  # 255 bytes of random padding).
  SupportedRlpxVersion* = 4
  PlainAuthMessageV4Length* = 194
  AuthMessageV4Length* = 307
  PlainAuthMessageEIP8Length = 169
  PlainAuthMessageMaxEIP8* = PlainAuthMessageEIP8Length + 255
  AuthMessageEIP8Length* = 282 + 2
  AuthMessageMaxEIP8* = AuthMessageEIP8Length + 255
  PlainAckMessageV4Length* = 97
  AckMessageV4Length* = 210
  PlainAckMessageEIP8Length* = 102
  PlainAckMessageMaxEIP8* = PlainAckMessageEIP8Length + 255
  AckMessageEIP8Length* = 215 + 2
  AckMessageMaxEIP8* = AckMessageEIP8Length + 255
|
||||||
|
|
||||||
|
type
  Nonce* = array[KeyLength, byte]

  # Wire layout of the legacy (pre-EIP-8) auth message; `packed` because it
  # is cast directly over the decrypted byte buffer.
  AuthMessageV4* = object {.packed.}
    signature: array[RawSignatureSize, byte]
    keyhash: array[keccak256.sizeDigest, byte]
    pubkey: PublicKey
    nonce: array[keccak256.sizeDigest, byte]
    flag: byte

  # Wire layout of the legacy (pre-EIP-8) ack message.
  AckMessageV4* = object {.packed.}
    pubkey: array[RawPublicKeySize, byte]
    nonce: array[keccak256.sizeDigest, byte]
    flag: byte

  HandshakeFlag* = enum
    Initiator,            ## `Handshake` owner is connection initiator
    Responder,            ## `Handshake` owner is connection responder
    Eip8                  ## Flag indicates that EIP-8 handshake is used

  AuthStatus* = enum
    Success,              ## Operation was successful
    RandomError,          ## Could not obtain random data
    EcdhError,            ## ECDH shared secret could not be calculated
    BufferOverrun,        ## Buffer overrun error
    SignatureError,       ## Signature could not be obtained
    EciesError,           ## ECIES encryption/decryption error
    InvalidPubKey,        ## Invalid public key
    InvalidAuth,          ## Invalid Authentication message
    InvalidAck,           ## Invalid Authentication ACK message
    RlpError,             ## Error while decoding RLP stream
    IncompleteError       ## Data incomplete error

  Handshake* = object
    version*: uint8             ## protocol version
    flags*: set[HandshakeFlag]  ## handshake flags
    host*: KeyPair              ## host keypair
    ephemeral*: KeyPair         ## ephemeral host keypair
    remoteHPubkey*: PublicKey   ## remote host public key
    remoteEPubkey*: PublicKey   ## remote host ephemeral public key
    initiatorNonce*: Nonce      ## initiator nonce
    responderNonce*: Nonce      ## responder nonce
    expectedLength*: int        ## expected incoming message length

  ConnectionSecret* = object
    aesKey*: array[aes256.sizeKey, byte]
    macKey*: array[KeyLength, byte]
    egressMac*: keccak256
    ingressMac*: keccak256

  AuthException* = object of Exception
|
||||||
|
|
||||||
|
template toa(a, b, c: untyped): untyped =
  ## Shorthand: openArray view over `c` items of `a`, starting at index `b`.
  toOpenArray((a), (b), (b) + (c) - 1)
|
||||||
|
|
||||||
|
proc sxor[T](a: var openarray[T], b: openarray[T]) {.inline.} =
  ## XORs `b` into `a` element-wise, in place. Lengths must match.
  assert(len(a) == len(b))
  for idx in 0 ..< len(a):
    a[idx] = a[idx] xor b[idx]
|
||||||
|
|
||||||
|
proc newHandshake*(flags: set[HandshakeFlag] = {Initiator},
                   version: int = SupportedRlpxVersion): Handshake =
  ## Create new `Handshake` object: fresh ephemeral keypair, the random
  ## nonce for this side, and the expected size of the peer's first reply.
  result.version = byte(version and 0xFF)
  result.flags = flags
  result.ephemeral = newKeyPair()
  if Initiator in flags:
    # Initiator sends auth, expects an ACK back; draws the initiator nonce.
    result.expectedLength = AckMessageV4Length
    if randomBytes(result.initiatorNonce) != len(result.initiatorNonce):
      raise newException(AuthException, "Could not obtain random data!")
  else:
    # Responder expects an auth message first; draws the responder nonce.
    result.expectedLength = AuthMessageV4Length
    if randomBytes(result.responderNonce) != len(result.responderNonce):
      raise newException(AuthException, "Could not obtain random data!")
|
||||||
|
|
||||||
|
proc authMessagePreEIP8(h: var Handshake,
                        pubkey: PublicKey,
                        output: var openarray[byte],
                        outlen: var int,
                        flag: int = 0,
                        encrypt: bool = true): AuthStatus =
  ## Create plain pre-EIP8 authentication message for `pubkey`, writing it
  ## (optionally ECIES-encrypted) into `output`; `outlen` receives the size.
  var
    secret: SharedSecret
    signature: Signature
    buffer: array[PlainAuthMessageV4Length, byte]
    flagb: byte
    header: ptr AuthMessageV4
  outlen = 0
  flagb = byte(flag)
  header = cast[ptr AuthMessageV4](addr buffer[0])
  # static-shared-secret = ecdh.agree(host-seckey, remote-pubkey)
  if ecdhAgree(h.host.seckey, pubkey, secret) != EthKeysStatus.Success:
    return(EcdhError)
  # Sign (static-shared-secret ^ initiator-nonce) with the ephemeral key.
  var xornonce = h.initiatorNonce
  xornonce.sxor(secret.data)
  if signRawMessage(xornonce, h.ephemeral.seckey,
                    signature) != EthKeysStatus.Success:
    return(SignatureError)
  h.remoteHPubkey = pubkey
  header.signature = signature.getRaw()
  header.keyhash = keccak256.digest(h.ephemeral.pubkey.getRaw()).data
  header.pubkey = cast[PublicKey](h.host.pubkey.getRaw())
  header.nonce = h.initiatorNonce
  header.flag = flagb
  if encrypt:
    if len(output) < AuthMessageV4Length:
      return(BufferOverrun)
    if eciesEncrypt(buffer, output, h.remoteHPubkey) != EciesStatus.Success:
      return(EciesError)
    outlen = AuthMessageV4Length
    result = Success
  else:
    if len(output) < PlainAuthMessageV4Length:
      return(BufferOverrun)
    copyMem(addr output[0], addr buffer[0], PlainAuthMessageV4Length)
    outlen = PlainAuthMessageV4Length
    result = Success
|
||||||
|
|
||||||
|
proc authMessageEIP8(h: var Handshake,
                     pubkey: PublicKey,
                     output: var openarray[byte],
                     outlen: var int,
                     flag: int = 0,
                     encrypt: bool = true): AuthStatus =
  ## Create EIP8 authentication message: RLP payload + random padding,
  ## optionally ECIES-encrypted with a 2-byte big-endian size prefix.
  var
    secret: SharedSecret
    signature: Signature
    buffer: array[PlainAuthMessageMaxEIP8, byte]
    padsize: byte

  assert(EIP8 in h.flags)
  outlen = 0
  if ecdhAgree(h.host.seckey, pubkey, secret) != EthKeysStatus.Success:
    return(EcdhError)
  var xornonce = h.initiatorNonce
  xornonce.sxor(secret.data)
  if signRawMessage(xornonce, h.ephemeral.seckey,
                    signature) != EthKeysStatus.Success:
    return(SignatureError)
  h.remoteHPubkey = pubkey
  var payload = rlp.encodeList(signature.getRaw(),
                               h.host.pubkey.getRaw(),
                               h.initiatorNonce,
                               [byte(h.version)])
  assert(len(payload) == PlainAuthMessageEIP8Length)
  let pencsize = eciesEncryptedLength(len(payload))
  # Keep drawing a pad size until the total exceeds a V4-sized packet,
  # so the EIP-8 message is distinguishable by length.
  while true:
    if randomBytes(addr padsize, 1) != 1:
      return(RandomError)
    if int(padsize) > (AuthMessageV4Length - (pencsize + 2)):
      break
  # It is possible to make packet size constant by uncommenting this line
  # padsize = 24
  var wosize = pencsize + int(padsize)
  let fullsize = wosize + 2
  if randomBytes(toa(buffer, PlainAuthMessageEIP8Length,
                     int(padsize))) != int(padsize):
    return(RandomError)
  if encrypt:
    copyMem(addr buffer[0], addr payload[0], len(payload))
    if len(output) < fullsize:
      return(BufferOverrun)
    # NOTE(review): `addr output` on an openarray parameter — presumably
    # yields the data pointer; `addr output[0]` would be clearer. Confirm.
    bigEndian16(addr output, addr wosize)
    if eciesEncrypt(toa(buffer, 0, len(payload) + int(padsize)),
                    toa(output, 2, wosize), pubkey,
                    toa(output, 0, 2)) != EciesStatus.Success:
      return(EciesError)
    outlen = fullsize
  else:
    let plainsize = len(payload) + int(padsize)
    if len(output) < plainsize:
      return(BufferOverrun)
    copyMem(addr output[0], addr buffer[0], plainsize)
    outlen = plainsize
  result = Success
|
||||||
|
|
||||||
|
proc ackMessagePreEIP8(h: var Handshake,
                       output: var openarray[byte],
                       outlen: var int,
                       flag: int = 0,
                       encrypt: bool = true): AuthStatus =
  ## Create plain pre-EIP8 authentication ack message (ephemeral pubkey,
  ## responder nonce, flag), optionally ECIES-encrypted into `output`.
  var buffer: array[PlainAckMessageV4Length, byte]
  outlen = 0
  var header = cast[ptr AckMessageV4](addr buffer[0])
  header.pubkey = h.ephemeral.pubkey.getRaw()
  header.nonce = h.responderNonce
  header.flag = byte(flag)
  if encrypt:
    if len(output) < AckMessageV4Length:
      return(BufferOverrun)
    if eciesEncrypt(buffer, output, h.remoteHPubkey) != EciesStatus.Success:
      return(EciesError)
    outlen = AckMessageV4Length
  else:
    if len(output) < PlainAckMessageV4Length:
      return(BufferOverrun)
    copyMem(addr output[0], addr buffer[0], PlainAckMessageV4Length)
    outlen = PlainAckMessageV4Length
  result = Success
|
||||||
|
|
||||||
|
proc ackMessageEIP8(h: var Handshake,
                    output: var openarray[byte],
                    outlen: var int,
                    flag: int = 0,
                    encrypt: bool = true): AuthStatus =
  ## Create EIP8 authentication ack message: RLP payload + random padding,
  ## optionally ECIES-encrypted with a 2-byte big-endian size prefix.
  var
    buffer: array[PlainAckMessageMaxEIP8, byte]
    padsize: byte
  assert(EIP8 in h.flags)
  var payload = rlp.encodeList(h.ephemeral.pubkey.getRaw(),
                               h.responderNonce,
                               [byte(h.version)])
  assert(len(payload) == PlainAckMessageEIP8Length)
  outlen = 0
  let pencsize = eciesEncryptedLength(len(payload))
  # Keep drawing a pad size until the total exceeds a V4-sized packet.
  while true:
    if randomBytes(addr padsize, 1) != 1:
      return(RandomError)
    if int(padsize) > (AckMessageV4Length - (pencsize + 2)):
      break
  # It is possible to make packet size constant by uncommenting this line
  # padsize = 0
  var wosize = pencsize + int(padsize)
  let fullsize = wosize + 2
  if int(padsize) > 0:
    if randomBytes(toa(buffer, PlainAckMessageEIP8Length,
                       int(padsize))) != int(padsize):
      return(RandomError)
  copyMem(addr buffer[0], addr payload[0], len(payload))
  if encrypt:
    if len(output) < fullsize:
      return(BufferOverrun)
    # NOTE(review): `addr output` on an openarray parameter — presumably
    # yields the data pointer; `addr output[0]` would be clearer. Confirm.
    bigEndian16(addr output, addr wosize)
    if eciesEncrypt(toa(buffer, 0, len(payload) + int(padsize)),
                    toa(output, 2, wosize), h.remoteHPubkey,
                    toa(output, 0, 2)) != EciesStatus.Success:
      return(EciesError)
    outlen = fullsize
  else:
    let plainsize = len(payload) + int(padsize)
    if len(output) < plainsize:
      return(BufferOverrun)
    copyMem(addr output[0], addr buffer[0], plainsize)
    outlen = plainsize
  result = Success
|
||||||
|
|
||||||
|
template authSize*(h: Handshake, encrypt: bool = true): int =
  ## Get number of bytes needed to store AuthMessage.
  if EIP8 in h.flags:
    if encrypt: (AuthMessageMaxEIP8) else: (PlainAuthMessageMaxEIP8)
  else:
    if encrypt: (AuthMessageV4Length) else: (PlainAuthMessageV4Length)
|
||||||
|
|
||||||
|
template ackSize*(h: Handshake, encrypt: bool = true): int =
  ## Get number of bytes needed to store AckMessage.
  if EIP8 in h.flags:
    if encrypt: (AckMessageMaxEIP8) else: (PlainAckMessageMaxEIP8)
  else:
    if encrypt: (AckMessageV4Length) else: (PlainAckMessageV4Length)
|
||||||
|
|
||||||
|
proc authMessage*(h: var Handshake, pubkey: PublicKey,
                  output: var openarray[byte],
                  outlen: var int, flag: int = 0,
                  encrypt: bool = true): AuthStatus {.inline.} =
  ## Create new AuthMessage for specified `pubkey` and store it inside
  ## of `output`; the generated size is stored in `outlen`. Dispatches on
  ## the handshake's EIP8 flag.
  if EIP8 in h.flags:
    result = authMessageEIP8(h, pubkey, output, outlen, flag, encrypt)
  else:
    result = authMessagePreEIP8(h, pubkey, output, outlen, flag, encrypt)
|
||||||
|
|
||||||
|
proc ackMessage*(h: var Handshake, output: var openarray[byte],
                 outlen: var int, flag: int = 0,
                 encrypt: bool = true): AuthStatus =
  ## Create new AckMessage and store it inside of `output`; the generated
  ## size is stored in `outlen`. Dispatches on the handshake's EIP8 flag.
  if EIP8 in h.flags:
    result = ackMessageEIP8(h, output, outlen, flag, encrypt)
  else:
    result = ackMessagePreEIP8(h, output, outlen, flag, encrypt)
|
||||||
|
|
||||||
|
proc decodeAuthMessageV4(h: var Handshake, m: openarray[byte]): AuthStatus =
  ## Decodes V4 AuthMessage: decrypts, recovers the remote host key, then
  ## recovers the remote ephemeral key from the signature.
  var
    secret: SharedSecret
    buffer: array[PlainAuthMessageV4Length, byte]
    pubkey: PublicKey
  assert(Responder in h.flags)
  if eciesDecrypt(m, buffer, h.host.seckey) != EciesStatus.Success:
    return(EciesError)
  var header = cast[ptr AuthMessageV4](addr buffer[0])
  if recoverPublicKey(header.pubkey.data, pubkey) != EthKeysStatus.Success:
    return(InvalidPubKey)
  if ecdhAgree(h.host.seckey, pubkey, secret) != EthKeysStatus.Success:
    return(EcdhError)
  # The signature is over (static-shared-secret ^ initiator-nonce).
  var xornonce = header.nonce
  xornonce.sxor(secret.data)
  if recoverSignatureKey(header.signature, xornonce,
                         h.remoteEPubkey) != EthKeysStatus.Success:
    return(SignatureError)
  h.initiatorNonce = header.nonce
  h.remoteHPubkey = pubkey
  result = Success
|
||||||
|
|
||||||
|
proc decodeAuthMessageEip8(h: var Handshake, m: openarray[byte]): AuthStatus =
  ## Decodes EIP-8 AuthMessage: reads the 2-byte size prefix, decrypts, and
  ## pulls signature/pubkey/nonce/version out of the RLP list.
  var
    pubkey: PublicKey
    nonce: Nonce
    secret: SharedSecret
    size: uint16

  bigEndian16(addr size, unsafeAddr m[0])
  h.expectedLength = int(size) + 2
  if h.expectedLength > len(m):
    return(IncompleteError)
  var buffer = newSeq[byte](eciesDecryptedLength(int(size)))
  if eciesDecrypt(toa(m, 2, int(size)), buffer, h.host.seckey,
                  toa(m, 0, 2)) != EciesStatus.Success:
    return(EciesError)
  try:
    var reader = rlpFromBytes(buffer.toRange())
    # Expected list: [signature, host pubkey, nonce, version, ...]
    if not reader.isList() or reader.listLen() < 4:
      return(InvalidAuth)
    if reader.listElem(0).blobLen != RawSignatureSize:
      return(InvalidAuth)
    if reader.listElem(1).blobLen != RawPublicKeySize:
      return(InvalidAuth)
    if reader.listElem(2).blobLen != KeyLength:
      return(InvalidAuth)
    if reader.listElem(3).blobLen != 1:
      return(InvalidAuth)
    var signatureBr = reader.listElem(0).toBytes()
    var pubkeyBr = reader.listElem(1).toBytes()
    var nonceBr = reader.listElem(2).toBytes()
    var versionBr = reader.listElem(3).toBytes()
    if recoverPublicKey(pubkeyBr.toOpenArray(),
                        pubkey) != EthKeysStatus.Success:
      return(InvalidPubKey)
    copyMem(addr nonce[0], nonceBr.baseAddr, KeyLength)
    if ecdhAgree(h.host.seckey, pubkey, secret) != EthKeysStatus.Success:
      return(EcdhError)
    var xornonce = nonce
    xornonce.sxor(secret.data)
    if recoverSignatureKey(signatureBr.toOpenArray(),
                           xornonce,
                           h.remoteEPubkey) != EthKeysStatus.Success:
      return(SignatureError)
    h.initiatorNonce = nonce
    h.remoteHPubkey = pubkey
    h.version = cast[ptr byte](versionBr.baseAddr)[]
    result = Success
  except:
    # Any RLP decoding failure is reported uniformly as RlpError.
    result = RlpError
|
||||||
|
|
||||||
|
proc decodeAckMessageEip8*(h: var Handshake, m: openarray[byte]): AuthStatus =
  ## Decodes EIP-8 AckMessage: reads the 2-byte size prefix, decrypts, and
  ## pulls ephemeral pubkey/nonce/version out of the RLP list.
  var size: uint16
  bigEndian16(addr size, unsafeAddr m[0])
  h.expectedLength = 2 + int(size)
  if h.expectedLength > len(m):
    return(IncompleteError)
  var buffer = newSeq[byte](eciesDecryptedLength(int(size)))
  if eciesDecrypt(toa(m, 2, int(size)), buffer, h.host.seckey,
                  toa(m, 0, 2)) != EciesStatus.Success:
    return(EciesError)
  try:
    var reader = rlpFromBytes(buffer.toRange())
    # Expected list: [ephemeral pubkey, nonce, version, ...]
    if not reader.isList() or reader.listLen() < 3:
      return(InvalidAck)
    if reader.listElem(0).blobLen != RawPublicKeySize:
      return(InvalidAck)
    if reader.listElem(1).blobLen != KeyLength:
      return(InvalidAck)
    if reader.listElem(2).blobLen != 1:
      return(InvalidAck)
    let pubkeyBr = reader.listElem(0).toBytes()
    let nonceBr = reader.listElem(1).toBytes()
    let versionBr = reader.listElem(2).toBytes()
    if recoverPublicKey(pubkeyBr.toOpenArray(),
                        h.remoteEPubkey) != EthKeysStatus.Success:
      return(InvalidPubKey)
    copyMem(addr h.responderNonce[0], nonceBr.baseAddr, KeyLength)
    h.version = cast[ptr byte](versionBr.baseAddr)[]
    result = Success
  except:
    # Any RLP decoding failure is reported uniformly as RlpError.
    result = RlpError
|
||||||
|
|
||||||
|
proc decodeAckMessageV4(h: var Handshake, m: openarray[byte]): AuthStatus =
  ## Decodes V4 AckMessage: decrypts and extracts the remote ephemeral
  ## public key and responder nonce.
  var
    buffer: array[PlainAckMessageV4Length, byte]
  assert(Initiator in h.flags)
  if eciesDecrypt(m, buffer, h.host.seckey) != EciesStatus.Success:
    return(EciesError)
  var header = cast[ptr AckMessageV4](addr buffer[0])
  if recoverPublicKey(header.pubkey, h.remoteEPubkey) != EthKeysStatus.Success:
    return(InvalidPubKey)
  h.responderNonce = header.nonce
  result = Success
|
||||||
|
|
||||||
|
proc decodeAuthMessage*(h: var Handshake, input: openarray[byte]): AuthStatus =
  ## Decodes AuthMessage from `input`, auto-detecting the format: exact V4
  ## length tries V4 first then falls back to EIP-8; anything longer is
  ## EIP-8 only. Sets the EIP8 flag when the EIP-8 path succeeds.
  if len(input) < AuthMessageV4Length:
    result = IncompleteError
  elif len(input) == AuthMessageV4Length:
    var res = h.decodeAuthMessageV4(input)
    if res != Success:
      res = h.decodeAuthMessageEip8(input)
      if res != Success:
        result = res
      else:
        h.flags.incl(EIP8)
        result = Success
    else:
      result = Success
  else:
    result = h.decodeAuthMessageEip8(input)
    if result == Success:
      h.flags.incl(EIP8)
|
||||||
|
|
||||||
|
proc decodeAckMessage*(h: var Handshake, input: openarray[byte]): AuthStatus =
  ## Decodes AckMessage from `input`, auto-detecting the format: exact V4
  ## length tries V4 first then falls back to EIP-8; anything longer is
  ## EIP-8 only. Sets the EIP8 flag when the EIP-8 path succeeds.
  if len(input) < AckMessageV4Length:
    return(IncompleteError)
  elif len(input) == AckMessageV4Length:
    var res = h.decodeAckMessageV4(input)
    if res != Success:
      res = h.decodeAckMessageEip8(input)
      if res != Success:
        result = res
      else:
        h.flags.incl(EIP8)
        result = Success
    else:
      result = Success
  else:
    result = h.decodeAckMessageEip8(input)
    if result == Success:
      h.flags.incl(EIP8)
|
||||||
|
|
||||||
|
proc getSecrets*(h: Handshake, authmsg: openarray[byte],
                 ackmsg: openarray[byte],
                 secret: var ConnectionSecret): AuthStatus =
  ## Derive secrets from handshake `h` using encrypted AuthMessage `authmsg`
  ## and encrypted AckMessage `ackmsg`. The exact statement order below is
  ## part of the key schedule — do not reorder.
  var
    shsec: SharedSecret
    ctx0: keccak256
    ctx1: keccak256
    mac1: MDigest[256]
    xornonce: Nonce

  # ecdhe-secret = ecdh.agree(ephemeral-privkey, remote-ephemeral-pubk)
  if ecdhAgree(h.ephemeral.seckey, h.remoteEPubkey,
               shsec) != EthKeysStatus.Success:
    return(EcdhError)

  # shared-secret = keccak(ecdhe-secret || keccak(nonce || initiator-nonce))
  ctx0.init()
  ctx1.init()
  ctx1.update(h.responderNonce)
  ctx1.update(h.initiatorNonce)
  mac1 = ctx1.finish()
  ctx1.clear()
  ctx0.update(shsec.data)
  ctx0.update(mac1.data)
  mac1 = ctx0.finish()

  # aes-secret = keccak(ecdhe-secret || shared-secret)
  ctx0.init()
  ctx0.update(shsec.data)
  ctx0.update(mac1.data)
  mac1 = ctx0.finish()

  # mac-secret = keccak(ecdhe-secret || aes-secret)
  ctx0.init()
  ctx0.update(shsec.data)
  ctx0.update(mac1.data)
  secret.aesKey = mac1.data
  mac1 = ctx0.finish()
  secret.macKey = mac1.data

  # Scrub the ECDH secret as soon as it is no longer needed.
  burnMem(shsec)

  # egress-mac = keccak256(mac-secret ^ recipient-nonce || auth-sent-init)
  xornonce = mac1.data
  xornonce.sxor(h.responderNonce)
  ctx0.init()
  ctx0.update(xornonce)
  ctx0.update(authmsg)

  # ingress-mac = keccak256(mac-secret ^ initiator-nonce || auth-recvd-ack)
  xornonce = secret.macKey
  xornonce.sxor(h.initiatorNonce)
  ctx1.init()
  ctx1.update(xornonce)
  ctx1.update(ackmsg)
  burnMem(xornonce)

  # Initiator and responder see the same two MAC streams with roles swapped.
  if Initiator in h.flags:
    secret.egressMac = ctx0
    secret.ingressMac = ctx1
  else:
    secret.ingressMac = ctx0
    secret.egressMac = ctx1

  ctx0.clear()
  ctx1.clear()
  result = Success
|
|
@ -0,0 +1,362 @@
|
||||||
|
import
|
||||||
|
sets, options, random, hashes,
|
||||||
|
asyncdispatch2, chronicles, eth/common/eth_types,
|
||||||
|
private/p2p_types, rlpx, peer_pool, rlpx_protocols/eth_protocol,
|
||||||
|
../p2p
|
||||||
|
|
||||||
|
const
  minPeersToStartSync* = 2 # Wait for consensus of at least this
                           # number of peers before syncing
|
||||||
|
|
||||||
|
type
  SyncStatus* = enum
    syncSuccess
    syncNotEnoughPeers
    syncTimeOut

  # Lifecycle of a batch of blocks in the download queue.
  WantedBlocksState = enum
    Initial,
    Requested,
    Received,
    Persisted

  WantedBlocks = object
    startIndex: BlockNumber
    numBlocks: uint
    state: WantedBlocksState
    headers: seq[BlockHeader]
    bodies: seq[BlockBody]

  SyncContext = ref object
    workQueue: seq[WantedBlocks]
    endBlockNumber: BlockNumber
    finalizedBlock: BlockNumber # Block which was downloaded and verified
    chain: AbstractChainDB
    peerPool: PeerPool
    trustedPeers: HashSet[Peer]
    hasOutOfOrderBlocks: bool
|
||||||
|
|
||||||
|
proc hash*(p: Peer): Hash {.inline.} =
  ## Identity-based hash so `Peer` refs can be stored in a `HashSet`.
  hash(cast[pointer](p))
|
||||||
|
|
||||||
|
proc endIndex(b: WantedBlocks): BlockNumber =
  ## Number of the last block covered by work item `b` (inclusive).
  result = b.startIndex
  result += (b.numBlocks - 1).u256
|
||||||
|
|
||||||
|
proc availableWorkItem(ctx: SyncContext): int =
  ## Returns the index of a work-queue slot prepared with the next block
  ## range to request, or -1 when the sync range is exhausted.
  var maxPendingBlock = ctx.finalizedBlock
  trace "queue len", length = ctx.workQueue.len
  result = -1
  for i in 0 .. ctx.workQueue.high:
    case ctx.workQueue[i].state
    of Initial:
      # A never-used slot can be taken immediately.
      return i
    of Persisted:
      # Remember a recyclable slot but keep scanning for pending ranges.
      result = i
    else:
      discard

    let eb = ctx.workQueue[i].endIndex
    if eb > maxPendingBlock: maxPendingBlock = eb

  let nextRequestedBlock = maxPendingBlock + 1
  if nextRequestedBlock >= ctx.endBlockNumber:
    return -1

  if result == -1:
    # No recyclable slot found: grow the queue by one.
    result = ctx.workQueue.len
    ctx.workQueue.setLen(result + 1)

  var numBlocks = (ctx.endBlockNumber - nextRequestedBlock).toInt
  if numBlocks > maxHeadersFetch:
    numBlocks = maxHeadersFetch
  ctx.workQueue[result] = WantedBlocks(startIndex: nextRequestedBlock, numBlocks: numBlocks.uint, state: Initial)
|
||||||
|
|
||||||
|
proc persistWorkItem(ctx: SyncContext, wi: var WantedBlocks) =
  ## Writes the work item's blocks to the chain DB; on success advances the
  ## finalized block, on validation failure resets the slot for a retry.
  case ctx.chain.persistBlocks(wi.headers, wi.bodies)
  of ValidationResult.OK:
    ctx.finalizedBlock = wi.endIndex
    wi.state = Persisted
  of ValidationResult.Error:
    wi.state = Initial
  # successful or not, we're done with these blocks
  wi.headers.setLen(0)
  wi.bodies.setLen(0)
|
||||||
|
|
||||||
|
proc persistPendingWorkItems(ctx: SyncContext) =
  ## Repeatedly scans the queue for the Received item that continues the
  ## finalized chain and persists it, until no contiguous item remains;
  ## records whether out-of-order items are still pending.
  var nextStartIndex = ctx.finalizedBlock + 1
  var keepRunning = true
  var hasOutOfOrderBlocks = false
  trace "Looking for out of order blocks"
  while keepRunning:
    keepRunning = false
    hasOutOfOrderBlocks = false
    for i in 0 ..< ctx.workQueue.len:
      let start = ctx.workQueue[i].startIndex
      if ctx.workQueue[i].state == Received:
        if start == nextStartIndex:
          trace "Persisting pending work item", start
          ctx.persistWorkItem(ctx.workQueue[i])
          # Restart the scan: persisting moved the finalized block forward.
          nextStartIndex = ctx.finalizedBlock + 1
          keepRunning = true
          break
        else:
          hasOutOfOrderBlocks = true

  ctx.hasOutOfOrderBlocks = hasOutOfOrderBlocks
|
||||||
|
|
||||||
|
proc returnWorkItem(ctx: SyncContext, workItem: int): ValidationResult =
  ## Accept a completed download for queue slot `workItem` and persist it
  ## (directly, or via the out-of-order path). Returns Error when the peer
  ## delivered fewer blocks than requested, so the caller can drop the peer.
  # NOTE(review): on the success path nothing assigns `result`; this relies
  # on the implicit zero-initialized result — presumably ValidationResult.OK
  # is the enum's first value; confirm in p2p_types.
  let wi = addr ctx.workQueue[workItem]
  let askedBlocks = wi.numBlocks.int
  let receivedBlocks = wi.headers.len
  let start = wi.startIndex

  if askedBlocks == receivedBlocks:
    trace "Work item complete",
      start,
      askedBlocks,
      receivedBlocks

    if wi.startIndex != ctx.finalizedBlock + 1:
      # Not contiguous with what is already persisted; defer it.
      trace "Blocks out of order", start, final = ctx.finalizedBlock
      ctx.hasOutOfOrderBlocks = true

    if ctx.hasOutOfOrderBlocks:
      ctx.persistPendingWorkItems()
    else:
      ctx.persistWorkItem(wi[])
  else:
    trace "Work item complete but we got fewer blocks than requested, so we're ditching the whole thing.",
      start,
      askedBlocks,
      receivedBlocks
    return ValidationResult.Error
|
||||||
|
|
||||||
|
proc newSyncContext(chain: AbstractChainDB, peerPool: PeerPool): SyncContext =
  ## Allocate a sync context; the finalized pointer starts at the best block
  ## already in the local chain DB.
  new result
  result.chain = chain
  result.peerPool = peerPool
  result.trustedPeers = initSet[Peer]()
  result.finalizedBlock = chain.getBestBlockHeader().blockNumber
|
||||||
|
|
||||||
|
proc handleLostPeer(ctx: SyncContext) =
  ## Placeholder invoked when a syncing peer is dropped; currently a no-op.
  # TODO: ask the PeerPool for new connections and then call
  # `obtainBlocksFromPeer`
  discard
|
||||||
|
|
||||||
|
proc getBestBlockNumber(p: Peer): Future[BlockNumber] {.async.} =
  ## Ask `p` for the header of the best-block hash it advertised during the
  ## eth handshake and return its number. Returns the zero-initialized
  ## default when the peer sends no matching header.
  let request = BlocksRequest(
    startBlock: HashOrNum(isHash: true,
                          hash: p.state(eth).bestBlockHash),
    maxResults: 1,
    skip: 0,
    reverse: true)

  let latestBlock = await p.getBlockHeaders(request)

  if latestBlock.isSome and latestBlock.get.headers.len > 0:
    result = latestBlock.get.headers[0].blockNumber
|
||||||
|
|
||||||
|
proc obtainBlocksFromPeer(syncCtx: SyncContext, peer: Peer) {.async.} =
  ## Main per-peer download loop: repeatedly claim a work item, fetch its
  ## headers and bodies from `peer`, and return it to the context. Any
  ## failure (exception, short/invalid response, validation error) releases
  ## the work item, disconnects the peer and exits the loop.
  # Update our best block number
  try:
    let bestBlockNumber = await peer.getBestBlockNumber()
    if bestBlockNumber > syncCtx.endBlockNumber:
      trace "New sync end block number", number = bestBlockNumber
      syncCtx.endBlockNumber = bestBlockNumber
  except:
    debug "Exception in getBestBlockNumber()",
      exc = getCurrentException().name,
      err = getCurrentExceptionMsg()
    # no need to exit here, because the context might still have blocks to fetch
    # from this peer

  while (let workItemIdx = syncCtx.availableWorkItem(); workItemIdx != -1):
    template workItem: auto = syncCtx.workQueue[workItemIdx]
    workItem.state = Requested
    trace "Requesting block headers", start = workItem.startIndex, count = workItem.numBlocks, peer
    let request = BlocksRequest(
      startBlock: HashOrNum(isHash: false, number: workItem.startIndex),
      maxResults: workItem.numBlocks,
      skip: 0,
      reverse: false)

    var dataReceived = false
    try:
      let results = await peer.getBlockHeaders(request)
      if results.isSome:
        shallowCopy(workItem.headers, results.get.headers)

        # Fetch the matching bodies in batches of `maxBodiesFetch` hashes,
        # verifying along the way that header numbers are consecutive.
        var bodies = newSeq[BlockBody]()
        var hashes = newSeq[KeccakHash]()
        var nextIndex = workItem.startIndex
        for i in workItem.headers:
          if i.blockNumber != nextIndex:
            raise newException(Exception, "The block numbers are not in sequence. Not processing this workItem.")
          else:
            nextIndex = nextIndex + 1
          hashes.add(blockHash(i))
          if hashes.len == maxBodiesFetch:
            let b = await peer.getBlockBodies(hashes)
            hashes.setLen(0)
            bodies.add(b.get.blocks)

        # Flush the final partial batch, if any.
        if hashes.len != 0:
          let b = await peer.getBlockBodies(hashes)
          bodies.add(b.get.blocks)

        if bodies.len == workItem.headers.len:
          shallowCopy(workItem.bodies, bodies)
          dataReceived = true
        else:
          warn "Bodies len != headers.len", bodies = bodies.len, headers = workItem.headers.len
    except:
      # the success case sets `dataReceived`, so we can just fall back to the
      # failure path below. If we signal time-outs with exceptions such
      # failures will be easier to handle.
      debug "Exception in obtainBlocksFromPeer()",
        exc = getCurrentException().name,
        err = getCurrentExceptionMsg()

    var giveUpOnPeer = false

    if dataReceived:
      workItem.state = Received
      if syncCtx.returnWorkItem(workItemIdx) != ValidationResult.OK:
        giveUpOnPeer = true
    else:
      giveUpOnPeer = true

    if giveUpOnPeer:
      # Release the slot for another peer and drop this one.
      workItem.state = Initial
      try:
        await peer.disconnect(SubprotocolReason)
      except:
        discard
      syncCtx.handleLostPeer()
      break

  trace "Finished obtaining blocks", peer
|
||||||
|
|
||||||
|
proc peersAgreeOnChain(a, b: Peer): Future[bool] {.async.} =
  # Returns true if one of the peers acknowledges existence of the best block
  # of another peer.
  var
    a = a
    b = b

  # Query the peer with the higher advertised difficulty about the other
  # peer's best block hash.
  if a.state(eth).bestDifficulty < b.state(eth).bestDifficulty:
    swap(a, b)

  let request = BlocksRequest(
    startBlock: HashOrNum(isHash: true,
                          hash: b.state(eth).bestBlockHash),
    maxResults: 1,
    skip: 0,
    reverse: true)

  let latestBlock = await a.getBlockHeaders(request)
  result = latestBlock.isSome and latestBlock.get.headers.len > 0
|
||||||
|
|
||||||
|
proc randomTrustedPeer(ctx: SyncContext): Peer =
  ## Return a uniformly random member of `ctx.trustedPeers`.
  # NOTE(review): assumes the set is non-empty — callers only invoke this
  # when trustedPeers.len >= minPeersToStartSync; an empty set would return
  # a nil Peer. Confirm that invariant holds for all call sites.
  var k = rand(ctx.trustedPeers.len - 1)
  var i = 0
  for p in ctx.trustedPeers:
    result = p
    if i == k: return
    inc i
|
||||||
|
|
||||||
|
proc startSyncWithPeer(ctx: SyncContext, peer: Peer) {.async.} =
  ## Evaluate a newly connected peer for the trusted set and kick off block
  ## download when enough mutually-agreeing peers have accumulated.
  trace "start sync", peer, trustedPeers = ctx.trustedPeers.len
  if ctx.trustedPeers.len >= minPeersToStartSync:
    # We have enough trusted peers. Validate new peer against trusted
    if await peersAgreeOnChain(peer, ctx.randomTrustedPeer()):
      ctx.trustedPeers.incl(peer)
      asyncCheck ctx.obtainBlocksFromPeer(peer)
  elif ctx.trustedPeers.len == 0:
    # Assume the peer is trusted, but don't start sync until we reevaluate
    # it with more peers
    trace "Assume trusted peer", peer
    ctx.trustedPeers.incl(peer)
  else:
    # At this point we have some "trusted" candidates, but they are not
    # "trusted" enough. We evaluate `peer` against all other candidates.
    # If one of the candidates disagrees, we swap it for `peer`. If all
    # candidates agree, we add `peer` to trusted set. The peers in the set
    # will become "fully trusted" (and sync will start) when the set is big
    # enough
    var
      agreeScore = 0
      disagreedPeer: Peer

    for tp in ctx.trustedPeers:
      if await peersAgreeOnChain(peer, tp):
        inc agreeScore
      else:
        disagreedPeer = tp

    let disagreeScore = ctx.trustedPeers.len - agreeScore

    if agreeScore == ctx.trustedPeers.len:
      ctx.trustedPeers.incl(peer) # The best possible outcome
    elif disagreeScore == 1:
      # Exactly one dissenter: assume it is the outlier and replace it.
      trace "Peer is no longer trusted for sync", peer
      ctx.trustedPeers.excl(disagreedPeer)
      ctx.trustedPeers.incl(peer)
    else:
      trace "Peer not trusted for sync", peer

    if ctx.trustedPeers.len == minPeersToStartSync:
      # Threshold just reached: start downloading from every trusted peer.
      for p in ctx.trustedPeers:
        asyncCheck ctx.obtainBlocksFromPeer(p)
|
||||||
|
|
||||||
|
|
||||||
|
proc onPeerConnected(ctx: SyncContext, peer: Peer) =
  ## PeerPool callback: launch the (async) trust evaluation for `peer`,
  ## logging rather than propagating any failure.
  trace "New candidate for sync", peer
  try:
    let f = ctx.startSyncWithPeer(peer)
    # The future is not awaited here; surface failures via its callback.
    f.callback = proc(data: pointer) {.gcsafe.} =
      if f.failed:
        error "startSyncWithPeer failed", msg = f.readError.msg, peer
  except:
    debug "Exception in startSyncWithPeer()",
      exc = getCurrentException().name,
      err = getCurrentExceptionMsg()
|
||||||
|
|
||||||
|
|
||||||
|
proc onPeerDisconnected(ctx: SyncContext, p: Peer) =
  ## PeerPool callback: a disconnected peer can no longer be trusted.
  trace "peer disconnected ", peer = p
  ctx.trustedPeers.excl(p)
|
||||||
|
|
||||||
|
proc startSync(ctx: SyncContext) =
  ## Register connect/disconnect observers on the peer pool; syncing begins
  ## as peers arrive via `onPeerConnected`.
  var po: PeerObserver
  po.onPeerConnected = proc(p: Peer) {.gcsafe.} =
    ctx.onPeerConnected(p)

  po.onPeerDisconnected = proc(p: Peer) {.gcsafe.} =
    ctx.onPeerDisconnected(p)

  ctx.peerPool.addObserver(ctx, po)
|
||||||
|
|
||||||
|
proc findBestPeer(node: EthereumNode): (Peer, DifficultyInt) =
  ## Scan the connected eth peers and return the one advertising the highest
  ## total difficulty together with that difficulty. Yields (nil, 0) when no
  ## peer has an initialized eth state.
  var
    topDifficulty: DifficultyInt = 0.stuint(256)
    topPeer: Peer = nil

  for candidate in node.peers(eth):
    let ethState = candidate.state(eth)
    if ethState.initialized and ethState.bestDifficulty > topDifficulty:
      topDifficulty = ethState.bestDifficulty
      topPeer = candidate

  result = (topPeer, topDifficulty)
|
||||||
|
|
||||||
|
proc fastBlockchainSync*(node: EthereumNode): Future[SyncStatus] {.async.} =
|
||||||
|
## Code for the fast blockchain sync procedure:
|
||||||
|
## https://github.com/ethereum/wiki/wiki/Parallel-Block-Downloads
|
||||||
|
## https://github.com/ethereum/go-ethereum/pull/1889
|
||||||
|
# TODO: This needs a better interface. Consider removing this function and
|
||||||
|
# exposing SyncCtx
|
||||||
|
var syncCtx = newSyncContext(node.chain, node.peerPool)
|
||||||
|
syncCtx.startSync()
|
||||||
|
|
|
@ -0,0 +1,41 @@
|
||||||
|
import
|
||||||
|
eth/common/[eth_types, state_accessors]
|
||||||
|
|
||||||
|
# TODO: Perhaps we can move this to eth-common
|
||||||
|
|
||||||
|
proc getBlockHeaders*(db: AbstractChainDb,
                      req: BlocksRequest): seq[BlockHeader] {.gcsafe.} =
  ## Resolve a `BlocksRequest` against the local chain DB, returning up to
  ## `req.maxResults` consecutive headers starting at `req.startBlock`.
  ## Returns an empty seq when the start block is unknown.
  # NOTE(review): `req.skip` and `req.reverse` are not honored here —
  # presumably callers only issue forward, skip-0 requests; confirm.
  result = newSeqOfCap[BlockHeader](req.maxResults)

  var foundBlock: BlockHeader
  if db.getBlockHeader(req.startBlock, foundBlock):
    result.add foundBlock

    # BUG FIX: the successor walk used to run even when the start block was
    # not found, iterating from an uninitialized `foundBlock`. It now only
    # runs after a successful initial lookup.
    while uint64(result.len) < req.maxResults:
      if not db.getSuccessorHeader(foundBlock, foundBlock):
        break
      result.add foundBlock
|
||||||
|
|
||||||
|
template fetcher*(fetcherName, fetchingFunc, InputType, ResultType: untyped) =
  ## Generate a batch-lookup proc named `fetcherName` that maps each element
  ## of `lookups` through `fetchingFunc` and collects only the results that
  ## were found locally (missing entries are silently skipped).
  proc fetcherName*(db: AbstractChainDb,
                    lookups: openarray[InputType]): seq[ResultType] {.gcsafe.} =
    for lookup in lookups:
      let fetched = fetchingFunc(db, lookup)
      if fetched.hasData:
        # TODO: should there be an else clause here.
        # Is the peer responsible of figuring out that
        # some of the requested items were not found?
        result.add deref(fetched)
|
||||||
|
|
||||||
|
# Instantiate the batch lookup procs used by the wire protocols; each turns
# a list of requests into the subset of locally available results.
fetcher getContractCodes, getContractCode, ContractCodeRequest, Blob
fetcher getBlockBodies, getBlockBody, KeccakHash, BlockBody
fetcher getStorageNodes, getStorageNode, KeccakHash, Blob
fetcher getReceipts, getReceipt, KeccakHash, Receipt
fetcher getProofs, getProof, ProofRequest, Blob
fetcher getHeaderProofs, getHeaderProof, ProofRequest, Blob
|
||||||
|
|
||||||
|
proc getHelperTrieProofs*(db: AbstractChainDb,
                          reqs: openarray[HelperTrieProofRequest],
                          outNodes: var seq[Blob], outAuxData: var seq[Blob]) =
  ## Unimplemented stub: leaves `outNodes` and `outAuxData` untouched.
  discard
|
||||||
|
|
|
@ -0,0 +1,326 @@
|
||||||
|
#
|
||||||
|
# Ethereum P2P
|
||||||
|
# (c) Copyright 2018
|
||||||
|
# Status Research & Development GmbH
|
||||||
|
#
|
||||||
|
# Licensed under either of
|
||||||
|
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||||
|
# MIT license (LICENSE-MIT)
|
||||||
|
#
|
||||||
|
|
||||||
|
import
|
||||||
|
times,
|
||||||
|
asyncdispatch2, eth/[keys, rlp], stint, nimcrypto, chronicles,
|
||||||
|
kademlia, enode
|
||||||
|
|
||||||
|
export
|
||||||
|
Node
|
||||||
|
|
||||||
|
logScope:
|
||||||
|
topics = "discovery"
|
||||||
|
|
||||||
|
const
|
||||||
|
MAINNET_BOOTNODES* = [
|
||||||
|
"enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303", # noqa: E501
|
||||||
|
"enode://aa36fdf33dd030378a0168efe6ed7d5cc587fafa3cdd375854fe735a2e11ea3650ba29644e2db48368c46e1f60e716300ba49396cd63778bf8a818c09bded46f@13.93.211.84:30303", # noqa: E501
|
||||||
|
"enode://78de8a0916848093c73790ead81d1928bec737d565119932b98c6b100d944b7a95e94f847f689fc723399d2e31129d182f7ef3863f2b4c820abbf3ab2722344d@191.235.84.50:30303", # noqa: E501
|
||||||
|
"enode://158f8aab45f6d19c6cbf4a089c2670541a8da11978a2f90dbf6a502a4a3bab80d288afdbeb7ec0ef6d92de563767f3b1ea9e8e334ca711e9f8e2df5a0385e8e6@13.75.154.138:30303", # noqa: E501
|
||||||
|
"enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303", # noqa: E501
|
||||||
|
]
|
||||||
|
ROPSTEN_BOOTNODES* = [
|
||||||
|
"enode://30b7ab30a01c124a6cceca36863ece12c4f5fa68e3ba9b0b51407ccc002eeed3b3102d20a88f1c1d3c3154e2449317b8ef95090e77b312d5cc39354f86d5d606@52.176.7.10:30303", # noqa: E501
|
||||||
|
"enode://865a63255b3bb68023b6bffd5095118fcc13e79dcf014fe4e47e065c350c7cc72af2e53eff895f11ba1bbb6a2b33271c1116ee870f266618eadfc2e78aa7349c@52.176.100.77:30303", # noqa: E501
|
||||||
|
"enode://6332792c4a00e3e4ee0926ed89e0d27ef985424d97b6a45bf0f23e51f0dcb5e66b875777506458aea7af6f9e4ffb69f43f3778ee73c81ed9d34c51c4b16b0b0f@52.232.243.152:30303", # noqa: E501
|
||||||
|
"enode://94c15d1b9e2fe7ce56e458b9a3b672ef11894ddedd0c6f247e0f1d3487f52b66208fb4aeb8179fce6e3a749ea93ed147c37976d67af557508d199d9594c35f09@192.81.208.223:30303", # noqa: E501
|
||||||
|
]
|
||||||
|
LOCAL_BOOTNODES = [
|
||||||
|
"enode://6456719e7267e061161c88720287a77b80718d2a3a4ff5daeba614d029dc77601b75e32190aed1c9b0b9ccb6fac3bcf000f48e54079fa79e339c25d8e9724226@127.0.0.1:30301"
|
||||||
|
]
|
||||||
|
|
||||||
|
# UDP packet constants.
|
||||||
|
MAC_SIZE = 256 div 8 # 32
|
||||||
|
SIG_SIZE = 520 div 8 # 65
|
||||||
|
HEAD_SIZE = MAC_SIZE + SIG_SIZE # 97
|
||||||
|
EXPIRATION = 60 # let messages expire after N secondes
|
||||||
|
PROTO_VERSION = 4
|
||||||
|
|
||||||
|
type
|
||||||
|
DiscoveryProtocol* = ref object
|
||||||
|
privKey: PrivateKey
|
||||||
|
address: Address
|
||||||
|
bootstrapNodes*: seq[Node]
|
||||||
|
thisNode*: Node
|
||||||
|
kademlia: KademliaProtocol[DiscoveryProtocol]
|
||||||
|
transp: DatagramTransport
|
||||||
|
|
||||||
|
CommandId = enum
|
||||||
|
cmdPing = 1
|
||||||
|
cmdPong = 2
|
||||||
|
cmdFindNode = 3
|
||||||
|
cmdNeighbours = 4
|
||||||
|
|
||||||
|
const MaxDgramSize = 1280
|
||||||
|
|
||||||
|
proc append*(w: var RlpWriter, a: IpAddress) =
  ## RLP-encode an IP address as its raw 4- or 16-byte representation.
  case a.family
  of IpAddressFamily.IPv6:
    w.append(a.address_v6.toMemRange)
  of IpAddressFamily.IPv4:
    w.append(a.address_v4.toMemRange)
|
||||||
|
|
||||||
|
# RLP writers for the remaining primitive field types used in discovery
# packets (ports as ints, public keys and digests as raw bytes).
proc append(w: var RlpWriter, p: Port) {.inline.} = w.append(p.int)
proc append(w: var RlpWriter, pk: PublicKey) {.inline.} = w.append(pk.getRaw())
proc append(w: var RlpWriter, h: MDigest[256]) {.inline.} = w.append(h.data)
|
||||||
|
|
||||||
|
proc pack(cmdId: CommandId, payload: BytesRange, pk: PrivateKey): Bytes =
  ## Create and sign a UDP message to be sent to a remote node.
  ##
  ## See https://github.com/ethereum/devp2p/blob/master/rlpx.md#node-discovery for information on
  ## how UDP packets are structured.
  ##
  ## Resulting layout: hash(32) || signature || cmd-byte || rlp-payload,
  ## where hash covers everything after itself.

  # TODO: There is a lot of unneeded allocations here
  let encodedData = @[cmdId.byte] & payload.toSeq()
  let signature = @(pk.signMessage(encodedData).getRaw())
  let msgHash = keccak256.digest(signature & encodedData)
  result = @(msgHash.data) & signature & encodedData
|
||||||
|
|
||||||
|
proc validateMsgHash(msg: Bytes, msgHash: var MDigest[256]): bool =
  ## Check that the first MAC_SIZE bytes of `msg` equal the keccak256 of the
  ## remainder; on success `msgHash` holds the received hash.
  # BUG FIX: `msg` arrives from untrusted UDP peers — a truncated packet
  # previously caused an out-of-bounds error in the openArray slicing below.
  # Reject anything too short to contain a MAC plus at least one byte.
  if msg.len <= MAC_SIZE:
    return false
  msgHash.data[0 .. ^1] = msg.toOpenArray(0, msgHash.data.high)
  result = msgHash == keccak256.digest(msg.toOpenArray(MAC_SIZE, msg.high))
|
||||||
|
|
||||||
|
proc recoverMsgPublicKey(msg: Bytes, pk: var PublicKey): bool =
  ## Recover the sender's public key from the signature section of `msg`.
  # Signature bytes start at MAC_SIZE; the signed digest covers everything
  # after the header. NOTE(review): the inclusive slice MAC_SIZE .. MAC_SIZE+65
  # spans 66 bytes while SIG_SIZE is 65 — presumably recoverSignatureKey only
  # consumes the first 65; confirm against the eth_keys API.
  recoverSignatureKey(msg.toOpenArray(MAC_SIZE, MAC_SIZE + 65),
                      keccak256.digest(msg.toOpenArray(HEAD_SIZE, msg.high)).data,
                      pk) == EthKeysStatus.Success
|
||||||
|
|
||||||
|
proc unpack(msg: Bytes): tuple[cmdId: CommandId, payload: Bytes] =
  ## Split a validated packet into its command byte and RLP payload.
  # NOTE(review): the byte-to-CommandId conversion is unvalidated; an
  # out-of-range command byte from an untrusted peer presumably raises a
  # range defect, caught by the broad except in processClient — confirm.
  result = (cmdId: msg[HEAD_SIZE].CommandId, payload: msg[HEAD_SIZE + 1 .. ^1])
|
||||||
|
|
||||||
|
proc expiration(): uint32 =
  ## Unix timestamp EXPIRATION seconds from now, stamped into every
  ## outgoing packet so stale packets can be rejected.
  result = uint32(epochTime() + EXPIRATION)
|
||||||
|
|
||||||
|
# Wire protocol
|
||||||
|
|
||||||
|
proc send(d: DiscoveryProtocol, n: Node, data: seq[byte]) =
  ## Fire-and-forget datagram to `n`'s UDP endpoint; failures are only
  ## logged via the future's callback.
  let ta = initTAddress(n.node.address.ip, n.node.address.udpPort)
  let f = d.transp.sendTo(ta, data)
  f.callback = proc(data: pointer) {.gcsafe.} =
    if f.failed:
      debug "Discovery send failed", msg = f.readError.msg
|
||||||
|
|
||||||
|
proc sendPing*(d: DiscoveryProtocol, n: Node): seq[byte] =
  ## Send a ping to `n`. Returns the packet's MAC (its first MAC_SIZE
  ## bytes) — presumably so the caller can match the answering pong's
  ## echoed token; confirm against the kademlia layer.
  let payload = rlp.encode((PROTO_VERSION, d.address, n.node.address,
                            expiration())).toRange
  let msg = pack(cmdPing, payload, d.privKey)
  result = msg[0 ..< MAC_SIZE]
  trace ">>> ping ", n
  d.send(n, msg)
|
||||||
|
|
||||||
|
proc sendPong*(d: DiscoveryProtocol, n: Node, token: MDigest[256]) =
  ## Send a pong to `n`, echoing `token` (the hash of the ping being
  ## answered).
  let payload = rlp.encode((n.node.address, token, expiration())).toRange
  let msg = pack(cmdPong, payload, d.privKey)
  trace ">>> pong ", n
  d.send(n, msg)
|
||||||
|
|
||||||
|
proc sendFindNode*(d: DiscoveryProtocol, n: Node, targetNodeId: NodeId) =
  ## Ask `n` for nodes close to `targetNodeId`.
  # The wire target is 64 bytes; the 32-byte big-endian node id fills the
  # upper half, the lower half stays zero. NOTE(review): the spec's target
  # is a full 64-byte public key — confirm remote peers accept this form.
  var data: array[64, byte]
  data[32 .. ^1] = targetNodeId.toByteArrayBE()
  let payload = rlp.encode((data, expiration())).toRange
  let msg = pack(cmdFindNode, payload, d.privKey)
  trace ">>> find_node to ", n#, ": ", msg.toHex()
  d.send(n, msg)
|
||||||
|
|
||||||
|
proc sendNeighbours*(d: DiscoveryProtocol, node: Node, neighbours: seq[Node]) =
  ## Send `neighbours` to `node`, split into packets of at most
  ## MAX_NEIGHBOURS_PER_PACKET entries to stay under the datagram limit.
  const MAX_NEIGHBOURS_PER_PACKET = 12 # TODO: Implement a smarter way to compute it
  type Neighbour = tuple[ip: IpAddress, udpPort, tcpPort: Port, pk: PublicKey]
  var nodes = newSeqOfCap[Neighbour](MAX_NEIGHBOURS_PER_PACKET)
  shallow(nodes)

  # Encode and transmit the currently accumulated batch, then reset it.
  template flush() =
    block:
      let payload = rlp.encode((nodes, expiration())).toRange
      let msg = pack(cmdNeighbours, payload, d.privkey)
      trace "Neighbours to", node, nodes
      d.send(node, msg)
      nodes.setLen(0)

  for i, n in neighbours:
    nodes.add((n.node.address.ip, n.node.address.udpPort,
               n.node.address.tcpPort, n.node.pubkey))
    if nodes.len == MAX_NEIGHBOURS_PER_PACKET:
      flush()

  # Send any remaining partial batch.
  if nodes.len != 0: flush()
|
||||||
|
|
||||||
|
proc newDiscoveryProtocol*(privKey: PrivateKey, address: Address,
                           bootstrapNodes: openarray[ENode]
                          ): DiscoveryProtocol =
  ## Construct a discovery protocol instance; the transport is not bound
  ## until `open` is called.
  result.new()
  result.privKey = privKey
  result.address = address
  result.bootstrapNodes = newSeqOfCap[Node](bootstrapNodes.len)
  for n in bootstrapNodes: result.bootstrapNodes.add(newNode(n))
  result.thisNode = newNode(privKey.getPublicKey(), address)
  result.kademlia = newKademliaProtocol(result.thisNode, result)
|
||||||
|
|
||||||
|
proc recvPing(d: DiscoveryProtocol, node: Node,
              msgHash: MDigest[256]) {.inline.} =
  ## Delegate ping handling (including the pong reply) to the kademlia layer.
  d.kademlia.recvPing(node, msgHash)
|
||||||
|
|
||||||
|
proc recvPong(d: DiscoveryProtocol, node: Node, payload: Bytes) {.inline.} =
  ## Extract the echoed token (second list element) from a pong payload and
  ## hand it to kademlia for ping/pong matching.
  let rlp = rlpFromBytes(payload.toRange)
  let tok = rlp.listElem(1).toBytes().toSeq()
  d.kademlia.recvPong(node, tok)
|
||||||
|
|
||||||
|
proc recvNeighbours(d: DiscoveryProtocol, node: Node,
                    payload: Bytes) {.inline.} =
  ## Decode a neighbours packet and forward the contained nodes to kademlia.
  ## Malformed entries (bad IP length, unparsable key) are skipped with a log.
  let rlp = rlpFromBytes(payload.toRange)
  let neighboursList = rlp.listElem(0)
  let sz = neighboursList.listLen()

  var neighbours = newSeqOfCap[Node](16)
  for i in 0 ..< sz:
    # Each entry is a list: [ip, udp-port, tcp-port, node-id].
    let n = neighboursList.listElem(i)
    let ipBlob = n.listElem(0).toBytes
    var ip: IpAddress
    case ipBlob.len
    of 4:
      ip = IpAddress(family: IpAddressFamily.IPv4)
      copyMem(addr ip.address_v4[0], baseAddr ipBlob, 4)
    of 16:
      ip = IpAddress(family: IpAddressFamily.IPv6)
      copyMem(addr ip.address_v6[0], baseAddr ipBlob, 16)
    else:
      # Untrusted input: drop the malformed entry, keep the rest.
      error "Wrong ip address length!"
      continue

    let udpPort = n.listElem(1).toInt(uint16).Port
    let tcpPort = n.listElem(2).toInt(uint16).Port
    var pk: PublicKey
    if recoverPublicKey(n.listElem(3).toBytes.toOpenArray(), pk) != EthKeysStatus.Success:
      warn "Could not parse public key"
      continue

    neighbours.add(newNode(pk, Address(ip: ip, udpPort: udpPort, tcpPort: tcpPort)))
  d.kademlia.recvNeighbours(node, neighbours)
|
||||||
|
|
||||||
|
proc recvFindNode(d: DiscoveryProtocol, node: Node, payload: Bytes) {.inline.} =
  ## Decode a find_node request and forward the target id to kademlia.
  # The 256-bit node id is read from the upper 32 bytes of the 64-byte
  # target field, mirroring how sendFindNode packs it.
  let rlp = rlpFromBytes(payload.toRange)
  trace "<<< find_node from ", node
  let rng = rlp.listElem(0).toBytes
  let nodeId = readUIntBE[256](rng[32 .. ^1].toOpenArray())
  d.kademlia.recvFindNode(node, nodeId)
|
||||||
|
|
||||||
|
proc expirationValid(rlpEncodedPayload: seq[byte]): bool {.inline.} =
  ## True when the packet's trailing expiration timestamp (by convention the
  ## last element of the RLP list) is still in the future per the local clock.
  let rlp = rlpFromBytes(rlpEncodedPayload.toRange)
  let expiration = rlp.listElem(rlp.listLen - 1).toInt(uint32)
  result = epochTime() <= expiration.float
|
||||||
|
|
||||||
|
proc receive(d: DiscoveryProtocol, a: Address, msg: Bytes) =
  ## Validate and dispatch one raw discovery datagram from `a`. Packets with
  ## a bad MAC, unrecoverable sender key or expired timestamp are dropped
  ## (with logging) instead of raising.
  var msgHash: MDigest[256]
  if validateMsgHash(msg, msgHash):
    var remotePubkey: PublicKey
    if recoverMsgPublicKey(msg, remotePubkey):
      let (cmdId, payload) = unpack(msg)
      # echo "received cmd: ", cmdId, ", from: ", a
      # echo "pubkey: ", remotePubkey.raw_key.toHex()
      if expirationValid(payload):
        let node = newNode(remotePubkey, a)
        case cmdId
        of cmdPing:
          d.recvPing(node, msgHash)
        of cmdPong:
          d.recvPong(node, payload)
        of cmdNeighbours:
          d.recvNeighbours(node, payload)
        of cmdFindNode:
          d.recvFindNode(node, payload)
      else:
        trace "Received msg already expired", cmdId, a
    else:
      error "Wrong public key from ", a
  else:
    error "Wrong msg mac from ", a
|
||||||
|
|
||||||
|
proc processClient(transp: DatagramTransport,
                   raddr: TransportAddress): Future[void] {.async, gcsafe.} =
  ## Datagram callback registered in `open`: reads one message and feeds it
  ## into the owning `DiscoveryProtocol`; any error is logged and swallowed
  ## (packets from untrusted peers must not take the transport down).
  var proto = getUserData[DiscoveryProtocol](transp)
  try:
    # TODO: Maybe here better to use `peekMessage()` to avoid allocation,
    # but `Bytes` object is just a simple seq[byte], and `ByteRange` object
    # do not support custom length.
    # FIX: removed a dead outer `var buf: seq[byte]` that was immediately
    # shadowed by this declaration.
    var buf = transp.getMessage()
    let a = Address(ip: raddr.address, udpPort: raddr.port, tcpPort: raddr.port)
    proto.receive(a, buf)
  except:
    debug "Receive failed", err = getCurrentExceptionMsg()
|
||||||
|
|
||||||
|
proc open*(d: DiscoveryProtocol) =
  ## Bind the UDP transport on the configured address; incoming datagrams
  ## are routed back to this instance via `processClient`.
  let ta = initTAddress(d.address.ip, d.address.udpPort)
  d.transp = newDatagramTransport(processClient, udata = d, local = ta)
|
||||||
|
|
||||||
|
proc lookupRandom*(d: DiscoveryProtocol): Future[seq[Node]] {.inline.} =
  ## Thin forwarder to the kademlia random lookup.
  d.kademlia.lookupRandom()
|
||||||
|
|
||||||
|
proc run(d: DiscoveryProtocol) {.async.} =
  ## Endless maintenance loop: perform a random kademlia lookup, pause
  ## 3 seconds, log progress, repeat.
  while true:
    discard await d.lookupRandom()
    await sleepAsync(3000)
    trace "Discovered nodes", nodes = d.kademlia.nodesDiscovered
|
||||||
|
|
||||||
|
proc bootstrap*(d: DiscoveryProtocol) {.async.} =
  ## Seed the routing table from the configured bootstrap nodes, then start
  ## the background lookup loop (fire-and-forget; its future is discarded).
  await d.kademlia.bootstrap(d.bootstrapNodes)
  discard d.run()
|
||||||
|
|
||||||
|
proc resolve*(d: DiscoveryProtocol, n: NodeId): Future[Node] =
  ## Thin forwarder: resolve a node id to a Node via kademlia.
  d.kademlia.resolve(n)
|
||||||
|
|
||||||
|
proc randomNodes*(d: DiscoveryProtocol, count: int): seq[Node] {.inline.} =
  ## Thin forwarder: up to `count` random nodes from the kademlia table.
  d.kademlia.randomNodes(count)
|
||||||
|
|
||||||
|
when isMainModule:
|
||||||
|
import logging, byteutils
|
||||||
|
|
||||||
|
addHandler(newConsoleLogger())
|
||||||
|
|
||||||
|
block:
|
||||||
|
let m = hexToSeqByte"79664bff52ee17327b4a2d8f97d8fb32c9244d719e5038eb4f6b64da19ca6d271d659c3ad9ad7861a928ca85f8d8debfbe6b7ade26ad778f2ae2ba712567fcbd55bc09eb3e74a893d6b180370b266f6aaf3fe58a0ad95f7435bf3ddf1db940d20102f2cb842edbd4d182944382765da0ab56fb9e64a85a597e6bb27c656b4f1afb7e06b0fd4e41ccde6dba69a3c4a150845aaa4de2"
|
||||||
|
var msgHash: MDigest[256]
|
||||||
|
doAssert(validateMsgHash(m, msgHash))
|
||||||
|
var remotePubkey: PublicKey
|
||||||
|
doAssert(recoverMsgPublicKey(m, remotePubkey))
|
||||||
|
|
||||||
|
let (cmdId, payload) = unpack(m)
|
||||||
|
assert(payload == hexToSeqByte"f2cb842edbd4d182944382765da0ab56fb9e64a85a597e6bb27c656b4f1afb7e06b0fd4e41ccde6dba69a3c4a150845aaa4de2")
|
||||||
|
assert(cmdId == cmdPong)
|
||||||
|
assert(remotePubkey == initPublicKey("78de8a0916848093c73790ead81d1928bec737d565119932b98c6b100d944b7a95e94f847f689fc723399d2e31129d182f7ef3863f2b4c820abbf3ab2722344d"))
|
||||||
|
|
||||||
|
let privKey = initPrivateKey("a2b50376a79b1a8c8a3296485572bdfbf54708bb46d3c25d73d2723aaaf6a617")
|
||||||
|
|
||||||
|
# echo privKey
|
||||||
|
|
||||||
|
# block:
|
||||||
|
# var b = @[1.byte, 2, 3]
|
||||||
|
# let m = pack(cmdPing, b.initBytesRange, privKey)
|
||||||
|
# let (remotePubkey, cmdId, payload) = unpack(m)
|
||||||
|
# assert(remotePubkey.raw_key.toHex == privKey.public_key.raw_key.toHex)
|
||||||
|
|
||||||
|
var bootnodes = newSeq[ENode]()
|
||||||
|
for item in LOCAL_BOOTNODES:
|
||||||
|
bootnodes.add(initENode(item))
|
||||||
|
|
||||||
|
let listenPort = Port(30310)
|
||||||
|
var address = Address(udpPort: listenPort, tcpPort: listenPort)
|
||||||
|
address.ip.family = IpAddressFamily.IPv4
|
||||||
|
let discovery = newDiscoveryProtocol(privkey, address, bootnodes)
|
||||||
|
|
||||||
|
echo discovery.thisNode.node.pubkey
|
||||||
|
echo "this_node.id: ", discovery.thisNode.id.toHex()
|
||||||
|
|
||||||
|
discovery.open()
|
||||||
|
|
||||||
|
proc test() {.async.} =
|
||||||
|
await discovery.bootstrap()
|
||||||
|
|
||||||
|
waitFor test()
|
|
@ -0,0 +1,207 @@
|
||||||
|
#
|
||||||
|
# Ethereum P2P
|
||||||
|
# (c) Copyright 2018
|
||||||
|
# Status Research & Development GmbH
|
||||||
|
#
|
||||||
|
# Licensed under either of
|
||||||
|
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||||
|
# MIT license (LICENSE-MIT)
|
||||||
|
#
|
||||||
|
|
||||||
|
## This module implements ECIES method encryption/decryption.
|
||||||
|
|
||||||
|
import eth/keys, nimcrypto
|
||||||
|
|
||||||
|
const
|
||||||
|
emptyMac* = array[0, byte]([])
|
||||||
|
|
||||||
|
type
|
||||||
|
EciesException* = object of Exception
|
||||||
|
EciesStatus* = enum
|
||||||
|
Success, ## Operation was successful
|
||||||
|
BufferOverrun, ## Output buffer size is too small
|
||||||
|
RandomError, ## Could not obtain random data
|
||||||
|
EcdhError, ## ECDH shared secret could not be calculated
|
||||||
|
WrongHeader, ## ECIES header is incorrect
|
||||||
|
IncorrectKey, ## Recovered public key is invalid
|
||||||
|
IncorrectTag, ## ECIES tag verification failed
|
||||||
|
IncompleteError ## Decryption needs more data
|
||||||
|
|
||||||
|
EciesHeader* = object {.packed.}
|
||||||
|
version*: byte
|
||||||
|
pubkey*: array[RawPublicKeySize, byte]
|
||||||
|
iv*: array[aes128.sizeBlock, byte]
|
||||||
|
data*: byte
|
||||||
|
|
||||||
|
template eciesOverheadLength*(): int =
  ## Return data overhead size for ECIES encrypted message
  1 + sizeof(PublicKey) + aes128.sizeBlock + sha256.sizeDigest

template eciesEncryptedLength*(size: int): int =
  ## Return size of encrypted message for message with size `size`.
  size + eciesOverheadLength()

template eciesDecryptedLength*(size: int): int =
  ## Return size of decrypted message for encrypted message with size `size`.
  size - eciesOverheadLength()

template eciesMacLength(size: int): int =
  ## Return size of authenticated data
  size + aes128.sizeBlock

template eciesMacPos(size: int): int =
  ## Return position of MAC code in encrypted block
  size - sha256.sizeDigest

template eciesDataPos(): int =
  ## Return position of encrypted data in block
  1 + sizeof(PublicKey) + aes128.sizeBlock

template eciesIvPos(): int =
  ## Return position of IV in block
  1 + sizeof(PublicKey)

template eciesTagPos(size: int): int =
  ## Return position of the authentication tag in a block whose plaintext
  ## is `size` bytes long (i.e. right after the encrypted data).
  1 + sizeof(PublicKey) + aes128.sizeBlock + size
|
||||||
|
proc kdf*(data: openarray[byte]): array[KeyLength, byte] {.noInit.} =
  ## NIST SP 800-56a Concatenation Key Derivation Function (see section 5.8.1)
  ## Derives KeyLength bytes from `data` by hashing a big-endian counter
  ## concatenated with the input, repeating until enough digest material
  ## has been produced.
  var ctx: sha256
  var counter: uint32
  var counterLe: uint32
  let reps = ((KeyLength + 7) * 8) div (int(ctx.sizeBlock) * 8)
  var offset = 0
  # Over-allocated by one digest so the final truncation to KeyLength is safe.
  var storage = newSeq[byte](int(ctx.sizeDigest) * (reps + 1))
  while counter <= uint32(reps):
    counter = counter + 1
    # NOTE(review): despite the variable name, LSWAP presumably produces the
    # big-endian counter required by SP 800-56a — confirm in nimcrypto.
    counterLe = LSWAP(counter)
    ctx.init()
    ctx.update(cast[ptr byte](addr counterLe), uint(sizeof(uint32)))
    ctx.update(unsafeAddr data[0], uint(len(data)))
    var hash = ctx.finish()
    copyMem(addr storage[offset], addr hash.data[0], ctx.sizeDigest)
    offset += int(ctx.sizeDigest)
  ctx.clear() # clean ctx
  copyMem(addr result[0], addr storage[0], KeyLength)
|
||||||
|
|
||||||
|
proc eciesEncrypt*(input: openarray[byte], output: var openarray[byte],
                   pubkey: PublicKey,
                   sharedmac: openarray[byte] = emptyMac): EciesStatus =
  ## Encrypt data with ECIES method using given public key `pubkey`.
  ## ``input`` - input data
  ## ``output`` - output data
  ## ``pubkey`` - ECC public key
  ## ``sharedmac`` - additional data used to calculate encrypted message MAC
  ## Length of output data can be calculated using ``eciesEncryptedLength()``
  ## template.
  var
    encKey: array[aes128.sizeKey, byte]
    cipher: CTR[aes128]
    ctx: HMAC[sha256]
    iv: array[aes128.sizeBlock, byte]
    secret: SharedSecret
    material: array[KeyLength, byte]

  if len(output) < eciesEncryptedLength(len(input)):
    return(BufferOverrun)
  if randomBytes(iv) != aes128.sizeBlock:
    return(RandomError)

  # Fresh ephemeral key pair per message; its public half is sent in the header.
  var ephemeral = newKeyPair()

  if ecdhAgree(ephemeral.seckey, pubkey, secret) != EthKeysStatus.Success:
    return(EcdhError)

  material = kdf(secret.data)
  burnMem(secret)

  # First half of the derived material is the AES key, hash of the second
  # half is the HMAC key. Key material is wiped as soon as it is split.
  copyMem(addr encKey[0], addr material[0], aes128.sizeKey)
  var macKey = sha256.digest(material, ostart = KeyLength div 2)
  burnMem(material)

  var header = cast[ptr EciesHeader](addr output[0])
  header.version = 0x04
  header.pubkey = ephemeral.pubkey.getRaw()
  header.iv = iv

  # Encrypt plaintext in place into the output buffer (AES-128-CTR).
  var so = eciesDataPos()
  var eo = so + len(input)
  cipher.init(encKey, iv)
  cipher.encrypt(input, toOpenArray(output, so, eo))
  burnMem(encKey)
  cipher.clear()

  # MAC covers IV + ciphertext (+ optional shared MAC data).
  so = eciesIvPos()
  eo = so + aes128.sizeBlock + len(input) - 1
  ctx.init(macKey.data)
  ctx.update(toOpenArray(output, so, eo))
  if len(sharedmac) > 0:
    ctx.update(sharedmac)
  var tag = ctx.finish()

  so = eciesTagPos(len(input))
  # ctx.sizeDigest() crash compiler
  copyMem(addr output[so], addr tag.data[0], sha256.sizeDigest)
  ctx.clear()

  result = Success
proc eciesDecrypt*(input: openarray[byte],
                   output: var openarray[byte],
                   seckey: PrivateKey,
                   sharedmac: openarray[byte] = emptyMac): EciesStatus =
  ## Decrypt data with ECIES method using given private key `seckey`.
  ## ``input`` - input data
  ## ``output`` - output data
  ## ``seckey`` - ECC private key
  ## ``sharedmac`` - additional data used to calculate encrypted message MAC
  ## Length of output data can be calculated using ``eciesDecryptedLength()``
  ## template.
  var
    pubkey: PublicKey
    encKey: array[aes128.sizeKey, byte]
    cipher: CTR[aes128]
    ctx: HMAC[sha256]
    secret: SharedSecret

  if len(input) <= 0:
    return(IncompleteError)

  var header = cast[ptr EciesHeader](unsafeAddr input[0])
  # 0x04 is the uncompressed-point marker used as the ECIES version byte.
  if header.version != 0x04:
    return(WrongHeader)
  if len(input) <= eciesOverheadLength():
    return(IncompleteError)
  if len(input) - eciesOverheadLength() > len(output):
    return(BufferOverrun)
  if recoverPublicKey(header.pubkey, pubkey) != EthKeysStatus.Success:
    return(IncorrectKey)
  if ecdhAgree(seckey, pubkey, secret) != EthKeysStatus.Success:
    return(EcdhError)

  # Derive AES and HMAC keys exactly as the encryption side does.
  var material = kdf(secret.data)
  burnMem(secret)
  copyMem(addr encKey[0], addr material[0], aes128.sizeKey)
  var macKey = sha256.digest(material, ostart = KeyLength div 2)
  burnMem(material)

  # Verify MAC over IV + ciphertext before decrypting anything.
  let macsize = eciesMacLength(len(input) - eciesOverheadLength())
  ctx.init(macKey.data)
  burnMem(macKey)
  ctx.update(toOpenArray(input, eciesIvPos(), eciesIvPos() + macsize - 1))
  if len(sharedmac) > 0:
    ctx.update(sharedmac)
  var tag = ctx.finish()
  ctx.clear()

  if not equalMem(addr tag.data[0], unsafeAddr input[eciesMacPos(len(input))],
                  sha256.sizeDigest):
    return(IncorrectTag)

  let datsize = eciesDecryptedLength(len(input))
  cipher.init(encKey, header.iv)
  burnMem(encKey)
  cipher.decrypt(toOpenArray(input, eciesDataPos(),
                             eciesDataPos() + datsize - 1), output)
  cipher.clear()
  result = Success
@ -0,0 +1,162 @@
|
||||||
|
#
|
||||||
|
# Ethereum P2P
|
||||||
|
# (c) Copyright 2018
|
||||||
|
# Status Research & Development GmbH
|
||||||
|
#
|
||||||
|
# Licensed under either of
|
||||||
|
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||||
|
# MIT license (LICENSE-MIT)
|
||||||
|
#
|
||||||
|
|
||||||
|
import uri, strutils, net
|
||||||
|
import eth/keys
|
||||||
|
|
||||||
|
type
  ENodeStatus* = enum
    ## ENode status codes
    Success,              ## Conversion operation succeed
    IncorrectNodeId,      ## Incorrect public key supplied
    IncorrectScheme,      ## Incorrect URI scheme supplied
    IncorrectIP,          ## Incorrect IP address supplied
    IncorrectPort,        ## Incorrect TCP port supplied
    IncorrectDiscPort,    ## Incorrect UDP discovery port supplied
    IncorrectUri,         ## Incorrect URI supplied
    IncompleteENode       ## Incomplete ENODE object

  Address* = object
    ## Network address object
    ip*: IpAddress        ## IPv4/IPv6 address
    udpPort*: Port        ## UDP discovery port number
    tcpPort*: Port        ## TCP port number

  ENode* = object
    ## ENode object
    pubkey*: PublicKey    ## Node public key
    address*: Address     ## Node address

  # Raised by the raising initENode overload when a URI fails to parse.
  ENodeException* = object of Exception
proc raiseENodeError(status: ENodeStatus) =
  ## Translate a failing ``ENodeStatus`` into an ``ENodeException``.
  ## ``Success`` raises nothing.
  case status
  of IncorrectIP:
    raise newException(ENodeException, "Incorrect IP address")
  of IncorrectPort:
    raise newException(ENodeException, "Incorrect port number")
  of IncorrectDiscPort:
    raise newException(ENodeException, "Incorrect discovery port number")
  of IncorrectUri:
    raise newException(ENodeException, "Incorrect URI")
  of IncorrectScheme:
    raise newException(ENodeException, "Incorrect scheme")
  of IncorrectNodeId:
    raise newException(ENodeException, "Incorrect node id")
  of IncompleteENode:
    raise newException(ENodeException, "Incomplete enode")
  of Success:
    discard
proc initENode*(e: string, node: var ENode): ENodeStatus =
  ## Initialize ENode ``node`` from URI string ``e``.
  ##
  ## Expected format:
  ## ``enode://<128 hex chars of pubkey>@<ip>:<tcpport>[?discport=<udpport>]``
  ## Returns a status code; ``node`` is only fully valid on ``Success``.
  var
    uport: int = 0          # UDP discovery port
    tport: int = 0          # TCP port
    uri: Uri = initUri()
    data: string

  if len(e) == 0:
    return IncorrectUri

  parseUri(e, uri)

  if len(uri.scheme) == 0 or uri.scheme.toLowerAscii() != "enode":
    return IncorrectScheme

  # Public key is carried in the username part: 64 bytes as 128 hex chars.
  if len(uri.username) != 128:
    return IncorrectNodeId

  for i in uri.username:
    if i notin {'A'..'F', 'a'..'f', '0'..'9'}:
      return IncorrectNodeId

  if len(uri.password) != 0 or len(uri.path) != 0 or len(uri.anchor) != 0:
    return IncorrectUri

  if len(uri.hostname) == 0:
    return IncorrectIP

  try:
    if len(uri.port) == 0:
      return IncorrectPort
    tport = parseInt(uri.port)
    if tport <= 0 or tport > 65535:
      return IncorrectPort
  except:
    return IncorrectPort

  if len(uri.query) > 0:
    # Only a single "discport=<n>" query parameter is accepted.
    if not uri.query.toLowerAscii().startsWith("discport="):
      return IncorrectDiscPort
    try:
      uport = parseInt(uri.query[9..^1])
      if uport <= 0 or uport > 65535:
        return IncorrectDiscPort
    except:
      return IncorrectDiscPort
  else:
    # Without an explicit discport the UDP port defaults to the TCP port.
    uport = tport

  try:
    data = parseHexStr(uri.username)
    if recoverPublicKey(cast[seq[byte]](data),
                        node.pubkey) != EthKeysStatus.Success:
      return IncorrectNodeId
  except:
    return IncorrectNodeId

  try:
    node.address.ip = parseIpAddress(uri.hostname)
  except:
    # Roll back the already-written pubkey so a failed parse leaves no
    # partially initialized key behind.
    zeroMem(addr node.pubkey, KeyLength * 2)
    return IncorrectIP

  node.address.tcpPort = Port(tport)
  node.address.udpPort = Port(uport)
  result = Success
proc initENode*(uri: string): ENode {.inline.} =
  ## Returns ENode object from URI string ``uri``.
  ## Raises ``ENodeException`` on any parse failure.
  let status = initENode(uri, result)
  if status == Success:
    return
  raiseENodeError(status)
proc initENode*(pubkey: PublicKey, address: Address): ENode {.inline.} =
  ## Create ENode object from public key ``pubkey`` and ``address``.
  result = ENode(pubkey: pubkey, address: address)
proc isCorrect*(n: ENode): bool =
  ## Returns ``true`` if ENode ``n`` is properly filled, i.e. its public
  ## key has at least one non-zero byte.
  for octet in n.pubkey.data:
    if octet != 0x00'u8:
      return true
  result = false
proc `$`*(n: ENode): string =
  ## Returns string representation of ENode in
  ## ``enode://<pubkey>@<host>:<tcpport>[?discport=<udpport>]`` form.
  ## Raises ``ENodeException`` for an unfilled node.
  if not isCorrect(n):
    raiseENodeError(IncompleteENode)
  # IPv6 hosts are bracketed, IPv4 hosts are bare.
  let host =
    if n.address.ip.family == IpAddressFamily.IPv4:
      $(n.address.ip)
    else:
      "[" & $(n.address.ip) & "]"
  result = "enode://" & $n.pubkey & "@" & host
  if uint16(n.address.tcpPort) != 0:
    result.add(":" & $int(n.address.tcpPort))
    # Emit discport only when it differs from the TCP port.
    if uint16(n.address.udpPort) != uint16(n.address.tcpPort):
      result.add("?" & "discport=" & $int(n.address.udpPort))
@ -0,0 +1,505 @@
|
||||||
|
#
|
||||||
|
# Ethereum P2P
|
||||||
|
# (c) Copyright 2018
|
||||||
|
# Status Research & Development GmbH
|
||||||
|
#
|
||||||
|
# Licensed under either of
|
||||||
|
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||||
|
# MIT license (LICENSE-MIT)
|
||||||
|
#
|
||||||
|
|
||||||
|
import
|
||||||
|
tables, hashes, times, algorithm, sets, sequtils, random,
|
||||||
|
asyncdispatch2, chronicles, eth/keys, stint, nimcrypto,
|
||||||
|
enode
|
||||||
|
|
||||||
|
export sets # TODO: This should not be needed, but compilation fails otherwise
|
||||||
|
|
||||||
|
logScope:
|
||||||
|
topics = "kademlia"
|
||||||
|
|
||||||
|
type
  KademliaProtocol* [Wire] = ref object
    wire: Wire                          # transport used to send ping/pong/findNode
    thisNode: Node                      # the local node
    routing: RoutingTable               # k-bucket routing table
    pongFutures: Table[seq[byte], Future[bool]]   # pending pongs keyed by ping id
    pingFutures: Table[Node, Future[bool]]        # pending inbound pings per node
    neighboursCallbacks: Table[Node, proc(n: seq[Node]) {.gcsafe.}]  # findNode reply handlers

  NodeId* = UInt256

  Node* = ref object
    node*: ENode                        # transport address + public key
    id*: NodeId                         # keccak256 of the raw public key

  RoutingTable = object
    thisNode: Node
    buckets: seq[KBucket]               # ordered, contiguous id-range buckets

  KBucket = ref object
    istart, iend: UInt256               # inclusive id range covered by this bucket
    nodes: seq[Node]                    # least-recently seen first
    replacementCache: seq[Node]         # candidates for when a slot frees up
    lastUpdated: float # epochTime
const
  BUCKET_SIZE = 16        # max nodes per k-bucket (the "k" parameter)
  BITS_PER_HOP = 8        # controls when a non-local bucket may be split
  REQUEST_TIMEOUT = 900 # timeout of message round trips
  FIND_CONCURRENCY = 3 # parallel find node lookups
  ID_SIZE = 256           # node id width in bits
proc toNodeId*(pk: PublicKey): NodeId =
  ## Derive a Kademlia node id: keccak256 of the raw public key, read as a
  ## big-endian 256-bit integer.
  readUintBE[256](keccak256.digest(pk.getRaw()).data)
proc newNode*(pk: PublicKey, address: Address): Node =
  ## Build a Node from a public key and network address.
  result = Node(node: initENode(pk, address), id: pk.toNodeId())
proc newNode*(uriString: string): Node =
  ## Build a Node from an enode URI string; raises on malformed input.
  let parsed = initENode(uriString)
  result = Node(node: parsed, id: parsed.pubkey.toNodeId())
proc newNode*(enode: ENode): Node =
  ## Build a Node from an already-parsed ENode.
  result = Node(node: enode, id: enode.pubkey.toNodeId())
# XOR metric: Kademlia distance between this node's id and `id`.
proc distanceTo(n: Node, id: NodeId): UInt256 = n.id xor id
proc `$`*(n: Node): string =
  ## Human-readable node representation; a nil Node denotes the local node.
  if n.isNil:
    result = "Node[local]"
  else:
    result = "Node[" & $n.node.address.ip & ":" & $n.node.address.udpPort & "]"
# Identity of a Node is its public key: hashing and equality both ignore
# the network address.
proc hash*(n: Node): hashes.Hash = hash(n.node.pubkey.data)
proc `==`*(a, b: Node): bool = a.node.pubkey == b.node.pubkey
proc newKBucket(istart, iend: NodeId): KBucket =
  ## Allocate an empty bucket covering the inclusive id range
  ## [`istart`, `iend`].
  result = KBucket(
    istart: istart,
    iend: iend,
    nodes: @[],
    replacementCache: @[])
proc midpoint(k: KBucket): NodeId =
  ## Middle of the bucket's id range; used as the split point.
  k.istart + (k.iend - k.istart) div 2.u256
# XOR distance from the bucket's midpoint to `id`.
proc distanceTo(k: KBucket, id: NodeId): UInt256 = k.midpoint xor id
proc nodesByDistanceTo(k: KBucket, id: NodeId): seq[Node] =
  ## Bucket contents sorted by XOR distance to `id`, closest first.
  sortedByIt(k.nodes, it.distanceTo(id))
proc len(k: KBucket): int {.inline.} = k.nodes.len
# Head = least recently seen node (eviction candidate).
proc head(k: KBucket): Node {.inline.} = k.nodes[0]
proc add(k: KBucket, n: Node): Node =
  ## Try to add the given node to this bucket.
  ## If the node is already present, it is moved to the tail of the list, and we return None.
  ## If the node is not already present and the bucket has fewer than k entries, it is inserted
  ## at the tail of the list, and we return None.
  ## If the bucket is full, we add the node to the bucket's replacement cache and return the
  ## node at the head of the list (i.e. the least recently seen), which should be evicted if it
  ## fails to respond to a ping.
  ## (Here "None" means nil.)
  k.lastUpdated = epochTime()
  let nodeIdx = k.nodes.find(n)
  if nodeIdx != -1:
    # Already known: move to tail (most recently seen).
    k.nodes.delete(nodeIdx)
    k.nodes.add(n)
  elif k.len < BUCKET_SIZE:
    k.nodes.add(n)
  else:
    # Bucket full: park in the replacement cache, hand back the eviction
    # candidate at the head.
    k.replacementCache.add(n)
    return k.head
  return nil
proc removeNode(k: KBucket, n: Node) =
  ## Remove `n` from the bucket; silently does nothing when absent.
  let idx = k.nodes.find(n)
  if idx >= 0:
    k.nodes.delete(idx)
proc split(k: KBucket): tuple[lower, upper: KBucket] =
  ## Split at the median id
  ##
  ## Nodes and replacement-cache entries are redistributed by whether their
  ## id falls at or below the split point.
  let splitid = k.midpoint
  result.lower = newKBucket(k.istart, splitid)
  result.upper = newKBucket(splitid + 1.u256, k.iend)
  for node in k.nodes:
    let bucket = if node.id <= splitid: result.lower else: result.upper
    # add() cannot overflow here since each half gets at most BUCKET_SIZE nodes.
    discard bucket.add(node)
  for node in k.replacementCache:
    let bucket = if node.id <= splitid: result.lower else: result.upper
    bucket.replacementCache.add(node)
proc inRange(k: KBucket, n: Node): bool {.inline.} =
  ## Whether the node's id falls within this bucket's inclusive range.
  k.istart <= n.id and n.id <= k.iend

proc isFull(k: KBucket): bool = k.len == BUCKET_SIZE

proc contains(k: KBucket, n: Node): bool = n in k.nodes
proc binaryGetBucketForNode(buckets: openarray[KBucket],
                            n: Node): KBucket {.inline.} =
  ## Given a list of ordered buckets, returns the bucket for a given node.
  ## Raises ValueError when no bucket covers the node's id.
  # Binary search on the buckets' upper bounds.
  let bucketPos = lowerBound(buckets, n.id) do(a: KBucket, b: NodeId) -> int:
    cmp(a.iend, b)
  # Prevents edge cases where bisect_left returns an out of range index
  if bucketPos < buckets.len:
    let bucket = buckets[bucketPos]
    if bucket.istart <= n.id and n.id <= bucket.iend:
      result = bucket

  if result.isNil:
    raise newException(ValueError, "No bucket found for node with id " & $n.id)
proc computeSharedPrefixBits(nodes: openarray[Node]): int =
  ## Count the number of prefix bits shared by all nodes.
  ##
  ## Grows a most-significant-bit mask one bit at a time; the first width at
  ## which the masked ids disagree determines the shared prefix length.
  if nodes.len < 2:
    return ID_SIZE

  var mask = zero(UInt256)
  let one = one(UInt256)

  for i in 1 .. ID_SIZE:
    mask = mask or (one shl (ID_SIZE - i))
    let reference = nodes[0].id and mask
    for j in 1 .. nodes.high:
      if (nodes[j].id and mask) != reference: return i - 1

  # All ID_SIZE bits identical means duplicate ids, which must never happen.
  # doAssert (not assert) so the invariant also fires in -d:danger builds,
  # where a stripped assert would let this proc silently return 0.
  doAssert(false, "Unable to calculate number of shared prefix bits")
proc init(r: var RoutingTable, thisNode: Node) {.inline.} =
  ## Initialize the routing table with a single bucket covering the whole
  ## id space.
  r.thisNode = thisNode
  r.buckets = @[newKBucket(0.u256, high(Uint256))]
proc splitBucket(r: var RoutingTable, index: int) =
  ## Replace the bucket at `index` with its two halves, preserving order.
  let (lower, upper) = r.buckets[index].split()
  r.buckets[index] = lower
  r.buckets.insert(upper, index + 1)
proc bucketForNode(r: RoutingTable, n: Node): KBucket =
  ## Bucket whose id range contains `n`; raises ValueError if none.
  binaryGetBucketForNode(r.buckets, n)

proc removeNode(r: var RoutingTable, n: Node) =
  ## Remove `n` from its bucket (no-op when unknown).
  r.bucketForNode(n).removeNode(n)
proc addNode(r: var RoutingTable, n: Node): Node =
  ## Insert `n` into the routing table. Returns nil on success, or the
  ## least-recently-seen node of a full bucket that should be pinged and
  ## possibly evicted.
  assert(n != r.thisNode)
  let bucket = r.bucketForNode(n)
  let evictionCandidate = bucket.add(n)
  if not evictionCandidate.isNil:
    # Split if the bucket has the local node in its range or if the depth is not congruent
    # to 0 mod BITS_PER_HOP

    let depth = computeSharedPrefixBits(bucket.nodes)
    if bucket.inRange(r.thisNode) or (depth mod BITS_PER_HOP != 0 and depth != ID_SIZE):
      r.splitBucket(r.buckets.find(bucket))
      return r.addNode(n) # retry

    # Nothing added, ping evictionCandidate
    return evictionCandidate
proc contains(r: RoutingTable, n: Node): bool = n in r.bucketForNode(n)

proc bucketsByDistanceTo(r: RoutingTable, id: NodeId): seq[KBucket] =
  ## Buckets sorted by XOR distance of their midpoint to `id`.
  sortedByIt(r.buckets, it.distanceTo(id))

proc notFullBuckets(r: RoutingTable): seq[KBucket] =
  ## Buckets that still have free slots.
  r.buckets.filterIt(not it.isFull)
proc neighbours(r: RoutingTable, id: NodeId, k: int = BUCKET_SIZE): seq[Node] =
  ## Return up to k neighbours of the given node.
  # Collect up to 2k candidates from the closest buckets, then keep the
  # k globally closest ones.
  result = newSeqOfCap[Node](k * 2)
  for bucket in r.bucketsByDistanceTo(id):
    for n in bucket.nodesByDistanceTo(id):
      if n.id != id:
        result.add(n)
        if result.len == k * 2:
          break
  result = sortedByIt(result, it.distanceTo(id))
  if result.len > k:
    result.setLen(k)

proc len(r: RoutingTable): int =
  ## Total number of nodes across all buckets.
  for b in r.buckets: result += b.len
proc newKademliaProtocol*[Wire](thisNode: Node,
                                wire: Wire): KademliaProtocol[Wire] =
  ## Create a Kademlia protocol instance for `thisNode`, sending messages
  ## through `wire`, with empty pending-request tables and a fresh
  ## single-bucket routing table.
  result = KademliaProtocol[Wire](
    thisNode: thisNode,
    wire: wire,
    pongFutures: initTable[seq[byte], Future[bool]](),
    pingFutures: initTable[Node, Future[bool]](),
    neighboursCallbacks: initTable[Node, proc(n: seq[Node])]())
  result.routing.init(thisNode)
# Forward declaration: bond is mutually used by updateRoutingTable and
# populateNotFullBuckets below; body follows later in this module.
proc bond(k: KademliaProtocol, n: Node): Future[bool] {.async.}
proc updateRoutingTable(k: KademliaProtocol, n: Node) =
  ## Update the routing table entry for the given node.
  let evictionCandidate = k.routing.addNode(n)
  if not evictionCandidate.isNil:
    # This means we couldn't add the node because its bucket is full, so schedule a bond()
    # with the least recently seen node on that bucket. If the bonding fails the node will
    # be removed from the bucket and a new one will be picked from the bucket's
    # replacement cache.
    asyncCheck k.bond(evictionCandidate)
proc doSleep(p: proc()) {.async.} =
  ## Run `p` after REQUEST_TIMEOUT milliseconds; used to expire pending
  ## request futures.
  await sleepAsync(REQUEST_TIMEOUT)
  p()
template onTimeout(b: untyped) =
  ## Schedule body `b` to run after REQUEST_TIMEOUT ms (fire-and-forget).
  asyncCheck doSleep() do():
    b
proc pingId(n: Node, token: seq[byte]): seq[byte] {.inline.} =
  ## Key identifying a pending pong: ping token followed by the remote
  ## node's raw public key bytes.
  result = token
  result.add(@(n.node.pubkey.data))
proc waitPong(k: KademliaProtocol, n: Node, pingid: seq[byte]): Future[bool] =
  ## Future that completes true when a pong matching `pingid` arrives, or
  ## false after REQUEST_TIMEOUT ms.
  assert(pingid notin k.pongFutures, "Already waiting for pong from " & $n)
  result = newFuture[bool]("waitPong")
  let fut = result
  k.pongFutures[pingid] = result
  onTimeout:
    if not fut.finished:
      k.pongFutures.del(pingid)
      fut.complete(false)
proc ping(k: KademliaProtocol, n: Node): seq[byte] =
  ## Send a ping to `n` and return the wire-level token used to match the pong.
  assert(n != k.thisNode)
  result = k.wire.sendPing(n)
proc waitPing(k: KademliaProtocol, n: Node): Future[bool] =
  ## Future that completes true when `n` pings us, or false after
  ## REQUEST_TIMEOUT ms.
  result = newFuture[bool]("waitPing")
  assert(n notin k.pingFutures)
  k.pingFutures[n] = result
  let fut = result
  onTimeout:
    if not fut.finished:
      k.pingFutures.del(n)
      fut.complete(false)
proc waitNeighbours(k: KademliaProtocol, remote: Node): Future[seq[Node]] =
  ## Future yielding the neighbours `remote` sends in reply to a findNode.
  ## Completes early once BUCKET_SIZE neighbours are collected, otherwise
  ## with whatever arrived before the timeout.
  assert(remote notin k.neighboursCallbacks)
  result = newFuture[seq[Node]]("waitNeighbours")
  let fut = result
  var neighbours = newSeqOfCap[Node](BUCKET_SIZE)
  k.neighboursCallbacks[remote] = proc(n: seq[Node]) =
    # This callback is expected to be called multiple times because nodes usually
    # split the neighbours replies into multiple packets, so we only complete the
    # future event.set() we've received enough neighbours.

    for i in n:
      if i != k.thisNode:
        neighbours.add(i)
        if neighbours.len == BUCKET_SIZE:
          k.neighboursCallbacks.del(remote)
          assert(not fut.finished)
          fut.complete(neighbours)

  onTimeout:
    if not fut.finished:
      k.neighboursCallbacks.del(remote)
      fut.complete(neighbours)
proc populateNotFullBuckets(k: KademliaProtocol) =
  ## Go through all buckets that are not full and try to fill them.
  ##
  ## For every node in the replacement cache of every non-full bucket, try to bond.
  ## When the bonding succeeds the node is automatically added to the bucket.
  for bucket in k.routing.notFullBuckets:
    for node in bucket.replacementCache:
      asyncCheck k.bond(node)
proc bond(k: KademliaProtocol, n: Node): Future[bool] {.async.} =
  ## Bond with the given node.
  ##
  ## Bonding consists of pinging the node, waiting for a pong and maybe a ping as well.
  ## It is necessary to do this at least once before we send findNode requests to a node.
  ## Returns true when the node ends up in the routing table.
  info "Bonding to peer", n
  if n in k.routing:
    return true

  let pid = pingId(n, k.ping(n))
  if pid in k.pongFutures:
    # Fixed log message: previously read "Binding failed", inconsistent with
    # every other bonding diagnostic in this proc.
    debug "Bonding failed, already waiting for pong", n
    return false

  let gotPong = await k.waitPong(n, pid)
  if not gotPong:
    debug "Bonding failed, didn't receive pong from", n
    # Drop the failing node and schedule a populateNotFullBuckets() call to try and
    # fill its spot.
    k.routing.removeNode(n)
    k.populateNotFullBuckets()
    return false

  # Give the remote node a chance to ping us before we move on and start sending findNode
  # requests. It is ok for waitPing() to timeout and return false here as that just means
  # the remote remembers us.
  if n in k.pingFutures:
    debug "Bonding failed, already waiting for ping", n
    return false

  discard await k.waitPing(n)

  debug "Bonding completed successfully", n
  k.updateRoutingTable(n)
  return true
proc sortByDistance(nodes: var seq[Node], nodeId: NodeId, maxResults = 0) =
  ## Sort `nodes` in place by XOR distance to `nodeId`; when `maxResults`
  ## is non-zero, truncate to the closest `maxResults`.
  nodes = nodes.sortedByIt(it.distanceTo(nodeId))
  if maxResults != 0 and nodes.len > maxResults:
    nodes.setLen(maxResults)
proc lookup*(k: KademliaProtocol, nodeId: NodeId): Future[seq[Node]] {.async.} =
  ## Lookup performs a network search for nodes close to the given target.

  ## It approaches the target by querying nodes that are closer to it on each iteration. The
  ## given target does not need to be an actual node identifier.
  var nodesAsked = initSet[Node]()
  var nodesSeen = initSet[Node]()

  proc findNode(nodeId: NodeId, remote: Node): Future[seq[Node]] {.async.} =
    # Query one remote for neighbours of nodeId, bond with the previously
    # unseen ones, and return only those that bonded successfully.
    k.wire.sendFindNode(remote, nodeId)
    var candidates = await k.waitNeighbours(remote)
    if candidates.len == 0:
      trace "Got no candidates from peer, returning", peer = remote
      result = candidates
    else:
      # The following line:
      # 1. Add new candidates to nodesSeen so that we don't attempt to bond with failing ones
      # in the future
      # 2. Removes all previously seen nodes from candidates
      # 3. Deduplicates candidates
      candidates.keepItIf(not nodesSeen.containsOrIncl(it))
      trace "Got new candidates", count = candidates.len
      let bonded = await all(candidates.mapIt(k.bond(it)))
      for i in 0 ..< bonded.len:
        if not bonded[i]: candidates[i] = nil
      candidates.keepItIf(not it.isNil)
      trace "Bonded with candidates", count = candidates.len
      result = candidates

  proc excludeIfAsked(nodes: seq[Node]): seq[Node] =
    # Closest FIND_CONCURRENCY nodes we have not yet queried.
    result = toSeq(items(nodes.toSet() - nodesAsked))
    sortByDistance(result, nodeId, FIND_CONCURRENCY)

  var closest = k.routing.neighbours(nodeId)
  trace "Starting lookup; initial neighbours: ", closest
  var nodesToAsk = excludeIfAsked(closest)
  while nodesToAsk.len != 0:
    trace "Node lookup; querying ", nodesToAsk
    nodesAsked.incl(nodesToAsk.toSet())
    let results = await all(nodesToAsk.mapIt(findNode(nodeId, it)))
    for candidates in results:
      closest.add(candidates)
    sortByDistance(closest, nodeId, BUCKET_SIZE)
    nodesToAsk = excludeIfAsked(closest)

  trace "Kademlia lookup finished", target = nodeId.toHex, closest
  result = closest
proc lookupRandom*(k: KademliaProtocol): Future[seq[Node]] =
  ## Lookup with a uniformly random target id; used to refresh the table.
  var id: NodeId
  discard randomBytes(addr id, id.sizeof)
  k.lookup(id)
proc resolve*(k: KademliaProtocol, id: NodeId): Future[Node] {.async.} =
  ## Find the node with exactly the given id via a lookup; returns nil when
  ## no queried node matches.
  let closest = await k.lookup(id)
  for n in closest:
    if n.id == id: return n
proc bootstrap*(k: KademliaProtocol, bootstrapNodes: seq[Node]) {.async.} =
  ## Bond with the bootstrap nodes and, if at least one succeeds, seed the
  ## routing table with a random lookup.
  let bonded = await all(bootstrapNodes.mapIt(k.bond(it)))
  if true notin bonded:
    info "Failed to bond with bootstrap nodes "
    return
  discard await k.lookupRandom()
proc recvPong*(k: KademliaProtocol, n: Node, token: seq[byte]) =
  ## Handle an incoming pong: complete the matching waitPong future, if any.
  debug "<<< pong from ", n
  # Must mirror pingId(): token followed by the sender's raw public key.
  let pingid = token & @(n.node.pubkey.data)
  var future: Future[bool]
  if k.pongFutures.take(pingid, future):
    future.complete(true)
proc recvPing*(k: KademliaProtocol, n: Node, msgHash: any) =
  ## Handle an incoming ping: record the sender, reply with a pong echoing
  ## `msgHash`, and complete any pending waitPing future for this node.
  debug "<<< ping from ", n
  k.updateRoutingTable(n)
  k.wire.sendPong(n, msgHash)

  var future: Future[bool]
  if k.pingFutures.take(n, future):
    future.complete(true)
proc recvNeighbours*(k: KademliaProtocol, remote: Node, neighbours: seq[Node]) =
  ## Process a neighbours response.
  ##
  ## Neighbours responses should only be received as a reply to a find_node, and that is only
  ## done as part of node lookup, so the actual processing is left to the callback from
  ## neighbours_callbacks, which is added (and removed after it's done or timed out) in
  ## wait_neighbours().
  debug "Received neighbours", remote, neighbours
  let cb = k.neighboursCallbacks.getOrDefault(remote)
  if not cb.isNil:
    cb(neighbours)
  else:
    debug "Unexpected neighbours, probably came too late", remote
proc recvFindNode*(k: KademliaProtocol, remote: Node, nodeId: NodeId) =
  ## Handle a find_node request: reply with our closest known neighbours of
  ## `nodeId`, but only to nodes we have already bonded with.
  if remote notin k.routing:
    # FIXME: This is not correct; a node we've bonded before may have become unavailable
    # and thus removed from self.routing, but once it's back online we should accept
    # find_nodes from them.
    debug "Ignoring find_node request from unknown node ", remote
    return
  k.updateRoutingTable(remote)
  var found = k.routing.neighbours(nodeId)
  # Sort by id for a deterministic reply ordering.
  found.sort() do(x, y: Node) -> int: cmp(x.id, y.id)
  k.wire.sendNeighbours(remote, found)
proc randomNodes*(k: KademliaProtocol, count: int): seq[Node] =
  ## Return up to `count` distinct nodes sampled at random from the routing
  ## table (capped at the number of nodes currently known).
  var count = count
  let sz = k.routing.len
  if count > sz:
    debug "Not enough nodes", requested = count, present = sz
    count = sz

  result = newSeqOfCap[Node](count)
  var seen = initSet[Node]()

  # This is a rather inneficient way of randomizing nodes from all buckets, but even if we
  # iterate over all nodes in the routing table, the time it takes would still be
  # insignificant compared to the time it takes for the network roundtrips when connecting
  # to nodes.
  while len(seen) < count:
    let bucket = k.routing.buckets.rand()
    if bucket.nodes.len != 0:
      let node = bucket.nodes.rand()
      if node notin seen:
        result.add(node)
        seen.incl(node)
# Total number of nodes currently known to the routing table.
proc nodesDiscovered*(k: KademliaProtocol): int {.inline.} = k.routing.len
when isMainModule:
  # Small self-test of computeSharedPrefixBits with hand-picked node ids.
  proc randomNode(): Node =
    newNode("enode://aa36fdf33dd030378a0168efe6ed7d5cc587fafa3cdd375854fe735a2e11ea3650ba29644e2db48368c46e1f60e716300ba49396cd63778bf8a818c09bded46f@13.93.211.84:30303")

  var nodes = @[randomNode()]
  # A single node shares all ID_SIZE bits with itself.
  doAssert(computeSharedPrefixBits(nodes) == ID_SIZE)
  nodes.add(randomNode())
  nodes[0].id = 0b1.u256
  nodes[1].id = 0b0.u256
  # Ids differ only in the lowest bit.
  doAssert(computeSharedPrefixBits(nodes) == ID_SIZE - 1)

  nodes[0].id = 0b010.u256
  nodes[1].id = 0b110.u256
  # Ids diverge at the third-lowest bit.
  doAssert(computeSharedPrefixBits(nodes) == ID_SIZE - 3)
|
@ -0,0 +1,218 @@
|
||||||
|
import
|
||||||
|
macros, deques, algorithm,
|
||||||
|
asyncdispatch2, eth/[keys, rlp], eth/common/eth_types,
|
||||||
|
private/p2p_types, rlpx, ../p2p
|
||||||
|
|
||||||
|
type
|
||||||
|
Action = proc (p: Peer, data: Rlp): Future[void] {.gcsafe.}
|
||||||
|
|
||||||
|
ProtocolMessagePair = object
|
||||||
|
protocol: ProtocolInfo
|
||||||
|
id: int
|
||||||
|
|
||||||
|
ExpectedMsg = object
|
||||||
|
msg: ProtocolMessagePair
|
||||||
|
response: Action
|
||||||
|
|
||||||
|
MockConf* = ref object
|
||||||
|
keys*: KeyPair
|
||||||
|
address*: Address
|
||||||
|
networkId*: uint
|
||||||
|
chain*: AbstractChainDb
|
||||||
|
clientId*: string
|
||||||
|
waitForHello*: bool
|
||||||
|
|
||||||
|
devp2pHandshake: ExpectedMsg
|
||||||
|
handshakes: seq[ExpectedMsg]
|
||||||
|
protocols: seq[ProtocolInfo]
|
||||||
|
|
||||||
|
expectedMsgs: Deque[ExpectedMsg]
|
||||||
|
receivedMsgsCount: int
|
||||||
|
when useSnappy:
|
||||||
|
useCompression*: bool
|
||||||
|
|
||||||
|
var
|
||||||
|
nextUnusedMockPort = 40304
|
||||||
|
|
||||||
|
# Adapters that turn the various handler shapes accepted by `expect`
# into the canonical `Action` signature.

proc toAction(a: Action): Action = a

proc toAction[N](actions: array[N, Action]): Action =
  mixin await
  return proc (peer: Peer, data: Rlp) {.async.} =
    for action in actions:
      await action(peer, data)

proc toAction(a: proc (): Future[void]): Action =
  return proc (peer: Peer, data: Rlp) {.async.} =
    await a()

proc toAction(a: proc (peer: Peer): Future[void]): Action =
  return proc (peer: Peer, data: Rlp) {.async.} =
    await a(peer)
|
||||||
|
|
||||||
|
proc delay*(duration: int): Action =
  ## An action that just sleeps `duration` milliseconds before completing.
  return proc (p: Peer, data: Rlp) {.async.} =
    await sleepAsync(duration)
|
||||||
|
|
||||||
|
proc reply(bytes: Bytes): Action =
  ## An action that sends the given raw message bytes back to the peer.
  return proc (p: Peer, data: Rlp) {.async.} =
    await p.sendMsg(bytes)

proc reply*[Msg](msg: Msg): Action =
  ## An action that sends the given protocol message back to the peer.
  mixin await
  return proc (p: Peer, data: Rlp) {.async.} =
    await p.send(msg)
|
||||||
|
|
||||||
|
proc localhostAddress*(port: int): Address =
  ## Loopback address using `port` for both TCP and UDP.
  let p = Port(port)
  Address(udpPort: p, tcpPort: p, ip: parseIpAddress("127.0.0.1"))
|
||||||
|
|
||||||
|
proc makeProtoMsgPair(MsgType: type): ProtocolMessagePair =
  ## Resolve a message type to its (protocol info, message id) pair.
  mixin msgProtocol, protocolInfo
  ProtocolMessagePair(protocol: MsgType.msgProtocol.protocolInfo,
                      id: MsgType.msgId)
|
||||||
|
|
||||||
|
proc readReqId*(rlp: Rlp): int =
  ## Read the request id from `rlp` without advancing the caller's reader
  ## (operates on a local copy).
  var reader = rlp
  reader.read(int)
|
||||||
|
|
||||||
|
proc expectationViolationMsg(mock: MockConf,
                             reason: string,
                             receivedMsg: ptr MessageInfo): string =
  ## Build a human-readable report naming the violated expectation and
  ## marking where in the expected-message queue the mock currently is.
  result = "[Mock expectation violated] " & reason & ": " & receivedMsg.name
  for idx in 0 ..< mock.expectedMsgs.len:
    let m = mock.expectedMsgs[idx].msg
    result.add "\n " & m.protocol.messages[m.id].name
    if idx == mock.receivedMsgsCount: result.add " <- we are here"
  result.add "\n"
|
||||||
|
|
||||||
|
proc addProtocol(mock: MockConf, p: ProtocolInfo): ProtocolInfo =
  ## Register a deep copy of protocol `p` whose message thunks all route
  ## into the mock's expectation queue; returns the copied ProtocolInfo.
  result = create ProtocolInfoObj
  deepCopy(result[], p[])

  proc incomingMsgHandler(p: Peer, receivedMsgId: int, rlp: Rlp): Future[void] {.gcsafe.} =
    let (receivedMsgProto, receivedMsgInfo) = p.getMsgMetadata(receivedMsgId)
    let expectedMsgIdx = mock.receivedMsgsCount

    template fail(reason: string) =
      stdout.write mock.expectationViolationMsg(reason, receivedMsgInfo)
      quit 1

    # Bug fix: this bound check must be `>=`; with the previous `>`,
    # receiving one message past the end of the queue raised an IndexError
    # on the deque access below instead of printing the violation report.
    if expectedMsgIdx >= mock.expectedMsgs.len:
      fail "Mock peer received more messages than expected"

    let expectedMsg = mock.expectedMsgs[expectedMsgIdx]
    if receivedMsgInfo.id != expectedMsg.msg.id or
       receivedMsgProto.name != expectedMsg.msg.protocol.name:
      fail "Mock peer received an unexpected message"

    inc mock.receivedMsgsCount
    if expectedMsg.response != nil:
      return expectedMsg.response(p, rlp)
    else:
      result = newFuture[void]()
      result.complete()

  for m in mitems(result.messages):
    m.thunk = incomingMsgHandler

  result.handshake = nil

  # TODO This mock conf can override this
  result.disconnectHandler = nil

  mock.protocols.add result
|
||||||
|
|
||||||
|
proc addHandshake*(mock: MockConf, msg: auto) =
  ## Register `msg` as the expected handshake of its protocol; the mock
  ## answers the handshake by echoing `msg` back.
  var msgInfo = makeProtoMsgPair(msg.type)
  msgInfo.protocol = mock.addProtocol(msgInfo.protocol)
  let expectedMsg = ExpectedMsg(msg: msgInfo, response: reply(msg))

  when msg is devp2p.hello:
    # Bug fix: the assignment was missing the `mock.` qualifier, so this
    # branch failed to compile once instantiated with devp2p.hello.
    mock.devp2pHandshake = expectedMsg
  else:
    mock.handshakes.add expectedMsg
|
||||||
|
|
||||||
|
proc addCapability*(mock: MockConf, Protocol: type) =
  ## Enable `Protocol` on the mock. When the protocol declares a
  ## defaultTestingHandshake, it is auto-registered as an expectation.
  mixin defaultTestingHandshake, protocolInfo

  when compiles(defaultTestingHandshake(Protocol)):
    mock.addHandshake defaultTestingHandshake(Protocol)
  else:
    discard mock.addProtocol(Protocol.protocolInfo)
|
||||||
|
|
||||||
|
proc expectImpl(mock: MockConf, msg: ProtocolMessagePair, action: Action) =
  ## Append an expectation for `msg`, answered by `action`, to the queue.
  let expectation = ExpectedMsg(msg: msg, response: action)
  mock.expectedMsgs.addLast expectation
|
||||||
|
|
||||||
|
macro expect*(mock: MockConf, MsgType: type, handler: untyped = nil): untyped =
  ## Queue an expectation that `MsgType` arrives next, optionally running
  ## `handler` (a lambda/do block, bare proc, or Action) as the response.
  if handler.kind in {nnkLambda, nnkDo}:
    # anonymous handlers become async procs so their bodies can await
    handler.addPragma ident("async")

  result = newCall(
    bindSym"expectImpl",
    mock,
    newCall(bindSym"makeProtoMsgPair", MsgType.getType),
    newCall(bindSym"toAction", handler))
|
||||||
|
|
||||||
|
template compression(m: MockConf): bool =
  ## Snappy flag for the node; always false in builds without snappy.
  when useSnappy:
    m.useCompression
  else:
    false
|
||||||
|
|
||||||
|
proc newMockPeer*(userConfigurator: proc (m: MockConf)): EthereumNode =
  ## Build a scripted mock node: `userConfigurator` registers capabilities
  ## and message expectations, then the node starts listening.
  var conf = new MockConf
  conf.keys = newKeyPair()
  conf.address = localhostAddress(nextUnusedMockPort)
  inc nextUnusedMockPort
  conf.networkId = 1'u
  conf.clientId = "Mock Peer"
  conf.waitForHello = true
  conf.expectedMsgs = initDeque[ExpectedMsg]()

  userConfigurator(conf)

  var node = newEthereumNode(conf.keys,
                             conf.address,
                             conf.networkId,
                             conf.chain,
                             conf.clientId,
                             addAllCapabilities = false,
                             conf.compression())

  conf.handshakes.sort do (lhs, rhs: ExpectedMsg) -> int:
    # this is intentionally sorted in reverse order, so we
    # can add them in the correct order below.
    return -cmp(lhs.msg.protocol.index, rhs.msg.protocol.index)

  for h in conf.handshakes:
    conf.expectedMsgs.addFirst h

  for p in conf.protocols:
    node.addCapability p

  when false:
    # TODO: This part doesn't work correctly yet.
    # rlpx{Connect,Accept} control the handshake.
    if conf.devp2pHandshake.response != nil:
      conf.expectedMsgs.addFirst conf.devp2pHandshake
    else:
      proc sendHello(p: Peer, data: Rlp) {.async.} =
        await p.hello(devp2pVersion,
                      conf.clientId,
                      node.capabilities,
                      uint(node.address.tcpPort),
                      node.keys.pubkey.getRaw())

      conf.expectedMsgs.addFirst ExpectedMsg(
        msg: makeProtoMsgPair(p2p.hello),
        response: sendHello)

  node.startListening()
  return node
|
||||||
|
|
||||||
|
proc rlpxConnect*(node, otherNode: EthereumNode): Future[Peer] =
  ## Dial `otherNode` from `node` over RLPx.
  let remote = newNode(initENode(otherNode.keys.pubKey, otherNode.address))
  rlpx.rlpxConnect(node, remote)
|
||||||
|
|
|
@ -0,0 +1,116 @@
|
||||||
|
import
|
||||||
|
private/p2p_types
|
||||||
|
|
||||||
|
const tracingEnabled* = defined(p2pdump)

when tracingEnabled:
  import
    macros,
    serialization, json_serialization/writer,
    chronicles, chronicles_tail/configuration

  export
    # XXX: Nim visibility rules get in the way here.
    # It would be nice if the users of this module don't have to
    # import json_serializer, but this won't work at the moment,
    # because the `encode` call inside `logMsgEvent` has its symbols
    # mixed in from the module where `logMsgEvent` is called
    # (instead of from this module, which will be more logical).
    init, writeValue, getOutput
    # TODO: File this as an issue

  # Dedicated chronicles stream writing JSON records to p2p_messages.json,
  # consumed by the chronicles-tail plugin below.
  logStream p2pMessages[json[file(p2p_messages.json,truncate)]]
  p2pMessages.useTailPlugin "p2p_tracing_ctail_plugin.nim"

  template logRecord(eventName: static[string], args: varargs[untyped]) =
    p2pMessages.log LogLevel.NONE, eventName, topics = "p2pdump", args

  proc initTracing*(baseProtocol: ProtocolInfo,
                    userProtocols: seq[ProtocolInfo]) =
    # Emits a one-time "p2p_protocols" record describing the message ids
    # of every supported protocol.
    once:
      var w = init StringJsonWriter

      proc addProtocol(p: ProtocolInfo) =
        w.writeFieldName p.name
        w.beginRecord()
        for msg in p.messages:
          w.writeField $msg.id, msg.name
        w.endRecordField()

      w.beginRecord()
      addProtocol baseProtocol
      for userProtocol in userProtocols:
        addProtocol userProtocol
      w.endRecord()

      logRecord "p2p_protocols", data = JsonString(w.getOutput)

  proc logMsgEventImpl(eventName: static[string],
                       peer: Peer,
                       protocol: ProtocolInfo,
                       msgId: int,
                       json: string) =
    # this is kept as a separate proc to reduce the code bloat
    logRecord eventName, port = int(peer.network.address.tcpPort),
                         peer = $peer.remote,
                         protocol = protocol.name,
                         msgId, data = JsonString(json)

  proc logMsgEvent[Msg](eventName: static[string], peer: Peer, msg: Msg) =
    mixin msgProtocol, protocolInfo, msgId

    logMsgEventImpl(eventName, peer,
                    Msg.msgProtocol.protocolInfo,
                    Msg.msgId,
                    StringJsonWriter.encode(msg))

  proc logSentMsgFields*(peer: NimNode,
                         protocolInfo: NimNode,
                         msgId: int,
                         fields: openarray[NimNode]): NimNode =
    ## This generates the tracing code inserted in the message sending procs
    ## `fields` contains all the params that were serialized in the message
    var tracer = ident("tracer")

    result = quote do:
      var `tracer` = init StringJsonWriter
      beginRecord(`tracer`)

    # serialize each message param into the tracer record
    for f in fields:
      result.add newCall(bindSym"writeField", tracer, newLit($f), f)

    result.add quote do:
      endRecord(`tracer`)
      logMsgEventImpl("outgoing_msg", `peer`,
                      `protocolInfo`, `msgId`, getOutput(`tracer`))

  template logSentMsg*(peer: Peer, msg: auto) =
    logMsgEvent("outgoing_msg", peer, msg)

  template logReceivedMsg*(peer: Peer, msg: auto) =
    logMsgEvent("incoming_msg", peer, msg)

  template logConnectedPeer*(p: Peer) =
    logRecord "peer_connected",
              port = int(p.network.address.tcpPort),
              peer = $p.remote

  template logAcceptedPeer*(p: Peer) =
    logRecord "peer_accepted",
              port = int(p.network.address.tcpPort),
              peer = $p.remote

  template logDisconnectedPeer*(p: Peer) =
    logRecord "peer_disconnected",
              port = int(p.network.address.tcpPort),
              peer = $p.remote

else:
  # Tracing disabled: all hooks compile away to nothing.
  template initTracing*(baseProtocol: ProtocolInfo,
                        userProtocols: seq[ProtocolInfo]) = discard
  template logSentMsg*(peer: Peer, msg: auto) = discard
  template logReceivedMsg*(peer: Peer, msg: auto) = discard
  template logConnectedPeer*(peer: Peer) = discard
  template logAcceptedPeer*(peer: Peer) = discard
  template logDisconnectedPeer*(peer: Peer) = discard
|
||||||
|
|
|
@ -0,0 +1,12 @@
|
||||||
|
import
|
||||||
|
karax/[karaxdsl, vdom]
|
||||||
|
|
||||||
|
import
|
||||||
|
chronicles_tail/jsplugins
|
||||||
|
|
||||||
|
proc networkSectionContent: VNode =
  ## Body of the "Network" section in the chronicles-tail UI.
  result = buildHtml(tdiv):
    text "Networking"

addSection("Network", networkSectionContent)
|
||||||
|
|
|
@ -0,0 +1,211 @@
|
||||||
|
# PeerPool attempts to keep connections to at least min_peers
|
||||||
|
# on the given network.
|
||||||
|
|
||||||
|
import
|
||||||
|
os, tables, times, random, sequtils,
|
||||||
|
asyncdispatch2, chronicles, eth/[rlp, keys],
|
||||||
|
private/p2p_types, discovery, kademlia, rlpx
|
||||||
|
|
||||||
|
const
|
||||||
|
lookupInterval = 5
|
||||||
|
connectLoopSleepMs = 2000
|
||||||
|
|
||||||
|
proc newPeerPool*(network: EthereumNode,
                  networkId: uint, keyPair: KeyPair,
                  discovery: DiscoveryProtocol, clientId: string,
                  listenPort = Port(30303), minPeers = 10): PeerPool =
  ## Create a pool that keeps trying to stay connected to at least
  ## `minPeers` peers discovered via `discovery`.
  result = PeerPool(
    network: network,
    keyPair: keyPair,
    minPeers: minPeers,
    networkId: networkId,
    discovery: discovery,
    listenPort: listenPort)
  result.connectedNodes = initTable[Node, Peer]()
  result.connectingNodes = initSet[Node]()
  result.observers = initTable[int, PeerObserver]()
|
||||||
|
|
||||||
|
# Schedule `f` without awaiting it; asyncCheck surfaces any failure.
template ensureFuture(f: untyped) = asyncCheck f
|
||||||
|
|
||||||
|
proc nodesToConnect(p: PeerPool): seq[Node] {.inline.} =
  ## Random discovery candidates, excluding the bootstrap nodes themselves.
  let candidates = p.discovery.randomNodes(p.minPeers)
  candidates.filterIt(it notin p.discovery.bootstrapNodes)
|
||||||
|
|
||||||
|
proc addObserver(p: PeerPool, observerId: int, observer: PeerObserver) =
  ## Register `observer` under `observerId` and replay onPeerConnected for
  ## peers that are already connected.
  assert(observerId notin p.observers)
  p.observers[observerId] = observer
  if observer.onPeerConnected.isNil: return
  for peer in p.connectedNodes.values:
    observer.onPeerConnected(peer)
|
||||||
|
|
||||||
|
proc delObserver(p: PeerPool, observerId: int) =
  ## Unregister the observer stored under `observerId` (no-op if absent).
  p.observers.del(observerId)
|
||||||
|
|
||||||
|
# Convenience overloads keyed by a ref: the reference's address serves
# as the unique observer id.
proc addObserver*(p: PeerPool, observerId: ref, observer: PeerObserver) {.inline.} =
  p.addObserver(cast[int](observerId), observer)

proc delObserver*(p: PeerPool, observerId: ref) {.inline.} =
  p.delObserver(cast[int](observerId))
|
||||||
|
|
||||||
|
proc stopAllPeers(p: PeerPool) {.async.} =
  ## Tear down every active connection. Currently only logs its intent.
  debug "Stopping all peers ..."
  # TODO: actually stop the peers; py-evm equivalent:
  # await asyncio.gather(
  #   *[peer.stop() for peer in self.connected_nodes.values()])

# async def stop(self) -> None:
#   self.cancel_token.trigger()
#   await self.stop_all_peers()
|
||||||
|
|
||||||
|
proc connect(p: PeerPool, remote: Node): Future[Peer] {.async.} =
  ## Connect to the given remote and return a Peer instance when successful.
  ## Returns nil if the remote is unreachable, times out or is useless.
  if remote in p.connectedNodes:
    trace "skipping_connection_to_already_connected_peer", remote
    return nil

  if remote in p.connectingNodes:
    # a connection attempt to this node is already in flight
    return nil

  trace "Connecting to node", remote
  p.connectingNodes.incl(remote)
  try:
    result = await p.network.rlpxConnect(remote)
  finally:
    # Robustness fix: previously an exception raised out of rlpxConnect left
    # `remote` stuck in `connectingNodes` forever, permanently blocking any
    # future connection attempt to that node. The finally guarantees cleanup
    # while still propagating the exception to the caller.
    p.connectingNodes.excl(remote)

  # expected_exceptions = (
  #   UnreachablePeer, TimeoutError, PeerConnectionLost, HandshakeFailure)
  # try:
  #   self.logger.debug("Connecting to %s...", remote)
  #   peer = await wait_with_token(
  #     handshake(remote, self.privkey, self.peer_class, self.network_id),
  #     token=self.cancel_token,
  #     timeout=HANDSHAKE_TIMEOUT)
  #   return peer
  # except OperationCancelled:
  #   # Pass it on to instruct our main loop to stop.
  #   raise
  # except expected_exceptions as e:
  #   self.logger.debug("Could not complete handshake with %s: %s", remote, repr(e))
  # except Exception:
  #   self.logger.exception("Unexpected error during auth/p2p handshake with %s", remote)
  # return None
|
||||||
|
|
||||||
|
proc lookupRandomNode(p: PeerPool) {.async.} =
  ## Run one random discovery lookup and record when it happened.
  # This method runs in the background, so we must catch OperationCancelled
  # here, otherwise asyncio will warn that its exception was never retrieved.
  try:
    discard await p.discovery.lookupRandom()
  except: # OperationCancelled
    discard
  p.lastLookupTime = epochTime()
|
||||||
|
|
||||||
|
proc getRandomBootnode(p: PeerPool): Node =
  ## Pick one of the configured bootstrap nodes at random.
  rand(p.discovery.bootstrapNodes)
|
||||||
|
|
||||||
|
proc addPeer*(pool: PeerPool, peer: Peer): bool =
  ## Start tracking `peer` and notify observers.
  ## Returns false when the peer was already tracked.
  if peer.remote in pool.connectedNodes:
    return false
  pool.connectedNodes[peer.remote] = peer
  for o in pool.observers.values:
    if not o.onPeerConnected.isNil:
      o.onPeerConnected(peer)
  true
|
||||||
|
|
||||||
|
proc connectToNode*(p: PeerPool, n: Node) {.async.} =
  ## Attempt an outgoing connection to `n`, registering the peer on success.
  let peer = await p.connect(n)
  if peer.isNil: return

  trace "Connection established", peer
  if not p.addPeer(peer):
    # In case an incoming connection was added in the meanwhile
    trace "Disconnecting peer (outgoing)", reason = AlreadyConnected
    await peer.disconnect(AlreadyConnected)
|
||||||
|
|
||||||
|
proc connectToNodes(p: PeerPool, nodes: seq[Node]) {.async.} =
  ## Fire off (unawaited) connection attempts to every node in `nodes`.
  for n in nodes:
    discard p.connectToNode(n)

    # # TODO: Consider changing connect() to raise an exception instead of
    # # returning None, as discussed in
    # # https://github.com/ethereum/py-evm/pull/139#discussion_r152067425
    # echo "Connecting to node: ", node
    # let peer = await p.connect(node)
    # if not peer.isNil:
    #   info "Successfully connected to ", peer
    #   ensureFuture peer.run(p)

    #   p.connectedNodes[peer.remote] = peer
    #   # for subscriber in self._subscribers:
    #   #   subscriber.register_peer(peer)
    #   if p.connectedNodes.len >= p.minPeers:
    #     return
|
||||||
|
|
||||||
|
proc maybeConnectToMorePeers(p: PeerPool) {.async.} =
  ## Connect to more peers if we're not yet connected to at least p.minPeers.
  if p.connectedNodes.len >= p.minPeers:
    return

  # refresh the discovery table at most once per lookupInterval seconds
  if p.lastLookupTime + lookupInterval < epochTime():
    ensureFuture p.lookupRandomNode()

  let debugEnode = getEnv("ETH_DEBUG_ENODE")
  if debugEnode.len > 0:
    await p.connectToNode(newNode(debugEnode))
  else:
    await p.connectToNodes(p.nodesToConnect())

  # In some cases (e.g ROPSTEN or private testnets), the discovery table might
  # be full of bad peers, so if we can't connect to any peers we try a random
  # bootstrap node as well.
  if p.connectedNodes.len == 0:
    await p.connectToNode(p.getRandomBootnode())
|
||||||
|
|
||||||
|
proc run(p: PeerPool) {.async.} =
  ## Main pool loop: periodically top up connections until `running`
  ## is cleared.
  trace "Running PeerPool..."
  p.running = true
  while p.running:
    # NOTE: the flag exists so that stopAllPeers is awaited outside the
    # except branch; keep this structure.
    var dropConnections = false
    try:
      await p.maybeConnectToMorePeers()
    except Exception as e:
      # Most unexpected errors should be transient, so we log and restart from
      # scratch.
      error "Unexpected PeerPool error, restarting",
            err = getCurrentExceptionMsg(),
            stackTrace = e.getStackTrace()
      dropConnections = true

    if dropConnections:
      await p.stopAllPeers()

    await sleepAsync(connectLoopSleepMs)
|
||||||
|
|
||||||
|
proc start*(p: PeerPool) =
  ## Launch the pool's background connect loop (idempotent).
  if not p.running:
    asyncCheck p.run()
|
||||||
|
|
||||||
|
# Number of currently connected peers.
proc len*(p: PeerPool): int = p.connectedNodes.len
|
||||||
|
# @property
|
||||||
|
# def peers(self) -> List[BasePeer]:
|
||||||
|
# peers = list(self.connected_nodes.values())
|
||||||
|
# # Shuffle the list of peers so that dumb callsites are less likely to send
|
||||||
|
# # all requests to
|
||||||
|
# # a single peer even if they always pick the first one from the list.
|
||||||
|
# random.shuffle(peers)
|
||||||
|
# return peers
|
||||||
|
|
||||||
|
# async def get_random_peer(self) -> BasePeer:
|
||||||
|
# while not self.peers:
|
||||||
|
# self.logger.debug("No connected peers, sleeping a bit")
|
||||||
|
# await asyncio.sleep(0.5)
|
||||||
|
# return random.choice(self.peers)
|
||||||
|
|
||||||
|
iterator peers*(p: PeerPool): Peer =
  ## Yields every currently connected peer.
  for _, peer in p.connectedNodes:
    yield peer
|
||||||
|
|
||||||
|
iterator peers*(p: PeerPool, Protocol: type): Peer =
  ## Yields only the connected peers that speak `Protocol`.
  for peer in p.peers:
    if peer.supports(Protocol):
      yield peer
|
||||||
|
|
|
@ -0,0 +1,172 @@
|
||||||
|
import
|
||||||
|
deques, tables,
|
||||||
|
package_visible_types,
|
||||||
|
eth/[rlp, keys], asyncdispatch2, eth/common/eth_types,
|
||||||
|
../enode, ../kademlia, ../discovery, ../options, ../rlpxcrypt
|
||||||
|
|
||||||
|
const
  useSnappy* = defined(useSnappy)

type
  EthereumNode* = ref object
    ## A running devp2p node: identity, supported protocols and peer pool.
    networkId*: uint
    chain*: AbstractChainDB
    clientId*: string
    connectionState*: ConnectionState
    keys*: KeyPair
    address*: Address
    peerPool*: PeerPool

    # Private fields:
    capabilities*: seq[Capability]
    protocols*: seq[ProtocolInfo]
    listeningServer*: StreamServer
    protocolStates*: seq[RootRef]
    discovery*: DiscoveryProtocol
    when useSnappy:
      protocolVersion*: uint

  Peer* = ref object
    ## A single RLPx connection to a remote node.
    remote*: Node
    network*: EthereumNode

    # Private fields:
    transport*: StreamTransport
    dispatcher*: Dispatcher
    lastReqId*: int
    secretsState*: SecretState
    connectionState*: ConnectionState
    protocolStates*: seq[RootRef]
    outstandingRequests*: seq[Deque[OutstandingRequest]]
    awaitedMessages*: seq[FutureBase]
    when useSnappy:
      snappyEnabled*: bool

  PeerPool* = ref object
    ## Maintains outgoing connections, trying to keep at least `minPeers`
    ## peers connected.
    # Private fields:
    network*: EthereumNode
    keyPair*: KeyPair
    networkId*: uint
    minPeers*: int
    clientId*: string
    discovery*: DiscoveryProtocol
    lastLookupTime*: float
    connectedNodes*: Table[Node, Peer]
    connectingNodes*: HashSet[Node]
    running*: bool
    listenPort*: Port
    observers*: Table[int, PeerObserver]

  PeerObserver* = object
    ## Callbacks invoked when peers join/leave the pool.
    onPeerConnected*: proc(p: Peer) {.gcsafe.}
    onPeerDisconnected*: proc(p: Peer) {.gcsafe.}

  Capability* = object
    ## A (name, version) pair advertised during the devp2p handshake.
    name*: string
    version*: int

  UnsupportedProtocol* = object of Exception
    # This is raised when you attempt to send a message from a particular
    # protocol to a peer that doesn't support the protocol.

  MalformedMessageError* = object of Exception

  PeerDisconnected* = object of Exception
    reason*: DisconnectionReason

  UselessPeerError* = object of Exception

  ##
  ## Quasy-private types. Use at your own risk.
  ##

  ProtocolInfoObj* = object
    name*: string
    version*: int
    messages*: seq[MessageInfo]
    index*: int # the position of the protocol in the
                # ordered list of supported protocols

    # Private fields:
    peerStateInitializer*: PeerStateInitializer
    networkStateInitializer*: NetworkStateInitializer
    handshake*: HandshakeStep
    disconnectHandler*: DisconnectionHandler

  ProtocolInfo* = ptr ProtocolInfoObj

  MessageInfo* = object
    id*: int
    name*: string

    # Private fields:
    thunk*: MessageHandler
    printer*: MessageContentPrinter
    requestResolver*: RequestResolver
    nextMsgResolver*: NextMsgResolver

  Dispatcher* = ref object # private
    # The dispatcher stores the mapping of negotiated message IDs between
    # two connected peers. The dispatcher may be shared between connections
    # running with the same set of supported protocols.
    #
    # `protocolOffsets` will hold one slot of each locally supported
    # protocol. If the other peer also supports the protocol, the stored
    # offset indicates the numeric value of the first message of the protocol
    # (for this particular connection). If the other peer doesn't support the
    # particular protocol, the stored offset is -1.
    #
    # `messages` holds a mapping from valid message IDs to their handler procs.
    #
    protocolOffsets*: seq[int]
    messages*: seq[ptr MessageInfo]
    activeProtocols*: seq[ProtocolInfo]

  ##
  ## Private types:
  ##

  OutstandingRequest* = object
    id*: int
    future*: FutureBase
    timeoutAt*: uint64

  # Private types:
  MessageHandlerDecorator* = proc(msgId: int, n: NimNode): NimNode
  MessageHandler* = proc(x: Peer, msgId: int, data: Rlp): Future[void] {.gcsafe.}
  MessageContentPrinter* = proc(msg: pointer): string {.gcsafe.}
  RequestResolver* = proc(msg: pointer, future: FutureBase) {.gcsafe.}
  NextMsgResolver* = proc(msgData: Rlp, future: FutureBase) {.gcsafe.}
  PeerStateInitializer* = proc(peer: Peer): RootRef {.gcsafe.}
  NetworkStateInitializer* = proc(network: EthereumNode): RootRef {.gcsafe.}
  HandshakeStep* = proc(peer: Peer): Future[void] {.gcsafe.}
  DisconnectionHandler* = proc(peer: Peer,
                               reason: DisconnectionReason): Future[void] {.gcsafe.}

  RlpxMessageKind* = enum
    rlpxNotification,
    rlpxRequest,
    rlpxResponse

  ConnectionState* = enum
    None,
    Connecting,
    Connected,
    Disconnecting,
    Disconnected

  DisconnectionReason* = enum
    # Wire-level disconnect reason codes from the devp2p spec.
    DisconnectRequested,
    TcpError,
    BreachOfProtocol,
    UselessPeer,
    TooManyPeers,
    AlreadyConnected,
    IncompatibleProtocolVersion,
    NullNodeIdentityReceived,
    ClientQuitting,
    UnexpectedIdentity,
    SelfConnection,
    MessageTimeout,
    SubprotocolReason = 0x10
|
||||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,113 @@
|
||||||
|
#
|
||||||
|
# Ethereum P2P
|
||||||
|
# (c) Copyright 2018
|
||||||
|
# Status Research & Development GmbH
|
||||||
|
#
|
||||||
|
# Licensed under either of
|
||||||
|
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||||
|
# MIT license (LICENSE-MIT)
|
||||||
|
#
|
||||||
|
|
||||||
|
## This module implements the Ethereum Wire Protocol:
|
||||||
|
## https://github.com/ethereum/wiki/wiki/Ethereum-Wire-Protocol
|
||||||
|
|
||||||
|
import
|
||||||
|
asyncdispatch2, stint, chronicles, rlp, eth/common/eth_types,
|
||||||
|
../rlpx, ../private/p2p_types, ../blockchain_utils, ../../p2p
|
||||||
|
|
||||||
|
type
  NewBlockHashesAnnounce* = object
    hash: KeccakHash
    number: uint

  NewBlockAnnounce* = object
    header*: BlockHeader
    body* {.rlpInline.}: BlockBody

  PeerState = ref object
    # Per-peer eth protocol state, filled in during the status handshake.
    initialized*: bool
    bestBlockHash*: KeccakHash
    bestDifficulty*: DifficultyInt

const
  maxStateFetch* = 384
  maxBodiesFetch* = 128
  maxReceiptsFetch* = 256
  maxHeadersFetch* = 192
  protocolVersion* = 63

p2pProtocol eth(version = protocolVersion,
                peerState = PeerState,
                useRequestIds = false):

  onPeerConnected do (peer: Peer):
    # Exchange `status` messages and verify the peer is on the same
    # network/chain; otherwise drop it as useless.
    let
      network = peer.network
      chain = network.chain
      bestBlock = chain.getBestBlockHeader

    await peer.status(protocolVersion,
                      network.networkId,
                      bestBlock.difficulty,
                      bestBlock.blockHash,
                      chain.genesisHash)

    let m = await peer.nextMsg(eth.status)
    if m.networkId == network.networkId and m.genesisHash == chain.genesisHash:
      trace "suitable peer", peer
    else:
      raise newException(UselessPeerError, "Eth handshake params mismatch")
    peer.state.initialized = true
    peer.state.bestDifficulty = m.totalDifficulty
    peer.state.bestBlockHash = m.bestHash

  proc status(peer: Peer,
              protocolVersion: uint,
              networkId: uint,
              totalDifficulty: DifficultyInt,
              bestHash: KeccakHash,
              genesisHash: KeccakHash)

  proc newBlockHashes(peer: Peer, hashes: openarray[NewBlockHashesAnnounce]) =
    discard

  proc transactions(peer: Peer, transactions: openarray[Transaction]) =
    discard

  requestResponse:
    proc getBlockHeaders(peer: Peer, request: BlocksRequest) {.gcsafe.} =
      # Oversized requests are a protocol breach and cause a disconnect.
      if request.maxResults > uint64(maxHeadersFetch):
        await peer.disconnect(BreachOfProtocol)
        return

      await peer.blockHeaders(peer.network.chain.getBlockHeaders(request))

    proc blockHeaders(p: Peer, headers: openarray[BlockHeader])

  requestResponse:
    proc getBlockBodies(peer: Peer, hashes: openarray[KeccakHash]) {.gcsafe.} =
      # Oversized requests are a protocol breach and cause a disconnect.
      if hashes.len > maxBodiesFetch:
        await peer.disconnect(BreachOfProtocol)
        return

      await peer.blockBodies(peer.network.chain.getBlockBodies(hashes))

    proc blockBodies(peer: Peer, blocks: openarray[BlockBody])

  proc newBlock(peer: Peer, bh: NewBlockAnnounce, totalDifficulty: DifficultyInt) =
    discard

  nextID 13

  requestResponse:
    proc getNodeData(peer: Peer, hashes: openarray[KeccakHash]) =
      await peer.nodeData(peer.network.chain.getStorageNodes(hashes))

    proc nodeData(peer: Peer, data: openarray[Blob])

  requestResponse:
    proc getReceipts(peer: Peer, hashes: openarray[KeccakHash]) =
      await peer.receipts(peer.network.chain.getReceipts(hashes))

    proc receipts(peer: Peer, receipts: openarray[Receipt])
|
||||||
|
|
|
@ -0,0 +1,501 @@
|
||||||
|
import
|
||||||
|
tables, sets,
|
||||||
|
chronicles, asyncdispatch2, eth/rlp, eth/common/eth_types,
|
||||||
|
../../rlpx, ../../private/p2p_types, private/les_types
|
||||||
|
|
||||||
|
const
  maxSamples = 100000        # window size for the running-average statistics
  rechargingScale = 1000000  # fixed-point scale used in recharging arithmetic

  lesStatsKey = "les.flow_control.stats"  # DB key of the persisted stats record
  lesStatsVer = 0                         # version tag of that record
|
||||||
|
|
||||||
|
logScope:
|
||||||
|
topics = "les flow_control"
|
||||||
|
|
||||||
|
# TODO: move this somewhere
|
||||||
|
# TODO: move this somewhere
proc pop[A, B](t: var Table[A, B], key: A): B =
  ## Removes `key` from `t` and returns the value that was stored under it.
  ## Raises `KeyError` when the key is not present.
  let value = t[key]
  t.del(key)
  return value
|
||||||
|
|
||||||
|
when LesTime is SomeInteger:
  # LES timestamps are plain integers (milliseconds); provide an integer
  # `/` so the flow-control formulas below can divide times uniformly.
  template `/`(lhs, rhs: LesTime): LesTime =
    lhs div rhs
|
||||||
|
|
||||||
|
when defined(testing):
  # Test builds use a fully virtual clock: the tests advance `lesTime`
  # explicitly, making the flow-control behavior deterministic.
  var lesTime* = LesTime(0)
  template now(): LesTime = lesTime
  template advanceTime(t) = lesTime += LesTime(t)

else:
  import times
  # Wall-clock origin captured at module load.
  let startTime = epochTime()

  proc now(): LesTime =
    ## Milliseconds elapsed since module initialization.
    return LesTime((times.epochTime() - startTime) * 1000.0)
|
||||||
|
|
||||||
|
proc addSample(ra: var StatsRunningAverage; x, y: float64) =
  ## Folds the observation `(x, y)` into the running sums used for the
  ## least-squares fit in `calc`, decaying old data once the window of
  ## `maxSamples` observations is full.
  if ra.count >= maxSamples:
    # Shrink all accumulated sums proportionally so the window keeps
    # an effective size of `maxSamples`.
    let decay = float64(ra.count + 1 - maxSamples) / maxSamples
    ra.sumX -= ra.sumX * decay
    ra.sumY -= ra.sumY * decay
    ra.sumXX -= ra.sumXX * decay
    ra.sumXY -= ra.sumXY * decay
    ra.count = maxSamples - 1

  inc ra.count
  ra.sumX += x
  ra.sumY += y
  ra.sumXX += x * x
  ra.sumXY += x * y
|
||||||
|
|
||||||
|
proc calc(ra: StatsRunningAverage): tuple[m, b: float] =
  ## Least-squares fit `y = m*x + b` over the accumulated samples.
  ## Returns zeros when no samples were recorded.
  if ra.count == 0:
    return (m: 0.0, b: 0.0)

  let
    n = float64(ra.count)
    determinant = n * ra.sumXX - ra.sumX * ra.sumX

  # A near-singular system means all x values were (almost) identical;
  # attribute the whole average to the slope in that case.
  if determinant < 0.001:
    return (m: ra.sumY / n, b: 0.0)

  let slope = (n * ra.sumXY - ra.sumX * ra.sumY) / determinant
  result = (m: slope, b: ra.sumY / n - slope * ra.sumX / n)
|
||||||
|
|
||||||
|
proc currentRequestsCosts*(network: LesNetwork,
                           les: ProtocolInfo): seq[ReqCostInfo] =
  ## Builds the per-message cost table advertised to clients, derived
  ## from the statistics gathered while serving previous requests.
  # Make sure the message costs are already initialized
  doAssert network.messageStats.len > les.messages[^1].id,
           "Have you called `initFlowControl`"

  result = newSeqOfCap[ReqCostInfo](les.messages.len)
  for msg in les.messages:
    var (slope, base) = network.messageStats[msg.id].calc()
    # Clamp negative fit components to zero, folding a negative slope
    # into the base cost first.
    if slope < 0:
      base += slope
      slope = 0

    if base < 0:
      base = 0

    result.add ReqCostInfo.init(msgId = msg.id,
                                baseCost = ReqCostInt(base * 2),
                                reqCost = ReqCostInt(slope * 2))
|
||||||
|
|
||||||
|
proc persistMessageStats*(db: AbstractChainDB,
                          network: LesNetwork) =
  ## Serializes the current LES message statistics into the database so
  ## the cost model survives a restart (see `loadMessageStats`).
  doAssert db != nil
  # XXX: Because of the package_visible_types template magic, Nim complains
  # when we pass the messageStats expression directly to `encodeList`
  let stats = network.messageStats
  db.setSetting(lesStatsKey, rlp.encodeList(lesStatsVer, stats))
|
||||||
|
|
||||||
|
proc loadMessageStats*(network: LesNetwork,
                       les: ProtocolInfo,
                       db: AbstractChainDb): bool =
  ## Restores the statistics persisted by `persistMessageStats`.
  ## Returns true on success; on any failure (no db, record missing,
  ## outdated, incomplete or corrupt) the stats are reset to an empty
  ## record sized for the protocol and false is returned.
  block readFromDB:
    if db == nil:
      break readFromDB

    var stats = db.getSetting(lesStatsKey)
    if stats.len == 0:
      notice "LES stats not present in the database"
      break readFromDB

    try:
      var statsRlp = rlpFromBytes(stats.toRange)
      statsRlp.enterList

      # The record is versioned so its layout can evolve.
      let version = statsRlp.read(int)
      if version != lesStatsVer:
        notice "Found outdated LES stats record"
        break readFromDB

      statsRlp >> network.messageStats
      # The record must cover every message of the protocol.
      if network.messageStats.len <= les.messages[^1].id:
        notice "Found incomplete LES stats record"
        break readFromDB

      return true

    except RlpError:
      error "Error while loading LES message stats",
            err = getCurrentExceptionMsg()

  # Fall-through on any failure: start from a clean slate with one
  # (zeroed) entry per protocol message.
  newSeq(network.messageStats, les.messages[^1].id + 1)
  return false
|
||||||
|
|
||||||
|
proc update(s: var FlowControlState, t: LesTime) =
  ## Advances the buffer to time `t`, crediting the recharge accrued
  ## since the last update and clamping at the buffer limit.
  let elapsed = max(t - s.lastUpdate, LesTime(0))
  let recharged = s.bufValue + s.minRecharge * elapsed
  s.bufValue = min(recharged, s.bufLimit)
  s.lastUpdate = t
|
||||||
|
|
||||||
|
proc init(s: var FlowControlState,
          bufLimit: BufValueInt, minRecharge: int, t: LesTime) =
  ## Resets the state to a completely full buffer at time `t`.
  s.bufLimit = bufLimit
  s.bufValue = bufLimit
  s.minRecharge = minRecharge
  s.lastUpdate = t
|
||||||
|
|
||||||
|
func canMakeRequest(s: FlowControlState,
                    maxCost: ReqCostInt): (LesTime, float64) =
  ## Returns the required waiting time before sending a request and
  ## the estimated buffer level afterwards (as a fraction of the limit)
  const safetyMargin = 50

  # Pad the cost with a safety margin, but never require more than the
  # buffer can ever hold.
  let cost = min(maxCost + safetyMargin * s.minRecharge, s.bufLimit)

  if s.bufValue >= cost:
    # Enough budget: no waiting; report the remaining buffer fraction.
    result = (LesTime(0), float64(s.bufValue - cost) / float64(s.bufLimit))
  else:
    # Not enough budget: wait until recharging covers the deficit.
    result = ((cost - s.bufValue) / s.minRecharge, 0.0)
|
||||||
|
|
||||||
|
func canServeRequest(srv: LesNetwork): bool =
  ## True while both the request-count budget and the total-cost budget
  ## allow accepting another request.
  srv.reqCount < srv.maxReqCount and srv.reqCostSum < srv.maxReqCostSum
|
||||||
|
|
||||||
|
proc rechargeReqCost(peer: LesPeer, t: LesTime) =
  ## Advances the peer's accumulated request cost to time `t` along the
  ## current gradient and finalizes recharging once it has completed.
  let dt = t - peer.lastRechargeTime
  peer.reqCostVal += peer.reqCostGradient * dt / rechargingScale
  peer.lastRechargeTime = t
  if peer.isRecharging and t >= peer.rechargingEndsAt:
    # Recharging finished: the peer's cost is fully paid off.
    peer.isRecharging = false
    peer.reqCostGradient = 0
    peer.reqCostVal = 0
|
||||||
|
|
||||||
|
proc updateRechargingParams(peer: LesPeer, network: LesNetwork) =
  ## Recomputes the peer's cost gradient and, while recharging, the
  ## projected completion time.
  peer.reqCostGradient = 0
  if peer.reqCount > 0:
    # Serving: cost accrues, shared evenly across all active requests
    # on the network.
    peer.reqCostGradient = rechargingScale / network.reqCount

  if peer.isRecharging:
    # Recharging: the rate is this peer's share of the total recharging
    # power. NOTE(review): the duration below divides by the negated
    # gradient, which implies reqCostGradient is negative while
    # recharging — confirm the sign of network.rechargingRate.
    peer.reqCostGradient = (network.rechargingRate * peer.rechargingPower /
                            network.totalRechargingPower )

    peer.rechargingEndsAt = peer.lastRechargeTime +
                            LesTime(peer.reqCostVal * rechargingScale /
                                    -peer.reqCostGradient )
|
||||||
|
|
||||||
|
proc trackRequests(network: LesNetwork, peer: LesPeer, reqCountChange: int) =
  ## Registers the start (+1) or completion (-1) of a request from `peer`
  ## and switches the peer between serving and recharging states.
  peer.reqCount += reqCountChange
  network.reqCount += reqCountChange

  doAssert peer.reqCount >= 0 and network.reqCount >= 0

  if peer.reqCount == 0:
    # All requests have been finished. Start recharging.
    peer.isRecharging = true
    network.totalRechargingPower += peer.rechargingPower

  elif peer.reqCount == reqCountChange and peer.isRecharging:
    # `peer.reqCount` must have been 0 for the condition above to hold.
    # This is a transition from recharging to serving state.
    peer.isRecharging = false
    network.totalRechargingPower -= peer.rechargingPower
    peer.startReqCostVal = peer.reqCostVal

  updateRechargingParams peer, network
|
||||||
|
|
||||||
|
proc updateFlowControl(network: LesNetwork, t: LesTime) =
  ## Advances all peers' flow-control state up to time `t`, stepping
  ## through every intermediate moment at which some peer finished
  ## recharging (the gradients change at those points).
  while true:
    # Find the earliest recharging completion at or before `t`.
    var firstTime = t
    for peer in network.peers:
      # TODO: perhaps use a bin heap here
      if peer.isRecharging and peer.rechargingEndsAt < firstTime:
        firstTime = peer.rechargingEndsAt

    let rechargingEndedForSomePeer = firstTime < t

    # Advance everyone to that moment and rebuild the global cost sum.
    network.reqCostSum = 0
    for peer in network.peers:
      peer.rechargeReqCost firstTime
      network.reqCostSum += peer.reqCostVal

    if rechargingEndedForSomePeer:
      # Recharging power was redistributed; refresh gradients and keep
      # stepping toward `t`.
      for peer in network.peers:
        if peer.isRecharging:
          updateRechargingParams peer, network
    else:
      network.lastUpdate = t
      return
|
||||||
|
|
||||||
|
proc endPendingRequest*(network: LesNetwork, peer: LesPeer, t: LesTime) =
  ## Completes one outstanding request of `peer` at time `t`, bringing
  ## the global flow-control state up to date around the bookkeeping.
  if peer.reqCount <= 0:
    return
  network.updateFlowControl(t)
  network.trackRequests(peer, -1)
  network.updateFlowControl(t)
|
||||||
|
|
||||||
|
proc enlistInFlowControl*(network: LesNetwork,
                          peer: LesPeer,
                          peerRechargingPower = 100) =
  ## Starts flow-control accounting for a newly connected peer.
  let t = now()

  assert peer.isServer or peer.isClient
  # Each Peer must be potential communication partner for us.
  # There will be useless peers on the network, but the logic
  # should make sure to disconnect them earlier in `onPeerConnected`.

  if peer.isServer:
    # We act as a client: track the budget the remote server grants us.
    peer.localFlowState.init network.bufferLimit, network.minRechargingRate, t
    peer.pendingReqs = initTable[int, ReqCostInt]()

  if peer.isClient:
    # We act as a server: track the budget we grant to the remote client.
    peer.remoteFlowState.init network.bufferLimit, network.minRechargingRate, t
    peer.lastRechargeTime = t
    peer.rechargingEndsAt = t
    peer.rechargingPower = peerRechargingPower

  network.updateFlowControl t
|
||||||
|
|
||||||
|
proc delistFromFlowControl*(network: LesNetwork, peer: LesPeer) =
  ## Stops flow-control accounting for a departing peer.
  let t = now()

  # XXX: perhaps this is not safe with our reqCount logic.
  # The original code may depend on the binarity of the `serving` flag.
  network.endPendingRequest peer, t
  network.updateFlowControl t
|
||||||
|
|
||||||
|
proc initFlowControl*(network: LesNetwork, les: ProtocolInfo,
                      maxReqCount, maxReqCostSum, reqCostTarget: int,
                      db: AbstractChainDb = nil) =
  ## Configures the server-side flow-control limits and loads the
  ## persisted per-message cost statistics (re-initializing them when no
  ## usable record exists).
  ##
  ## `reqCostTarget` feeds the recharging-rate formula below; it appears
  ## to express a target load relative to 100 — TODO confirm.
  network.rechargingRate = (rechargingScale * rechargingScale) /
                           (100 * rechargingScale / reqCostTarget - rechargingScale)
  network.maxReqCount = maxReqCount
  network.maxReqCostSum = maxReqCostSum

  if not network.loadMessageStats(les, db):
    # Fix: corrected the typo "re-initilized" in the log message.
    warn "Failed to load persisted LES message stats. " &
         "Flow control will be re-initialized."
|
||||||
|
|
||||||
|
proc canMakeRequest(peer: var LesPeer, maxCost: int): (LesTime, float64) =
  ## Brings the local flow-control state up to date, then delegates to
  ## the state-level query.
  peer.localFlowState.update now()
  peer.localFlowState.canMakeRequest(maxCost)
|
||||||
|
|
||||||
|
# Computes the cost charged for a message, capped at the buffer limit.
# `localOrRemote` is spliced into field names, selecting between the
# `localReqCosts`/`localFlowState` and `remoteReqCosts`/`remoteFlowState`
# field pairs of the peer.
template getRequestCost(peer: LesPeer, localOrRemote: untyped,
                        msgId, costQuantity: int): ReqCostInt =
  template msgCostInfo: untyped = peer.`localOrRemote ReqCosts`[msgId]

  min(msgCostInfo.baseCost + msgCostInfo.reqCost * costQuantity,
      peer.`localOrRemote FlowState`.bufLimit)
|
||||||
|
|
||||||
|
proc trackOutgoingRequest*(network: LesNetwork, peer: LesPeer,
                           msgId, reqId, costQuantity: int) =
  ## Charges our local budget for a request we are sending to a remote
  ## server and records the running cost total under `reqId` so the
  ## response can reconcile it (see `trackIncomingResponse`).
  let maxCost = peer.getRequestCost(local, msgId, costQuantity)

  peer.localFlowState.bufValue -= maxCost
  peer.pendingReqsCost += maxCost
  peer.pendingReqs[reqId] = peer.pendingReqsCost
|
||||||
|
|
||||||
|
proc trackIncomingResponse*(peer: LesPeer, reqId: int, bv: BufValueInt) =
  ## Reconciles our local budget with the buffer value `bv` reported by
  ## the remote server in its response to request `reqId`.
  let bv = min(bv, peer.localFlowState.bufLimit)
  if not peer.pendingReqs.hasKey(reqId):
    return

  # Requests sent after this one must remain charged against the buffer.
  let costsSumAtSending = peer.pendingReqs.pop(reqId)
  let costsSumChange = peer.pendingReqsCost - costsSumAtSending

  peer.localFlowState.bufValue = if bv > costsSumChange: bv - costsSumChange
                                 else: 0
  peer.localFlowState.lastUpdate = now()
|
||||||
|
|
||||||
|
proc acceptRequest*(network: LesNetwork, peer: LesPeer,
                    msgId, costQuantity: int): Future[bool] {.async.} =
  ## Admission control for an incoming request: waits until the server
  ## has capacity, then returns false when the request must be rejected
  ## (peer gone while waiting, or the client exceeded its budget).
  let t = now()
  let reqCost = peer.getRequestCost(remote, msgId, costQuantity)

  peer.remoteFlowState.update t
  network.updateFlowControl t

  # NOTE(review): 10 ms polling back-off; an event/future-based wakeup
  # would avoid the busy-wait — confirm this is intentional.
  while not network.canServeRequest:
    await sleepAsync(10)

  if peer notin network.peers:
    # The peer was disconnected or the network
    # was shut down while we waited
    return false

  network.trackRequests peer, +1
  network.updateFlowControl network.lastUpdate

  if reqCost > peer.remoteFlowState.bufValue:
    error "LES peer sent request too early",
          recharge = (reqCost - peer.remoteFlowState.bufValue) * rechargingScale /
                     peer.remoteFlowState.minRecharge
    return false

  return true
|
||||||
|
|
||||||
|
proc bufValueAfterRequest*(network: LesNetwork, peer: LesPeer,
                           msgId: int, quantity: int): BufValueInt =
  ## Settles a served request: charges the client's buffer, closes the
  ## pending request, records a cost sample for the statistics and
  ## returns the buffer value to report back to the client.
  let t = now()
  let costs = peer.remoteReqCosts[msgId]
  var reqCost = costs.baseCost + quantity * costs.reqCost

  peer.remoteFlowState.update t
  peer.remoteFlowState.bufValue -= reqCost

  network.endPendingRequest peer, t

  # If the cost actually accrued is below the advertised charge, raise
  # the buffer back up to the corresponding level.
  let curReqCost = peer.reqCostVal
  if curReqCost < peer.remoteFlowState.bufLimit:
    let bv = peer.remoteFlowState.bufLimit - curReqCost
    if bv > peer.remoteFlowState.bufValue:
      peer.remoteFlowState.bufValue = bv

  # Feed the measured cost of this request into the running averages.
  network.messageStats[msgId].addSample(float64(quantity),
                                        float64(curReqCost - peer.startReqCostVal))

  return peer.remoteFlowState.bufValue
|
||||||
|
|
||||||
|
when defined(testing):
  # Self-tests for this module; compiled only in test builds, where time
  # is virtual (`lesTime` above) and advanced explicitly via advanceTime.
  import unittest, random, ../../rlpx

  proc isMax(s: FlowControlState): bool =
    # True when the buffer is fully recharged.
    s.bufValue == s.bufLimit

  # Minimal protocol with five dummy messages, used to exercise the
  # per-message cost machinery.
  p2pProtocol dummyLes(version = 1, shortName = "abc"):
    proc a(p: Peer)
    proc b(p: Peer)
    proc c(p: Peer)
    proc d(p: Peer)
    proc e(p: Peer)

  # Approximate float comparison.
  template fequals(lhs, rhs: float64, epsilon = 0.0001): bool =
    abs(lhs-rhs) < epsilon

  proc tests* =
    # Fixed seed keeps the randomized tests deterministic.
    randomize(3913631)

    suite "les flow control":
      suite "running averages":
        test "consistent costs":
          var s: StatsRunningAverage
          for i in 0..100:
            s.addSample(5.0, 100.0)

          let (cost, base) = s.calc

          check:
            fequals(cost, 100.0)
            fequals(base, 0.0)

        test "randomized averages":
          proc performTest(qBase, qRandom: int, cBase, cRandom: float64) =
            var
              s: StatsRunningAverage
              expectedFinalCost = cBase + cRandom / 2
              error = expectedFinalCost

            for samples in [100, 1000, 10000]:
              for i in 0..samples:
                let q = float64(qBase + rand(10))
                s.addSample(q, q * (cBase + rand(cRandom)))

              let (newCost, newBase) = s.calc
              # With more samples, our error should decrease, getting
              # closer and closer to the average (unless we are already close enough)
              let newError = abs(newCost - expectedFinalCost)
              check newError < error
              error = newError

            # After enough samples we should be very close the the final result
            check error < (expectedFinalCost * 0.02)

          performTest(1, 10, 5.0, 100.0)
          performTest(1, 4, 200.0, 1000.0)

      suite "buffer value calculations":
        # One request as seen by the test driver.
        type TestReq = object
          peer: LesPeer
          msgId, quantity: int
          accepted: bool

        setup:
          var lesNetwork = new LesNetwork
          lesNetwork.peers = initSet[LesPeer]()
          lesNetwork.initFlowControl(dummyLes.protocolInfo,
                                     reqCostTarget = 300,
                                     maxReqCount = 5,
                                     maxReqCostSum = 1000)

          # Seed the statistics so every message has a distinct cost.
          for i in 0 ..< lesNetwork.messageStats.len:
            lesNetwork.messageStats[i].addSample(1.0, float(i) * 100.0)

          var client = new LesPeer
          client.isClient = true

          var server = new LesPeer
          server.isServer = true

          var clientServer = new LesPeer
          clientServer.isClient = true
          clientServer.isServer = true

          var client2 = new LesPeer
          client2.isClient = true

          var client3 = new LesPeer
          client3.isClient = true

          var bv: BufValueInt

          # Registers `peer` with the network using the current costs.
          template enlist(peer: LesPeer) {.dirty.} =
            let reqCosts = currentRequestsCosts(lesNetwork, dummyLes.protocolInfo)
            peer.remoteReqCosts = reqCosts
            peer.localReqCosts = reqCosts
            lesNetwork.peers.incl peer
            lesNetwork.enlistInFlowControl peer

          # Starts a request and records whether it was admitted.
          template startReq(p: LesPeer, msg, q: int): TestReq =
            var req: TestReq
            req.peer = p
            req.msgId = msg
            req.quantity = q
            req.accepted = waitFor lesNetwork.acceptRequest(p, msg, q)
            req

          # Settles a previously started request.
          template endReq(req: TestReq): BufValueInt =
            bufValueAfterRequest(lesNetwork, req.peer, req.msgId, req.quantity)

        test "single peer recharging":
          lesNetwork.bufferLimit = 1000
          lesNetwork.minRechargingRate = 100

          enlist client

          check:
            client.remoteFlowState.isMax
            client.rechargingPower > 0

          advanceTime 100

          let r1 = client.startReq(0, 100)
          check r1.accepted
          check client.isRecharging == false

          advanceTime 50

          let r2 = client.startReq(1, 1)
          check r2.accepted
          check client.isRecharging == false

          advanceTime 25
          bv = endReq r2
          check client.isRecharging == false

          advanceTime 130
          bv = endReq r1
          check client.isRecharging == true

          advanceTime 300
          lesNetwork.updateFlowControl now()

          check:
            client.isRecharging == false
            client.remoteFlowState.isMax
|
||||||
|
|
|
@ -0,0 +1,113 @@
|
||||||
|
import
|
||||||
|
hashes, tables, sets,
|
||||||
|
package_visible_types,
|
||||||
|
eth/common/eth_types
|
||||||
|
|
||||||
|
packageTypes:
  type
    # How (and whether) a peer wants to receive chain-head announcements.
    AnnounceType* = enum
      None,
      Simple,
      Signed,
      Unspecified

    # Cost formula for one message type: total = baseCost + reqCost * n.
    ReqCostInfo = object
      msgId: int
      baseCost, reqCost: ReqCostInt

    # Token-bucket style budget: `bufValue` recharges at `minRecharge`
    # up to `bufLimit`.
    FlowControlState = object
      bufValue, bufLimit: int
      minRecharge: int
      lastUpdate: LesTime

    # Accumulators for an online least-squares fit (see flow_control).
    StatsRunningAverage = object
      sumX, sumY, sumXX, sumXY: float64
      count: int

    LesPeer* = ref object
      isServer*: bool
      isClient*: bool
      announceType*: AnnounceType

      bestDifficulty*: DifficultyInt
      bestBlockHash*: KeccakHash
      bestBlockNumber*: BlockNumber

      hasChainSince: HashOrNum
      hasStateSince: HashOrNum
      relaysTransactions: bool

      # The variables below are used to implement the flow control
      # mechanisms of LES from our point of view as a server.
      # They describe how much load has been generated by this
      # particular peer.
      reqCount: int             # How many outstanding requests are there?
                                #
      rechargingPower: int      # Do we give this peer any extra priority
                                # (implemented as a faster recharning rate)
                                # 100 is the default. You can go higher and lower.
                                #
      isRecharging: bool        # This is true while the peer is not making
                                # any requests
                                #
      reqCostGradient: int      # Measures the speed of recharging or accumulating
                                # "requests cost" at any given moment.
                                #
      reqCostVal: int           # The accumulated "requests cost"
                                #
      rechargingEndsAt: int     # When will recharging end?
                                # (the buffer of the Peer will be fully restored)
                                #
      lastRechargeTime: LesTime # When did we last update the recharging parameters
                                #
      startReqCostVal: int      # TODO

      remoteFlowState: FlowControlState
      remoteReqCosts: seq[ReqCostInfo]

      # The next variables are used to limit ourselves as a client in order to
      # not violate the control-flow requirements of the remote LES server.

      pendingReqs: Table[int, ReqCostInt]
      pendingReqsCost: int

      localFlowState: FlowControlState
      localReqCosts: seq[ReqCostInfo]

    LesNetwork* = ref object
      peers: HashSet[LesPeer]
      messageStats: seq[StatsRunningAverage]
      ourAnnounceType*: AnnounceType

      # The fields below are relevant when serving data.
      bufferLimit: int
      minRechargingRate: int

      reqCostSum, maxReqCostSum: ReqCostInt
      reqCount, maxReqCount: int
      sumWeigth: int

      rechargingRate: int
      totalRechargedUnits: int
      totalRechargingPower: int

      lastUpdate: LesTime

    # One entry of the LES handshake status message.
    KeyValuePair = object
      key: string
      value: Blob

    # Raised when the LES handshake fails or is incompatible.
    HandshakeError = object of Exception

    LesTime = int     # this is in milliseconds
    BufValueInt = int
    ReqCostInt = int
|
||||||
|
|
||||||
|
# Identity-based hashing: each LesPeer instance is its own hash-set key.
template hash*(peer: LesPeer): Hash = hash(cast[pointer](peer))

# True when server-side limits were configured (see `initFlowControl`,
# which sets `maxReqCount`).
template areWeServingData*(network: LesNetwork): bool =
  network.maxReqCount != 0

# True when we participate as a client (an announce type was chosen).
template areWeRequestingData*(network: LesNetwork): bool =
  network.ourAnnounceType != AnnounceType.Unspecified
|
||||||
|
|
|
@ -0,0 +1,464 @@
|
||||||
|
#
|
||||||
|
# Ethereum P2P
|
||||||
|
# (c) Copyright 2018
|
||||||
|
# Status Research & Development GmbH
|
||||||
|
#
|
||||||
|
# Licensed under either of
|
||||||
|
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||||
|
# MIT license (LICENSE-MIT)
|
||||||
|
#
|
||||||
|
|
||||||
|
import
|
||||||
|
times, tables, options, sets, hashes, strutils, macros,
|
||||||
|
chronicles, asyncdispatch2, nimcrypto/[keccak, hash],
|
||||||
|
eth/[rlp, keys], eth/common/eth_types,
|
||||||
|
../rlpx, ../kademlia, ../private/p2p_types, ../blockchain_utils,
|
||||||
|
les/private/les_types, les/flow_control
|
||||||
|
|
||||||
|
# Re-export the public les_types symbols through this module.
les_types.forwardPublicTypes
|
||||||
|
|
||||||
|
const
  lesVersion = 2'u

  # Upper bounds on the number of items served per request type.
  maxHeadersFetch = 192
  maxBodiesFetch = 32
  maxReceiptsFetch = 128
  maxCodeFetch = 64
  maxProofsFetch = 64
  maxHeaderProofsFetch = 64
  maxTransactionsFetch = 64

  # Handshake properties:
  # https://github.com/zsfelfoldi/go-ethereum/wiki/Light-Ethereum-Subprotocol-(LES)
  keyProtocolVersion = "protocolVersion"
  ## P: is 1 for the LPV1 protocol version.

  keyNetworkId = "networkId"
  ## P: should be 0 for testnet, 1 for mainnet.

  keyHeadTotalDifficulty = "headTd"
  ## P: Total Difficulty of the best chain.
  ## Integer, as found in block header.

  keyHeadHash = "headHash"
  ## B_32: the hash of the best (i.e. highest TD) known block.

  keyHeadNumber = "headNum"
  ## P: the number of the best (i.e. highest TD) known block.

  keyGenesisHash = "genesisHash"
  ## B_32: the hash of the Genesis block.

  keyServeHeaders = "serveHeaders"
  ## (optional, no value)
  ## present if the peer can serve header chain downloads.

  keyServeChainSince = "serveChainSince"
  ## P (optional)
  ## present if the peer can serve Body/Receipts ODR requests
  ## starting from the given block number.

  keyServeStateSince = "serveStateSince"
  ## P (optional):
  ## present if the peer can serve Proof/Code ODR requests
  ## starting from the given block number.

  keyRelaysTransactions = "txRelay"
  ## (optional, no value)
  ## present if the peer can relay transactions to the ETH network.

  keyFlowControlBL = "flowControl/BL"
  keyFlowControlMRC = "flowControl/MRC"
  keyFlowControlMRR = "flowControl/MRR"
  ## see Client Side Flow Control:
  ## https://github.com/zsfelfoldi/go-ethereum/wiki/Client-Side-Flow-Control-model-for-the-LES-protocol

  keyAnnounceType = "announceType"
  keyAnnounceSignature = "sign"
|
||||||
|
|
||||||
|
proc initProtocolState(network: LesNetwork, node: EthereumNode) {.gcsafe.} =
  ## Called by the p2p framework when the protocol state is created;
  ## prepares the per-network peer registry.
  network.peers = initSet[LesPeer]()
|
||||||
|
|
||||||
|
proc addPeer(network: LesNetwork, peer: LesPeer) =
  ## Registers a handshaked peer and starts flow-control accounting for it.
  network.enlistInFlowControl peer
  network.peers.incl peer
|
||||||
|
|
||||||
|
proc removePeer(network: LesNetwork, peer: LesPeer) =
  ## Stops flow-control accounting for a departing peer and forgets it.
  network.delistFromFlowControl peer
  network.peers.excl peer
|
||||||
|
|
||||||
|
# Custom pragma attached to LES message procs; carries the expression
# measuring the request size and its allowed maximum. Parsed by
# `getCostQuantity` below.
template costQuantity(quantityExpr, max: untyped) {.pragma.}
|
||||||
|
|
||||||
|
proc getCostQuantity(fn: NimNode): tuple[quantityExpr, maxQuantity: NimNode] =
  ## Extracts the two arguments of the `costQuantity` pragma from a
  ## message proc definition (AST-level helper for the decorators below).
  # XXX: `getCustomPragmaVal` doesn't work yet on regular nnkProcDef nodes
  # (TODO: file as an issue)
  let p = fn.pragma
  assert p.kind == nnkPragma and p.len > 0 and $p[0][0] == "costQuantity"

  result.quantityExpr = p[0][1]
  result.maxQuantity = p[0][2]

  if result.maxQuantity.kind == nnkExprEqExpr:
    # The argument was written as `max = expr`; unwrap the value.
    result.maxQuantity = result.maxQuantity[1]
|
||||||
|
|
||||||
|
macro outgoingRequestDecorator(n: untyped): untyped =
  ## Appends a `trackOutgoingRequest` call to every outgoing request
  ## proc, so the local flow-control budget is charged on send.
  result = n
  let (costQuantity, maxQuantity) = n.getCostQuantity

  result.body.add quote do:
    trackOutgoingRequest(msgRecipient.networkState(les),
                         msgRecipient.state(les),
                         perProtocolMsgId, reqId, `costQuantity`)
  # echo result.repr
|
||||||
|
|
||||||
|
macro incomingResponseDecorator(n: untyped): untyped =
  ## Inserts a `trackIncomingResponse` call near the end of every
  ## response thunk, keeping our local budget in sync with the server's
  ## reported buffer value.
  result = n

  let trackingCall = quote do:
    trackIncomingResponse(msgSender.state(les), reqId, msg.bufValue)

  result.body.insert(n.body.len - 1, trackingCall)
  # echo result.repr
|
||||||
|
|
||||||
|
macro incomingRequestDecorator(n: untyped): untyped =
  ## Prepends admission control to every incoming request handler:
  ## oversized requests disconnect the peer, and every request is gated
  ## through `acceptRequest` before the handler body runs.
  result = n
  let (costQuantity, maxQuantity) = n.getCostQuantity

  template acceptStep(quantityExpr, maxQuantity) {.dirty.} =
    let requestCostQuantity = quantityExpr
    if requestCostQuantity > maxQuantity:
      await peer.disconnect(BreachOfProtocol)
      return

    let lesPeer = peer.state
    let lesNetwork = peer.networkState

    if not await acceptRequest(lesNetwork, lesPeer,
                               perProtocolMsgId,
                               requestCostQuantity): return

  result.body.insert(1, getAst(acceptStep(costQuantity, maxQuantity)))
  # echo result.repr
|
||||||
|
|
||||||
|
# Settles the request currently being served and yields the buffer value
# to report back to the client. Expects `lesNetwork`, `lesPeer`,
# `perProtocolMsgId` and `requestCostQuantity` to be in scope (they are
# injected by `incomingRequestDecorator`).
template updateBV: BufValueInt =
  bufValueAfterRequest(lesNetwork, lesPeer,
                       perProtocolMsgId, requestCostQuantity)
|
||||||
|
|
||||||
|
func getValue(values: openarray[KeyValuePair],
              key: string, T: typedesc): Option[T] =
  ## Looks up `key` among the handshake values and RLP-decodes the match
  ## as `T`. Returns `none(T)` when the key is absent.
  for pair in values:
    if pair.key != key:
      continue
    return some(rlp.decode(pair.value, T))
|
||||||
|
|
||||||
|
func getRequiredValue(values: openarray[KeyValuePair],
                      key: string, T: typedesc): T =
  ## Like `getValue`, but the key must be present; raises
  ## `HandshakeError` when it is missing.
  for pair in values:
    if pair.key == key:
      return rlp.decode(pair.value, T)

  raise newException(HandshakeError,
                     "Required handshake field " & key & " missing")
|
||||||
|
|
||||||
|
p2pProtocol les(version = lesVersion,
|
||||||
|
peerState = LesPeer,
|
||||||
|
networkState = LesNetwork,
|
||||||
|
outgoingRequestDecorator = outgoingRequestDecorator,
|
||||||
|
incomingRequestDecorator = incomingRequestDecorator,
|
||||||
|
incomingResponseThunkDecorator = incomingResponseDecorator):
|
||||||
|
|
||||||
|
## Handshake
|
||||||
|
##
|
||||||
|
|
||||||
|
proc status(p: Peer, values: openarray[KeyValuePair])
|
||||||
|
|
||||||
|
onPeerConnected do (peer: Peer):
|
||||||
|
let
|
||||||
|
network = peer.network
|
||||||
|
chain = network.chain
|
||||||
|
bestBlock = chain.getBestBlockHeader
|
||||||
|
lesPeer = peer.state
|
||||||
|
lesNetwork = peer.networkState
|
||||||
|
|
||||||
|
template `=>`(k, v: untyped): untyped =
|
||||||
|
KeyValuePair.init(key = k, value = rlp.encode(v))
|
||||||
|
|
||||||
|
var lesProperties = @[
|
||||||
|
keyProtocolVersion => lesVersion,
|
||||||
|
keyNetworkId => network.networkId,
|
||||||
|
keyHeadTotalDifficulty => bestBlock.difficulty,
|
||||||
|
keyHeadHash => bestBlock.blockHash,
|
||||||
|
keyHeadNumber => bestBlock.blockNumber,
|
||||||
|
keyGenesisHash => chain.genesisHash
|
||||||
|
]
|
||||||
|
|
||||||
|
lesPeer.remoteReqCosts = currentRequestsCosts(lesNetwork, les.protocolInfo)
|
||||||
|
|
||||||
|
if lesNetwork.areWeServingData:
|
||||||
|
lesProperties.add [
|
||||||
|
# keyServeHeaders => nil,
|
||||||
|
keyServeChainSince => 0,
|
||||||
|
keyServeStateSince => 0,
|
||||||
|
# keyRelaysTransactions => nil,
|
||||||
|
keyFlowControlBL => lesNetwork.bufferLimit,
|
||||||
|
keyFlowControlMRR => lesNetwork.minRechargingRate,
|
||||||
|
keyFlowControlMRC => lesPeer.remoteReqCosts
|
||||||
|
]
|
||||||
|
|
||||||
|
if lesNetwork.areWeRequestingData:
|
||||||
|
lesProperties.add(keyAnnounceType => lesNetwork.ourAnnounceType)
|
||||||
|
|
||||||
|
let
|
||||||
|
s = await peer.nextMsg(les.status)
|
||||||
|
peerNetworkId = s.values.getRequiredValue(keyNetworkId, uint)
|
||||||
|
peerGenesisHash = s.values.getRequiredValue(keyGenesisHash, KeccakHash)
|
||||||
|
peerLesVersion = s.values.getRequiredValue(keyProtocolVersion, uint)
|
||||||
|
|
||||||
|
template requireCompatibility(peerVar, localVar, varName: untyped) =
|
||||||
|
if localVar != peerVar:
|
||||||
|
raise newException(HandshakeError,
|
||||||
|
"Incompatibility detected! $1 mismatch ($2 != $3)" %
|
||||||
|
[varName, $localVar, $peerVar])
|
||||||
|
|
||||||
|
requireCompatibility(peerLesVersion, lesVersion, "les version")
|
||||||
|
requireCompatibility(peerNetworkId, network.networkId, "network id")
|
||||||
|
requireCompatibility(peerGenesisHash, chain.genesisHash, "genesis hash")
|
||||||
|
|
||||||
|
template `:=`(lhs, key) =
|
||||||
|
lhs = s.values.getRequiredValue(key, type(lhs))
|
||||||
|
|
||||||
|
lesPeer.bestBlockHash := keyHeadHash
|
||||||
|
lesPeer.bestBlockNumber := keyHeadNumber
|
||||||
|
lesPeer.bestDifficulty := keyHeadTotalDifficulty
|
||||||
|
|
||||||
|
let peerAnnounceType = s.values.getValue(keyAnnounceType, AnnounceType)
|
||||||
|
if peerAnnounceType.isSome:
|
||||||
|
lesPeer.isClient = true
|
||||||
|
lesPeer.announceType = peerAnnounceType.get
|
||||||
|
else:
|
||||||
|
lesPeer.announceType = AnnounceType.Simple
|
||||||
|
lesPeer.hasChainSince := keyServeChainSince
|
||||||
|
lesPeer.hasStateSince := keyServeStateSince
|
||||||
|
lesPeer.relaysTransactions := keyRelaysTransactions
|
||||||
|
lesPeer.localFlowState.bufLimit := keyFlowControlBL
|
||||||
|
lesPeer.localFlowState.minRecharge := keyFlowControlMRR
|
||||||
|
lesPeer.localReqCosts := keyFlowControlMRC
|
||||||
|
|
||||||
|
lesNetwork.addPeer lesPeer
|
||||||
|
|
||||||
|
onPeerDisconnected do (peer: Peer, reason: DisconnectionReason) {.gcsafe.}:
|
||||||
|
peer.networkState.removePeer peer.state
|
||||||
|
|
||||||
|
## Header synchronisation
|
||||||
|
##
|
||||||
|
|
||||||
|
proc announce(
|
||||||
|
peer: Peer,
|
||||||
|
headHash: KeccakHash,
|
||||||
|
headNumber: BlockNumber,
|
||||||
|
headTotalDifficulty: DifficultyInt,
|
||||||
|
reorgDepth: BlockNumber,
|
||||||
|
values: openarray[KeyValuePair],
|
||||||
|
announceType: AnnounceType) =
|
||||||
|
|
||||||
|
if peer.state.announceType == AnnounceType.None:
|
||||||
|
error "unexpected announce message", peer
|
||||||
|
return
|
||||||
|
|
||||||
|
if announceType == AnnounceType.Signed:
|
||||||
|
let signature = values.getValue(keyAnnounceSignature, Blob)
|
||||||
|
if signature.isNone:
|
||||||
|
error "missing announce signature"
|
||||||
|
return
|
||||||
|
let sigHash = keccak256.digest rlp.encodeList(headHash,
|
||||||
|
headNumber,
|
||||||
|
headTotalDifficulty)
|
||||||
|
let signerKey = recoverKeyFromSignature(signature.get.initSignature,
|
||||||
|
sigHash)
|
||||||
|
if signerKey.toNodeId != peer.remote.id:
|
||||||
|
error "invalid announce signature"
|
||||||
|
# TODO: should we disconnect this peer?
|
||||||
|
return
|
||||||
|
|
||||||
|
# TODO: handle new block
|
||||||
|
|
||||||
|
requestResponse:
|
||||||
|
proc getBlockHeaders(
|
||||||
|
peer: Peer,
|
||||||
|
req: BlocksRequest) {.
|
||||||
|
costQuantity(req.maxResults.int, max = maxHeadersFetch).} =
|
||||||
|
|
||||||
|
let headers = peer.network.chain.getBlockHeaders(req)
|
||||||
|
await peer.blockHeaders(reqId, updateBV(), headers)
|
||||||
|
|
||||||
|
proc blockHeaders(
|
||||||
|
peer: Peer,
|
||||||
|
bufValue: BufValueInt,
|
||||||
|
blocks: openarray[BlockHeader])
|
||||||
|
|
||||||
|
## On-damand data retrieval
|
||||||
|
##
|
||||||
|
|
||||||
|
requestResponse:
|
||||||
|
proc getBlockBodies(
|
||||||
|
peer: Peer,
|
||||||
|
blocks: openarray[KeccakHash]) {.
|
||||||
|
costQuantity(blocks.len, max = maxBodiesFetch), gcsafe.} =
|
||||||
|
|
||||||
|
let blocks = peer.network.chain.getBlockBodies(blocks)
|
||||||
|
await peer.blockBodies(reqId, updateBV(), blocks)
|
||||||
|
|
||||||
|
proc blockBodies(
|
||||||
|
peer: Peer,
|
||||||
|
bufValue: BufValueInt,
|
||||||
|
bodies: openarray[BlockBody])
|
||||||
|
|
||||||
|
requestResponse:
|
||||||
|
proc getReceipts(
|
||||||
|
peer: Peer,
|
||||||
|
hashes: openarray[KeccakHash])
|
||||||
|
{.costQuantity(hashes.len, max = maxReceiptsFetch).} =
|
||||||
|
|
||||||
|
let receipts = peer.network.chain.getReceipts(hashes)
|
||||||
|
await peer.receipts(reqId, updateBV(), receipts)
|
||||||
|
|
||||||
|
proc receipts(
|
||||||
|
peer: Peer,
|
||||||
|
bufValue: BufValueInt,
|
||||||
|
receipts: openarray[Receipt])
|
||||||
|
|
||||||
|
requestResponse:
|
||||||
|
proc getProofs(
|
||||||
|
peer: Peer,
|
||||||
|
proofs: openarray[ProofRequest]) {.
|
||||||
|
costQuantity(proofs.len, max = maxProofsFetch).} =
|
||||||
|
|
||||||
|
let proofs = peer.network.chain.getProofs(proofs)
|
||||||
|
await peer.proofs(reqId, updateBV(), proofs)
|
||||||
|
|
||||||
|
proc proofs(
|
||||||
|
peer: Peer,
|
||||||
|
bufValue: BufValueInt,
|
||||||
|
proofs: openarray[Blob])
|
||||||
|
|
||||||
|
requestResponse:
|
||||||
|
proc getContractCodes(
|
||||||
|
peer: Peer,
|
||||||
|
reqs: seq[ContractCodeRequest]) {.
|
||||||
|
costQuantity(reqs.len, max = maxCodeFetch).} =
|
||||||
|
|
||||||
|
let results = peer.network.chain.getContractCodes(reqs)
|
||||||
|
await peer.contractCodes(reqId, updateBV(), results)
|
||||||
|
|
||||||
|
proc contractCodes(
|
||||||
|
peer: Peer,
|
||||||
|
bufValue: BufValueInt,
|
||||||
|
results: seq[Blob])
|
||||||
|
|
||||||
|
nextID 15
|
||||||
|
|
||||||
|
requestResponse:
|
||||||
|
proc getHeaderProofs(
|
||||||
|
peer: Peer,
|
||||||
|
reqs: openarray[ProofRequest]) {.
|
||||||
|
costQuantity(reqs.len, max = maxHeaderProofsFetch).} =
|
||||||
|
|
||||||
|
let proofs = peer.network.chain.getHeaderProofs(reqs)
|
||||||
|
await peer.headerProofs(reqId, updateBV(), proofs)
|
||||||
|
|
||||||
|
proc headerProofs(
|
||||||
|
peer: Peer,
|
||||||
|
bufValue: BufValueInt,
|
||||||
|
proofs: openarray[Blob])
|
||||||
|
|
||||||
|
requestResponse:
|
||||||
|
proc getHelperTrieProofs(
|
||||||
|
peer: Peer,
|
||||||
|
reqs: openarray[HelperTrieProofRequest]) {.
|
||||||
|
costQuantity(reqs.len, max = maxProofsFetch).} =
|
||||||
|
|
||||||
|
var nodes, auxData: seq[Blob]
|
||||||
|
peer.network.chain.getHelperTrieProofs(reqs, nodes, auxData)
|
||||||
|
await peer.helperTrieProofs(reqId, updateBV(), nodes, auxData)
|
||||||
|
|
||||||
|
proc helperTrieProofs(
|
||||||
|
peer: Peer,
|
||||||
|
bufValue: BufValueInt,
|
||||||
|
nodes: seq[Blob],
|
||||||
|
auxData: seq[Blob])
|
||||||
|
|
||||||
|
## Transaction relaying and status retrieval
|
||||||
|
##
|
||||||
|
|
||||||
|
requestResponse:
|
||||||
|
proc sendTxV2(
|
||||||
|
peer: Peer,
|
||||||
|
transactions: openarray[Transaction]) {.
|
||||||
|
costQuantity(transactions.len, max = maxTransactionsFetch).} =
|
||||||
|
|
||||||
|
let chain = peer.network.chain
|
||||||
|
|
||||||
|
var results: seq[TransactionStatusMsg]
|
||||||
|
for t in transactions:
|
||||||
|
let hash = t.rlpHash # TODO: this is not optimal, we can compute
|
||||||
|
# the hash from the request bytes.
|
||||||
|
# The RLP module can offer a helper Hashed[T]
|
||||||
|
# to make this easy.
|
||||||
|
var s = chain.getTransactionStatus(hash)
|
||||||
|
if s.status == TransactionStatus.Unknown:
|
||||||
|
chain.addTransactions([t])
|
||||||
|
s = chain.getTransactionStatus(hash)
|
||||||
|
|
||||||
|
results.add s
|
||||||
|
|
||||||
|
await peer.txStatus(reqId, updateBV(), results)
|
||||||
|
|
||||||
|
proc getTxStatus(
|
||||||
|
peer: Peer,
|
||||||
|
transactions: openarray[Transaction]) {.
|
||||||
|
costQuantity(transactions.len, max = maxTransactionsFetch).} =
|
||||||
|
|
||||||
|
let chain = peer.network.chain
|
||||||
|
|
||||||
|
var results: seq[TransactionStatusMsg]
|
||||||
|
for t in transactions:
|
||||||
|
results.add chain.getTransactionStatus(t.rlpHash)
|
||||||
|
await peer.txStatus(reqId, updateBV(), results)
|
||||||
|
|
||||||
|
proc txStatus(
|
||||||
|
peer: Peer,
|
||||||
|
bufValue: BufValueInt,
|
||||||
|
transactions: openarray[TransactionStatusMsg])
|
||||||
|
|
||||||
|
proc configureLes*(node: EthereumNode,
|
||||||
|
# Client options:
|
||||||
|
announceType = AnnounceType.Simple,
|
||||||
|
# Server options.
|
||||||
|
# The zero default values indicate that the
|
||||||
|
# LES server will be deactivated.
|
||||||
|
maxReqCount = 0,
|
||||||
|
maxReqCostSum = 0,
|
||||||
|
reqCostTarget = 0) =
|
||||||
|
|
||||||
|
doAssert announceType != AnnounceType.Unspecified or maxReqCount > 0
|
||||||
|
|
||||||
|
var lesNetwork = node.protocolState(les)
|
||||||
|
lesNetwork.ourAnnounceType = announceType
|
||||||
|
initFlowControl(lesNetwork, les.protocolInfo,
|
||||||
|
maxReqCount, maxReqCostSum, reqCostTarget,
|
||||||
|
node.chain)
|
||||||
|
|
||||||
|
proc configureLesServer*(node: EthereumNode,
|
||||||
|
# Client options:
|
||||||
|
announceType = AnnounceType.Unspecified,
|
||||||
|
# Server options.
|
||||||
|
# The zero default values indicate that the
|
||||||
|
# LES server will be deactivated.
|
||||||
|
maxReqCount = 0,
|
||||||
|
maxReqCostSum = 0,
|
||||||
|
reqCostTarget = 0) =
|
||||||
|
## This is similar to `configureLes`, but with default parameter
|
||||||
|
## values appropriate for a server.
|
||||||
|
node.configureLes(announceType, maxReqCount, maxReqCostSum, reqCostTarget)
|
||||||
|
|
||||||
|
proc persistLesMessageStats*(node: EthereumNode) =
|
||||||
|
persistMessageStats(node.chain, node.protocolState(les))
|
||||||
|
|
|
@ -0,0 +1,977 @@
|
||||||
|
## Whisper
|
||||||
|
##
|
||||||
|
## Whisper is a gossip protocol that synchronizes a set of messages across nodes
|
||||||
|
## with attention given to sender and recipient anonymitiy. Messages are
|
||||||
|
## categorized by a topic and stay alive in the network based on a time-to-live
|
||||||
|
## measured in seconds. Spam prevention is based on proof-of-work, where large
|
||||||
|
## or long-lived messages must spend more work.
|
||||||
|
|
||||||
|
import
|
||||||
|
algorithm, bitops, endians, math, options, sequtils, strutils, tables, times,
|
||||||
|
secp256k1, chronicles, asyncdispatch2, eth/common/eth_types, eth/[keys, rlp],
|
||||||
|
hashes, byteutils, nimcrypto/[bcmode, hash, keccak, rijndael, sysrand],
|
||||||
|
eth/p2p, ../ecies
|
||||||
|
|
||||||
|
const
|
||||||
|
flagsLen = 1 ## payload flags field length, bytes
|
||||||
|
gcmIVLen = 12 ## Length of IV (seed) used for AES
|
||||||
|
gcmTagLen = 16 ## Length of tag used to authenticate AES-GCM-encrypted message
|
||||||
|
padMaxLen = 256 ## payload will be padded to multiples of this by default
|
||||||
|
payloadLenLenBits = 0b11'u8 ## payload flags length-of-length mask
|
||||||
|
signatureBits = 0b100'u8 ## payload flags signature mask
|
||||||
|
bloomSize = 512 div 8
|
||||||
|
defaultQueueCapacity = 256
|
||||||
|
defaultFilterQueueCapacity = 64
|
||||||
|
whisperVersion* = 6
|
||||||
|
defaultMinPow* = 0.001'f64
|
||||||
|
defaultMaxMsgSize* = 1024'u32 * 1024'u32 # * 10 # should be no higher than max RLPx size
|
||||||
|
messageInterval* = 300 ## Interval at which messages are send to peers, in ms
|
||||||
|
pruneInterval* = 1000 ## Interval at which message queue is pruned, in ms
|
||||||
|
|
||||||
|
type
|
||||||
|
Hash* = MDigest[256]
|
||||||
|
SymKey* = array[256 div 8, byte] ## AES256 key
|
||||||
|
Topic* = array[4, byte]
|
||||||
|
Bloom* = array[bloomSize, byte] ## XXX: nim-eth-bloom has really quirky API and fixed
|
||||||
|
## bloom size.
|
||||||
|
## stint is massive overkill / poor fit - a bloom filter is an array of bits,
|
||||||
|
## not a number
|
||||||
|
|
||||||
|
Payload* = object
|
||||||
|
## Payload is what goes in the data field of the Envelope
|
||||||
|
|
||||||
|
src*: Option[PrivateKey] ## Optional key used for signing message
|
||||||
|
dst*: Option[PublicKey] ## Optional key used for asymmetric encryption
|
||||||
|
symKey*: Option[SymKey] ## Optional key used for symmetric encryption
|
||||||
|
payload*: Bytes ## Application data / message contents
|
||||||
|
padding*: Option[Bytes] ## Padding - if unset, will automatically pad up to
|
||||||
|
## nearest maxPadLen-byte boundary
|
||||||
|
DecodedPayload* = object
|
||||||
|
src*: Option[PublicKey] ## If the message was signed, this is the public key
|
||||||
|
## of the source
|
||||||
|
payload*: Bytes ## Application data / message contents
|
||||||
|
padding*: Option[Bytes] ## Message padding
|
||||||
|
|
||||||
|
Envelope* = object
|
||||||
|
## What goes on the wire in the whisper protocol - a payload and some
|
||||||
|
## book-keeping
|
||||||
|
## Don't touch field order, there's lots of macro magic that depends on it
|
||||||
|
expiry*: uint32 ## Unix timestamp when message expires
|
||||||
|
ttl*: uint32 ## Time-to-live, seconds - message was created at (expiry - ttl)
|
||||||
|
topic*: Topic
|
||||||
|
data*: Bytes ## Payload, as given by user
|
||||||
|
nonce*: uint64 ## Nonce used for proof-of-work calculation
|
||||||
|
|
||||||
|
Message* = object
|
||||||
|
## An Envelope with a few cached properties
|
||||||
|
|
||||||
|
env*: Envelope
|
||||||
|
hash*: Hash ## Hash, as calculated for proof-of-work
|
||||||
|
size*: uint32 ## RLP-encoded size of message
|
||||||
|
pow*: float64 ## Calculated proof-of-work
|
||||||
|
bloom*: Bloom ## Filter sent to direct peers for topic-based filtering
|
||||||
|
isP2P: bool
|
||||||
|
|
||||||
|
ReceivedMessage* = object
|
||||||
|
decoded*: DecodedPayload
|
||||||
|
timestamp*: uint32
|
||||||
|
ttl*: uint32
|
||||||
|
topic*: Topic
|
||||||
|
pow*: float64
|
||||||
|
hash*: Hash
|
||||||
|
|
||||||
|
Queue* = object
|
||||||
|
## Bounded message repository
|
||||||
|
##
|
||||||
|
## Whisper uses proof-of-work to judge the usefulness of a message staying
|
||||||
|
## in the "cloud" - messages with low proof-of-work will be removed to make
|
||||||
|
## room for those with higher pow, even if they haven't expired yet.
|
||||||
|
## Larger messages and those with high time-to-live will require more pow.
|
||||||
|
items*: seq[Message] ## Sorted by proof-of-work
|
||||||
|
itemHashes*: HashSet[Message] ## For easy duplication checking
|
||||||
|
# XXX: itemHashes is added for easy message duplication checking and for
|
||||||
|
# easy pruning of the peer received message sets. It does have an impact on
|
||||||
|
# adding and pruning of items however.
|
||||||
|
# Need to give it some more thought and check where most time is lost in
|
||||||
|
# typical cases, perhaps we are better of with one hash table (lose PoW
|
||||||
|
# sorting however), or perhaps there is a simpler solution...
|
||||||
|
|
||||||
|
capacity*: int ## Max messages to keep. \
|
||||||
|
## XXX: really big messages can cause excessive mem usage when using msg \
|
||||||
|
## count
|
||||||
|
|
||||||
|
FilterMsgHandler* = proc(msg: ReceivedMessage) {.gcsafe, closure.}
|
||||||
|
|
||||||
|
Filter* = object
|
||||||
|
src: Option[PublicKey]
|
||||||
|
privateKey: Option[PrivateKey]
|
||||||
|
symKey: Option[SymKey]
|
||||||
|
topics: seq[Topic]
|
||||||
|
powReq: float64
|
||||||
|
allowP2P: bool
|
||||||
|
|
||||||
|
bloom: Bloom # cached bloom filter of all topics of filter
|
||||||
|
handler: FilterMsgHandler
|
||||||
|
queue: seq[ReceivedMessage]
|
||||||
|
|
||||||
|
Filters* = Table[string, Filter]
|
||||||
|
|
||||||
|
WhisperConfig* = object
|
||||||
|
powRequirement*: float64
|
||||||
|
bloom*: Bloom
|
||||||
|
isLightNode*: bool
|
||||||
|
maxMsgSize*: uint32
|
||||||
|
|
||||||
|
# Utilities --------------------------------------------------------------------
|
||||||
|
|
||||||
|
proc toBE(v: uint64): array[8, byte] =
|
||||||
|
# return uint64 as bigendian array - for easy consumption with hash function
|
||||||
|
var v = cast[array[8, byte]](v)
|
||||||
|
bigEndian64(result.addr, v.addr)
|
||||||
|
proc toLE(v: uint32): array[4, byte] =
|
||||||
|
# return uint32 as bigendian array - for easy consumption with hash function
|
||||||
|
var v = cast[array[4, byte]](v)
|
||||||
|
littleEndian32(result.addr, v.addr)
|
||||||
|
|
||||||
|
# XXX: get rid of pointer
|
||||||
|
proc fromLE32(v: array[4, byte]): uint32 =
|
||||||
|
var v = v
|
||||||
|
var ret: array[4, byte]
|
||||||
|
littleEndian32(ret.addr, v.addr)
|
||||||
|
result = cast[uint32](ret)
|
||||||
|
|
||||||
|
proc leadingZeroBits(hash: MDigest): int =
|
||||||
|
## Number of most significant zero bits before the first one
|
||||||
|
for h in hash.data:
|
||||||
|
static: assert sizeof(h) == 1
|
||||||
|
if h == 0:
|
||||||
|
result += 8
|
||||||
|
else:
|
||||||
|
result += countLeadingZeroBits(h)
|
||||||
|
break
|
||||||
|
|
||||||
|
proc calcPow(size, ttl: uint64, hash: Hash): float64 =
|
||||||
|
## Whisper proof-of-work is defined as the best bit of a hash divided by
|
||||||
|
## encoded size and time-to-live, such that large and long-lived messages get
|
||||||
|
## penalized
|
||||||
|
|
||||||
|
let bits = leadingZeroBits(hash) + 1
|
||||||
|
return pow(2.0, bits.float64) / (size.float64 * ttl.float64)
|
||||||
|
|
||||||
|
proc topicBloom*(topic: Topic): Bloom =
|
||||||
|
## Whisper uses 512-bit bloom filters meaning 9 bits of indexing - 3 9-bit
|
||||||
|
## indexes into the bloom are created using the first 3 bytes of the topic and
|
||||||
|
## complementing each byte with an extra bit from the last topic byte
|
||||||
|
for i in 0..<3:
|
||||||
|
var idx = uint16(topic[i])
|
||||||
|
if (topic[3] and byte(1 shl i)) != 0: # fetch the 9'th bit from the last byte
|
||||||
|
idx = idx + 256
|
||||||
|
|
||||||
|
assert idx <= 511
|
||||||
|
result[idx div 8] = result[idx div 8] or byte(1 shl (idx and 7'u16))
|
||||||
|
|
||||||
|
proc generateRandomID(): string =
|
||||||
|
var bytes: array[256 div 8, byte]
|
||||||
|
while true: # XXX: error instead of looping?
|
||||||
|
if randomBytes(bytes) == 256 div 8:
|
||||||
|
result = toHex(bytes)
|
||||||
|
break
|
||||||
|
|
||||||
|
proc `or`(a, b: Bloom): Bloom =
|
||||||
|
for i in 0..<a.len:
|
||||||
|
result[i] = a[i] or b[i]
|
||||||
|
|
||||||
|
proc bytesCopy(bloom: var Bloom, b: Bytes) =
|
||||||
|
assert b.len == bloomSize
|
||||||
|
copyMem(addr bloom[0], unsafeAddr b[0], bloomSize)
|
||||||
|
|
||||||
|
proc toBloom*(topics: openArray[Topic]): Bloom =
|
||||||
|
for topic in topics:
|
||||||
|
result = result or topicBloom(topic)
|
||||||
|
|
||||||
|
proc bloomFilterMatch(filter, sample: Bloom): bool =
|
||||||
|
for i in 0..<filter.len:
|
||||||
|
if (filter[i] or sample[i]) != filter[i]:
|
||||||
|
return false
|
||||||
|
return true
|
||||||
|
|
||||||
|
proc fullBloom*(): Bloom =
|
||||||
|
# There is no setMem exported in system, assume compiler is smart enough?
|
||||||
|
for i in 0..<result.len:
|
||||||
|
result[i] = 0xFF
|
||||||
|
|
||||||
|
proc encryptAesGcm(plain: openarray[byte], key: SymKey,
|
||||||
|
iv: array[gcmIVLen, byte]): Bytes =
|
||||||
|
## Encrypt using AES-GCM, making sure to append tag and iv, in that order
|
||||||
|
var gcm: GCM[aes256]
|
||||||
|
result = newSeqOfCap[byte](plain.len + gcmTagLen + iv.len)
|
||||||
|
result.setLen plain.len
|
||||||
|
gcm.init(key, iv, [])
|
||||||
|
gcm.encrypt(plain, result)
|
||||||
|
var tag: array[gcmTagLen, byte]
|
||||||
|
gcm.getTag(tag)
|
||||||
|
result.add tag
|
||||||
|
result.add iv
|
||||||
|
|
||||||
|
proc decryptAesGcm(cipher: openarray[byte], key: SymKey): Option[Bytes] =
|
||||||
|
## Decrypt AES-GCM ciphertext and validate authenticity - assumes
|
||||||
|
## cipher-tag-iv format of the buffer
|
||||||
|
if cipher.len < gcmTagLen + gcmIVLen:
|
||||||
|
debug "cipher missing tag/iv", len = cipher.len
|
||||||
|
return
|
||||||
|
let plainLen = cipher.len - gcmTagLen - gcmIVLen
|
||||||
|
var gcm: GCM[aes256]
|
||||||
|
var res = newSeq[byte](plainLen)
|
||||||
|
let iv = cipher[^gcmIVLen .. ^1]
|
||||||
|
let tag = cipher[^(gcmIVLen + gcmTagLen) .. ^(gcmIVLen + 1)]
|
||||||
|
gcm.init(key, iv, [])
|
||||||
|
gcm.decrypt(cipher[0 ..< ^(gcmIVLen + gcmTagLen)], res)
|
||||||
|
var tag2: array[gcmTagLen, byte]
|
||||||
|
gcm.getTag(tag2)
|
||||||
|
|
||||||
|
if tag != tag2:
|
||||||
|
debug "cipher tag mismatch", len = cipher.len, tag, tag2
|
||||||
|
return
|
||||||
|
return some(res)
|
||||||
|
|
||||||
|
# Payloads ---------------------------------------------------------------------
|
||||||
|
|
||||||
|
# Several differences between geth and parity - this code is closer to geth
|
||||||
|
# simply because that makes it closer to EIP 627 - see also:
|
||||||
|
# https://github.com/paritytech/parity-ethereum/issues/9652
|
||||||
|
|
||||||
|
proc encode*(self: Payload): Option[Bytes] =
|
||||||
|
## Encode a payload according so as to make it suitable to put in an Envelope
|
||||||
|
## The format follows EIP 627 - https://eips.ethereum.org/EIPS/eip-627
|
||||||
|
|
||||||
|
# XXX is this limit too high? We could limit it here but the protocol
|
||||||
|
# technically supports it..
|
||||||
|
if self.payload.len >= 256*256*256:
|
||||||
|
notice "Payload exceeds max length", len = self.payload.len
|
||||||
|
return
|
||||||
|
|
||||||
|
# length of the payload length field :)
|
||||||
|
let payloadLenLen =
|
||||||
|
if self.payload.len >= 256*256: 3'u8
|
||||||
|
elif self.payload.len >= 256: 2'u8
|
||||||
|
else: 1'u8
|
||||||
|
|
||||||
|
let signatureLen =
|
||||||
|
if self.src.isSome(): keys.RawSignatureSize
|
||||||
|
else: 0
|
||||||
|
|
||||||
|
# useful data length
|
||||||
|
let dataLen = flagsLen + payloadLenLen.int + self.payload.len + signatureLen
|
||||||
|
|
||||||
|
let padLen =
|
||||||
|
if self.padding.isSome(): self.padding.get().len
|
||||||
|
# is there a reason why 256 bytes are padded when the dataLen is 256?
|
||||||
|
else: padMaxLen - (dataLen mod padMaxLen)
|
||||||
|
|
||||||
|
# buffer space that we need to allocate
|
||||||
|
let totalLen = dataLen + padLen
|
||||||
|
|
||||||
|
var plain = newSeqOfCap[byte](totalLen)
|
||||||
|
|
||||||
|
let signatureFlag =
|
||||||
|
if self.src.isSome(): signatureBits
|
||||||
|
else: 0'u8
|
||||||
|
|
||||||
|
# byte 0: flags with payload length length and presence of signature
|
||||||
|
plain.add payloadLenLen or signatureFlag
|
||||||
|
|
||||||
|
# next, length of payload - little endian (who comes up with this stuff? why
|
||||||
|
# can't the world just settle on one endian?)
|
||||||
|
let payloadLenLE = self.payload.len.uint32.toLE
|
||||||
|
|
||||||
|
# No, I have no love for nim closed ranges - such a mess to remember the extra
|
||||||
|
# < or risk off-by-ones when working with lengths..
|
||||||
|
plain.add payloadLenLE[0..<payloadLenLen]
|
||||||
|
plain.add self.payload
|
||||||
|
|
||||||
|
if self.padding.isSome():
|
||||||
|
plain.add self.padding.get()
|
||||||
|
else:
|
||||||
|
var padding = newSeq[byte](padLen)
|
||||||
|
if randomBytes(padding) != padLen:
|
||||||
|
notice "Generation of random padding failed"
|
||||||
|
return
|
||||||
|
|
||||||
|
plain.add padding
|
||||||
|
|
||||||
|
if self.src.isSome(): # Private key present - signature requested
|
||||||
|
let hash = keccak256.digest(plain)
|
||||||
|
var sig: Signature
|
||||||
|
let err = signRawMessage(hash.data, self.src.get(), sig)
|
||||||
|
if err != EthKeysStatus.Success:
|
||||||
|
notice "Signing message failed", err
|
||||||
|
return
|
||||||
|
|
||||||
|
plain.add sig.getRaw()
|
||||||
|
|
||||||
|
if self.dst.isSome(): # Asymmetric key present - encryption requested
|
||||||
|
var res = newSeq[byte](eciesEncryptedLength(plain.len))
|
||||||
|
let err = eciesEncrypt(plain, res, self.dst.get())
|
||||||
|
if err != EciesStatus.Success:
|
||||||
|
notice "Encryption failed", err
|
||||||
|
return
|
||||||
|
return some(res)
|
||||||
|
|
||||||
|
if self.symKey.isSome(): # Symmetric key present - encryption requested
|
||||||
|
var iv: array[gcmIVLen, byte]
|
||||||
|
if randomBytes(iv) != gcmIVLen:
|
||||||
|
notice "Generation of random IV failed"
|
||||||
|
return
|
||||||
|
|
||||||
|
return some(encryptAesGcm(plain, self.symKey.get(), iv))
|
||||||
|
|
||||||
|
# No encryption!
|
||||||
|
return some(plain)
|
||||||
|
|
||||||
|
proc decode*(data: openarray[byte], dst = none[PrivateKey](),
|
||||||
|
symKey = none[SymKey]()): Option[DecodedPayload] =
|
||||||
|
## Decode data into payload, potentially trying to decrypt if keys are
|
||||||
|
## provided
|
||||||
|
|
||||||
|
# Careful throughout - data coming from unknown source - malformatted data
|
||||||
|
# expected
|
||||||
|
|
||||||
|
var res: DecodedPayload
|
||||||
|
|
||||||
|
var plain: Bytes
|
||||||
|
if dst.isSome():
|
||||||
|
# XXX: eciesDecryptedLength is pretty fragile, API-wise.. is this really the
|
||||||
|
# way to check for errors / sufficient length?
|
||||||
|
let plainLen = eciesDecryptedLength(data.len)
|
||||||
|
if plainLen < 0:
|
||||||
|
debug "Not enough data to decrypt", len = data.len
|
||||||
|
return
|
||||||
|
|
||||||
|
plain.setLen(eciesDecryptedLength(data.len))
|
||||||
|
if eciesDecrypt(data, plain, dst.get()) != EciesStatus.Success:
|
||||||
|
debug "Couldn't decrypt using asymmetric key", len = data.len
|
||||||
|
return
|
||||||
|
elif symKey.isSome():
|
||||||
|
let tmp = decryptAesGcm(data, symKey.get())
|
||||||
|
if tmp.isNone():
|
||||||
|
debug "Couldn't decrypt using symmetric key", len = data.len
|
||||||
|
return
|
||||||
|
|
||||||
|
plain = tmp.get()
|
||||||
|
else: # No encryption!
|
||||||
|
plain = @data
|
||||||
|
|
||||||
|
if plain.len < 2: # Minimum 1 byte flags, 1 byte payload len
|
||||||
|
debug "Missing flags or payload length", len = plain.len
|
||||||
|
return
|
||||||
|
|
||||||
|
var pos = 0
|
||||||
|
|
||||||
|
let payloadLenLen = int(plain[pos] and 0b11'u8)
|
||||||
|
let hasSignature = (plain[pos] and 0b100'u8) != 0
|
||||||
|
|
||||||
|
pos += 1
|
||||||
|
|
||||||
|
if plain.len < pos + payloadLenLen:
|
||||||
|
debug "Missing payload length", len = plain.len, pos, payloadLenLen
|
||||||
|
return
|
||||||
|
|
||||||
|
var payloadLenLE: array[4, byte]
|
||||||
|
|
||||||
|
for i in 0..<payloadLenLen: payloadLenLE[i] = plain[pos + i]
|
||||||
|
pos += payloadLenLen
|
||||||
|
|
||||||
|
let payloadLen = int(payloadLenLE.fromLE32())
|
||||||
|
if plain.len < pos + payloadLen:
|
||||||
|
debug "Missing payload", len = plain.len, pos, payloadLen
|
||||||
|
return
|
||||||
|
|
||||||
|
res.payload = plain[pos ..< pos + payloadLen]
|
||||||
|
|
||||||
|
pos += payloadLen
|
||||||
|
|
||||||
|
if hasSignature:
|
||||||
|
if plain.len < (keys.RawSignatureSize + pos):
|
||||||
|
debug "Missing expected signature", len = plain.len
|
||||||
|
return
|
||||||
|
|
||||||
|
let sig = plain[^keys.RawSignatureSize .. ^1]
|
||||||
|
let hash = keccak256.digest(plain[0 ..< ^keys.RawSignatureSize])
|
||||||
|
var key: PublicKey
|
||||||
|
let err = recoverSignatureKey(sig, hash.data, key)
|
||||||
|
if err != EthKeysStatus.Success:
|
||||||
|
debug "Failed to recover signature key", err
|
||||||
|
return
|
||||||
|
res.src = some(key)
|
||||||
|
|
||||||
|
if hasSignature:
|
||||||
|
if plain.len > pos + keys.RawSignatureSize:
|
||||||
|
res.padding = some(plain[pos .. ^(keys.RawSignatureSize+1)])
|
||||||
|
else:
|
||||||
|
if plain.len > pos:
|
||||||
|
res.padding = some(plain[pos .. ^1])
|
||||||
|
|
||||||
|
return some(res)
|
||||||
|
|
||||||
|
# Envelopes --------------------------------------------------------------------
|
||||||
|
|
||||||
|
proc valid*(self: Envelope, now = epochTime()): bool =
|
||||||
|
if self.expiry.float64 < now: return false # expired
|
||||||
|
if self.ttl <= 0: return false # this would invalidate pow calculation
|
||||||
|
|
||||||
|
let created = self.expiry - self.ttl
|
||||||
|
if created.float64 > (now + 2.0): return false # created in the future
|
||||||
|
|
||||||
|
return true
|
||||||
|
|
||||||
|
proc toShortRlp(self: Envelope): Bytes =
|
||||||
|
## RLP-encoded message without nonce is used during proof-of-work calculations
|
||||||
|
rlp.encodeList(self.expiry, self.ttl, self.topic, self.data)
|
||||||
|
|
||||||
|
proc toRlp(self: Envelope): Bytes =
|
||||||
|
## What gets sent out over the wire includes the nonce
|
||||||
|
rlp.encode(self)
|
||||||
|
|
||||||
|
# NOTE: minePow and calcPowHash are different from go-ethereum implementation.
|
||||||
|
# Is correct however with EIP-627, but perhaps this is not up to date.
|
||||||
|
# Follow-up here: https://github.com/ethereum/go-ethereum/issues/18070
|
||||||
|
|
||||||
|
proc minePow*(self: Envelope, seconds: float): uint64 =
|
||||||
|
## For the given envelope, spend millis milliseconds to find the
|
||||||
|
## best proof-of-work and return the nonce
|
||||||
|
let bytes = self.toShortRlp()
|
||||||
|
|
||||||
|
var ctx: keccak256
|
||||||
|
ctx.init()
|
||||||
|
ctx.update(bytes)
|
||||||
|
|
||||||
|
var bestPow: float64 = 0.0
|
||||||
|
|
||||||
|
let mineEnd = epochTime() + seconds
|
||||||
|
|
||||||
|
var i: uint64
|
||||||
|
while epochTime() < mineEnd or bestPow == 0: # At least one round
|
||||||
|
var tmp = ctx # copy hash calculated so far - we'll reuse that for each iter
|
||||||
|
tmp.update(i.toBE())
|
||||||
|
# XXX:a random nonce here would not leak number of iters
|
||||||
|
let pow = calcPow(1, 1, tmp.finish())
|
||||||
|
if pow > bestPow: # XXX: could also compare hashes as numbers instead
|
||||||
|
bestPow = pow
|
||||||
|
result = i.uint64
|
||||||
|
|
||||||
|
i.inc
|
||||||
|
|
||||||
|
proc calcPowHash*(self: Envelope): Hash =
|
||||||
|
## Calculate the message hash, as done during mining - this can be used to
|
||||||
|
## verify proof-of-work
|
||||||
|
|
||||||
|
let bytes = self.toShortRlp()
|
||||||
|
|
||||||
|
var ctx: keccak256
|
||||||
|
ctx.init()
|
||||||
|
ctx.update(bytes)
|
||||||
|
ctx.update(self.nonce.toBE())
|
||||||
|
return ctx.finish()
|
||||||
|
|
||||||
|
# Messages ---------------------------------------------------------------------
|
||||||
|
|
||||||
|
proc cmpPow(a, b: Message): int =
|
||||||
|
## Biggest pow first, lowest at the end (for easy popping)
|
||||||
|
if a.pow > b.pow: 1
|
||||||
|
elif a.pow == b.pow: 0
|
||||||
|
else: -1
|
||||||
|
|
||||||
|
proc initMessage*(env: Envelope): Message =
|
||||||
|
result.env = env
|
||||||
|
result.hash = env.calcPowHash()
|
||||||
|
result.size = env.toRlp().len().uint32 # XXX: calc len without creating RLP
|
||||||
|
result.pow = calcPow(result.size, result.env.ttl, result.hash)
|
||||||
|
result.bloom = topicBloom(env.topic)
|
||||||
|
|
||||||
|
proc hash*(msg: Message): hashes.Hash = hash(msg.hash.data)
|
||||||
|
|
||||||
|
proc allowed*(msg: Message, config: WhisperConfig): bool =
|
||||||
|
# Check max msg size, already happens in RLPx but there is a specific shh
|
||||||
|
# max msg size which should always be < RLPx max msg size
|
||||||
|
if msg.size > config.maxMsgSize:
|
||||||
|
warn "Message size too large", size = msg.size
|
||||||
|
return false
|
||||||
|
|
||||||
|
if msg.pow < config.powRequirement:
|
||||||
|
warn "Message PoW too low", pow = msg.pow, minPow = config.powRequirement
|
||||||
|
return false
|
||||||
|
|
||||||
|
if not bloomFilterMatch(config.bloom, msg.bloom):
|
||||||
|
warn "Message does not match node bloom filter"
|
||||||
|
return false
|
||||||
|
|
||||||
|
return true
|
||||||
|
|
||||||
|
# Queues -----------------------------------------------------------------------
|
||||||
|
|
||||||
|
proc initQueue*(capacity: int): Queue =
|
||||||
|
result.items = newSeqOfCap[Message](capacity)
|
||||||
|
result.capacity = capacity
|
||||||
|
result.itemHashes.init()
|
||||||
|
|
||||||
|
proc prune(self: var Queue) =
|
||||||
|
## Remove items that are past their expiry time
|
||||||
|
let now = epochTime().uint32
|
||||||
|
|
||||||
|
# keepIf code + pruning of hashset
|
||||||
|
var pos = 0
|
||||||
|
for i in 0 ..< len(self.items):
|
||||||
|
if self.items[i].env.expiry > now:
|
||||||
|
if pos != i:
|
||||||
|
shallowCopy(self.items[pos], self.items[i])
|
||||||
|
inc(pos)
|
||||||
|
else: self.itemHashes.excl(self.items[i])
|
||||||
|
setLen(self.items, pos)
|
||||||
|
|
||||||
|
proc add*(self: var Queue, msg: Message): bool =
  ## Add a message to the queue.
  ## If we're at capacity, we will be removing, in order:
  ## * expired messages
  ## * lowest proof-of-work message - this may be `msg` itself!
  ## Returns false when `msg` was dropped or is a duplicate.

  if self.items.len >= self.capacity:
    self.prune() # Only prune if needed

    if self.items.len >= self.capacity:
      # Still no room - go by proof-of-work quantity
      # `items` is kept ordered by PoW (see insert below), so the last
      # element is the current weakest message.
      let last = self.items[^1]

      if last.pow > msg.pow or
        (last.pow == msg.pow and last.env.expiry > msg.env.expiry):
        # The new message has less pow or will expire earlier - drop it
        return false

      # Evict the weakest message; it is the last element, so `del` (which
      # swaps with the tail) removes exactly it.
      self.items.del(self.items.len() - 1)
      self.itemHashes.excl(last)

  # check for duplicate
  if self.itemHashes.containsOrIncl(msg):
    return false
  else:
    # Keep the seq sorted by PoW so eviction above stays O(1).
    self.items.insert(msg, self.items.lowerBound(msg, cmpPow))
    return true
|
||||||
|
|
||||||
|
# Filters ----------------------------------------------------------------------
|
||||||
|
proc newFilter*(src = none[PublicKey](), privateKey = none[PrivateKey](),
                symKey = none[SymKey](), topics: seq[Topic] = @[],
                powReq = 0.0, allowP2P = false): Filter =
  ## Builds a `Filter` value; its bloom is precomputed from `topics`.
  # An empty topic list yields an empty bloom filter, which is fine: this
  # bloom is only `or`-ed into other blooms, never used for matching.
  result = Filter(src: src, privateKey: privateKey, symKey: symKey,
                  topics: topics, powReq: powReq, allowP2P: allowP2P,
                  bloom: toBloom(topics))
|
||||||
|
|
||||||
|
proc subscribeFilter*(filters: var Filters, filter: Filter,
                      handler:FilterMsgHandler = nil): string =
  ## Registers `filter` under a freshly generated id and returns that id.
  ## Without a handler, matching messages are buffered for later polling;
  ## with one, the handler is invoked per matching message instead.
  # NOTE: Should we allow a filter without a key? Encryption is mandatory in v6?
  # Check if asymmetric _and_ symmetric key? Now asymmetric just has precedence.
  var entry = filter
  if handler.isNil():
    entry.queue = newSeqOfCap[ReceivedMessage](defaultFilterQueueCapacity)
  else:
    entry.handler = handler

  let id = generateRandomID()
  filters.add(id, entry)
  debug "Filter added", filter = id
  id
|
||||||
|
|
||||||
|
proc notify*(filters: var Filters, msg: Message) {.gcsafe.} =
  ## Runs `msg` past every installed filter; each match is either queued on
  ## the filter or passed to its handler callback.
  # The decoded payload is cached across filters to avoid decrypting more
  # than once; `keyHash` remembers which key produced the cached decode so
  # filters with a different key skip the stale result.
  var decoded: Option[DecodedPayload]
  var keyHash: Hash

  for filter in filters.mvalues:
    if not filter.allowP2P and msg.isP2P:
      continue

    # if message is direct p2p PoW doesn't matter
    if msg.pow < filter.powReq and not msg.isP2P:
      continue

    if filter.topics.len > 0:
      if msg.env.topic notin filter.topics:
        continue

    # Decode, if already decoded previously check if hash of key matches
    if decoded.isNone():
      decoded = decode(msg.env.data, dst = filter.privateKey,
                       symKey = filter.symKey)
      if filter.privateKey.isSome():
        keyHash = keccak256.digest(filter.privateKey.get().data)
      elif filter.symKey.isSome():
        keyHash = keccak256.digest(filter.symKey.get())
      # else:
        # NOTE: should we error on messages without encryption?
      if decoded.isNone():
        continue
    else:
      # Cached decode exists: only reuse it when this filter's key hashes to
      # the same value as the key that produced it.
      if filter.privateKey.isSome():
        if keyHash != keccak256.digest(filter.privateKey.get().data):
          continue
      elif filter.symKey.isSome():
        if keyHash != keccak256.digest(filter.symKey.get()):
          continue
      # else:
        # NOTE: should we error on messages without encryption?

    # When decoding is done we can check the src (signature)
    if filter.src.isSome():
      let src: Option[PublicKey] = decoded.get().src
      if not src.isSome():
        continue
      elif src.get() != filter.src.get():
        continue

    # timestamp is reconstructed as the envelope's creation time.
    let receivedMsg = ReceivedMessage(decoded: decoded.get(),
                                      timestamp: msg.env.expiry - msg.env.ttl,
                                      ttl: msg.env.ttl,
                                      topic: msg.env.topic,
                                      pow: msg.pow,
                                      hash: msg.hash)
    # Either run callback or add to queue
    if filter.handler.isNil():
      filter.queue.insert(receivedMsg)
    else:
      filter.handler(receivedMsg)
|
||||||
|
|
||||||
|
proc getFilterMessages*(filters: var Filters, filterId: string): seq[ReceivedMessage] =
  ## Drains and returns the buffered messages of filter `filterId`.
  ## Yields an empty seq for unknown ids or handler-based filters.
  result = @[]
  if filterId in filters:
    if filters[filterId].handler.isNil():
      shallowCopy(result, filters[filterId].queue)
      filters[filterId].queue =
        newSeqOfCap[ReceivedMessage](defaultFilterQueueCapacity)
|
||||||
|
|
||||||
|
proc toBloom*(filters: Filters): Bloom =
  ## OR-combination of the blooms of every filter that specifies topics.
  for f in filters.values:
    if f.topics.len > 0:
      result = result or f.bloom
|
||||||
|
|
||||||
|
type
  WhisperPeer = ref object
    ## Whisper-specific state tracked per connected peer.
    initialized*: bool # when successfully completed the handshake
    powRequirement*: float64 # minimum PoW the remote accepts (from handshake)
    bloom*: Bloom # remote's topic bloom; full bloom when none was sent
    isLightNode*: bool # remote announced itself as a light node
    trusted*: bool # trusted peers may bypass checks via p2pMessage
    received: HashSet[Message] # messages already exchanged with this peer
    running*: bool # relay-loop flag; cleared on disconnect

  WhisperNetwork = ref object
    ## Whisper state shared by the whole node.
    queue*: Queue # expiring message queue relayed to peers
    filters*: Filters # installed subscription filters
    config*: WhisperConfig # node-level acceptance policy
|
||||||
|
|
||||||
|
proc run(peer: Peer) {.gcsafe, async.}
|
||||||
|
proc run(node: EthereumNode, network: WhisperNetwork) {.gcsafe, async.}
|
||||||
|
|
||||||
|
proc initProtocolState*(network: WhisperNetwork, node: EthereumNode) {.gcsafe.} =
  ## Sets up the shared Whisper state with protocol defaults and kicks off
  ## the background maintenance loop.
  network.queue = initQueue(defaultQueueCapacity)
  network.filters = initTable[string, Filter]()
  network.config.powRequirement = defaultMinPow
  network.config.bloom = fullBloom()
  network.config.maxMsgSize = defaultMaxMsgSize
  network.config.isLightNode = false
  asyncCheck node.run(network)
|
||||||
|
|
||||||
|
## RLPx subprotocol definition for Whisper v6 ("shh"): handshake, message
## relay, PoW/bloom updates and direct (p2p) messaging.
p2pProtocol Whisper(version = whisperVersion,
                    shortName = "shh",
                    peerState = WhisperPeer,
                    networkState = WhisperNetwork):

  onPeerConnected do (peer: Peer):
    debug "onPeerConnected Whisper"
    let
      whisperNet = peer.networkState
      whisperPeer = peer.state

    # Exchange status messages; our PoW requirement is sent bit-cast to uint.
    let m = await handshake(peer, timeout = 500,
                            status(whisperVersion,
                                   cast[uint](whisperNet.config.powRequirement),
                                   @(whisperNet.config.bloom),
                                   whisperNet.config.isLightNode))

    if m.protocolVersion == whisperVersion:
      debug "Whisper peer", peer, whisperVersion
    else:
      raise newException(UselessPeerError, "Incompatible Whisper version")

    whisperPeer.powRequirement = cast[float64](m.powConverted)

    if m.bloom.len > 0:
      if m.bloom.len != bloomSize:
        raise newException(UselessPeerError, "Bloomfilter size mismatch")
      else:
        whisperPeer.bloom.bytesCopy(m.bloom)
    else:
      # If no bloom filter is send we allow all
      whisperPeer.bloom = fullBloom()

    whisperPeer.isLightNode = m.isLightNode
    if whisperPeer.isLightNode and whisperNet.config.isLightNode:
      # No sense in connecting two light nodes so we disconnect
      raise newException(UselessPeerError, "Two light nodes connected")

    whisperPeer.received.init()
    whisperPeer.trusted = false
    whisperPeer.initialized = true

    # Light nodes do not relay, so they skip the per-peer relay loop.
    if not whisperNet.config.isLightNode:
      asyncCheck peer.run()

    debug "Whisper peer initialized"

  onPeerDisconnected do (peer: Peer, reason: DisconnectionReason) {.gcsafe.}:
    peer.state.running = false

  proc status(peer: Peer,
              protocolVersion: uint,
              powConverted: uint,
              bloom: Bytes,
              isLightNode: bool) =
    discard

  proc messages(peer: Peer, envelopes: openarray[Envelope]) =
    ## Incoming batch of envelopes from `peer`; valid new messages are queued
    ## and local filters notified.
    if not peer.state.initialized:
      warn "Handshake not completed yet, discarding messages"
      return

    for envelope in envelopes:
      # check if expired or in future, or ttl not 0
      if not envelope.valid():
        warn "Expired or future timed envelope"
        # disconnect from peers sending bad envelopes
        # await peer.disconnect(SubprotocolReason)
        continue

      let msg = initMessage(envelope)
      if not msg.allowed(peer.networkState.config):
        # disconnect from peers sending bad envelopes
        # await peer.disconnect(SubprotocolReason)
        continue

      # This peer send this message thus should not receive it again.
      # If this peer has the message in the `received` set already, this means
      # it was either already received here from this peer or send to this peer.
      # Either way it will be in our queue already (and the peer should know
      # this) and this peer is sending duplicates.
      if peer.state.received.containsOrIncl(msg):
        warn "Peer sending duplicate messages"
        # await peer.disconnect(SubprotocolReason)
        continue

      # This can still be a duplicate message, but from another peer than
      # the peer who send the message.
      if peer.networkState.queue.add(msg):
        # notify filters of this message
        peer.networkState.filters.notify(msg)

  proc powRequirement(peer: Peer, value: uint) =
    ## Remote updated its minimum accepted PoW (bit-cast float64 in `value`).
    if not peer.state.initialized:
      warn "Handshake not completed yet, discarding powRequirement"
      return

    peer.state.powRequirement = cast[float64](value)

  proc bloomFilterExchange(peer: Peer, bloom: Bytes) =
    ## Remote updated its topic bloom filter.
    if not peer.state.initialized:
      warn "Handshake not completed yet, discarding bloomFilterExchange"
      return

    peer.state.bloom.bytesCopy(bloom)

  nextID 126

  proc p2pRequest(peer: Peer, envelope: Envelope) =
    # TODO: here we would have to allow to insert some specific implementation
    # such as e.g. Whisper Mail Server
    discard

  proc p2pMessage(peer: Peer, envelope: Envelope) =
    ## Direct message; only honored from trusted peers.
    if peer.state.trusted:
      # when trusted we can bypass any checks on envelope
      let msg = Message(env: envelope, isP2P: true)
      peer.networkState.filters.notify(msg)
|
||||||
|
|
||||||
|
# 'Runner' calls ---------------------------------------------------------------
|
||||||
|
|
||||||
|
proc processQueue(peer: Peer) =
  ## Collects queued messages this peer has not yet seen and that satisfy its
  ## PoW/bloom requirements, then ships them in a single `messages` packet.
  var
    pending: seq[Envelope] = @[]
    whisperPeer = peer.state(Whisper)
    whisperNet = peer.networkState(Whisper)

  for message in whisperNet.queue.items:
    if message in whisperPeer.received:
      # debug "message was already send to peer"
      continue

    if message.pow < whisperPeer.powRequirement:
      debug "Message PoW too low for peer"
      continue

    if not bloomFilterMatch(whisperPeer.bloom, message.bloom):
      debug "Message does not match peer bloom filter"
      continue

    debug "Adding envelope"
    pending.add(message.env)
    # Remember we sent it so it is never relayed to this peer again.
    whisperPeer.received.incl(message)

  debug "Sending envelopes", amount=pending.len
  # await peer.messages(envelopes)
  asyncCheck peer.messages(pending)
|
||||||
|
|
||||||
|
proc run(peer: Peer) {.async.} =
  ## Per-peer relay loop: periodically forwards eligible queued messages to
  ## `peer` until `running` is cleared (done in `onPeerDisconnected`).
  # Fix: dropped the unused `whisperNet = peer.networkState(Whisper)` local
  # that the original declared but never read.
  var whisperPeer = peer.state(Whisper)

  whisperPeer.running = true
  while whisperPeer.running:
    peer.processQueue()
    await sleepAsync(messageInterval)
|
||||||
|
|
||||||
|
proc pruneReceived(node: EthereumNode) =
  ## Shrinks every initialized peer's `received` set to the messages that are
  ## still present in the shared queue.
  if node.peerPool == nil: # XXX: a bit dirty to need to check for this here ...
    return

  var whisperNet = node.protocolState(Whisper)
  for p in node.protocolPeers(Whisper):
    if p.initialized:
      # NOTE: Perhaps alter the queue prune call to keep track of a HashSet
      # of pruned messages (as these should be smaller), and diff this with
      # the received sets.
      p.received = intersection(p.received, whisperNet.queue.itemHashes)
|
||||||
|
|
||||||
|
proc run(node: EthereumNode, network: WhisperNetwork) {.async.} =
  ## Background maintenance loop: drops expired messages from the shared
  ## queue and trims per-peer `received` sets.
  while true:
    # TTL has second granularity, so pruning once per interval should be
    # sufficient.
    network.queue.prune()
    # Pruning the received sets is not needed for correctness, it merely
    # keeps them from growing indefinitely.
    node.pruneReceived()
    await sleepAsync(pruneInterval)
|
||||||
|
|
||||||
|
# Public EthereumNode calls ----------------------------------------------------
|
||||||
|
|
||||||
|
proc sendP2PMessage*(node: EthereumNode, peerId: NodeId, env: Envelope): bool =
  ## Sends `env` directly (p2p, bypassing the relay queue) to the peer with
  ## id `peerId`. Returns true when a matching connected peer was found.
  for p in node.peers(Whisper):
    if p.remote.id == peerId:
      asyncCheck p.p2pMessage(env)
      return true
|
||||||
|
|
||||||
|
proc sendMessage*(node: EthereumNode, env: var Envelope): bool =
  ## Queues `env` for relay to peers, notifying local filters as well.
  ## Returns false when the envelope is invalid or fails this node's own
  ## acceptance checks.
  if not env.valid(): # strictly only ttl != 0 matters at this point
    return false

  var whisperNet = node.protocolState(Whisper)
  # We have to do the same checks here as in the messages proc not to leak
  # any information that the message originates from this node.
  let msg = initMessage(env)
  if not msg.allowed(whisperNet.config):
    return false

  debug "Adding message to queue"
  if whisperNet.queue.add(msg):
    # Also notify our own filters of the message we are sending,
    # e.g. msg from local Dapp to Dapp
    whisperNet.filters.notify(msg)
  return true
|
||||||
|
|
||||||
|
proc postMessage*(node: EthereumNode, pubKey = none[PublicKey](),
                  symKey = none[SymKey](), src = none[PrivateKey](),
                  ttl: uint32, topic: Topic, payload: Bytes,
                  padding = none[Bytes](), powTime = 1'f,
                  targetPeer = none[NodeId]()): bool =
  ## Encodes and posts a message: either directly to `targetPeer` (p2p), or
  ## mined for `powTime` seconds and queued for relay. Returns false when
  ## encoding fails or a light node attempts a non-direct post.
  # NOTE: Allow a post without a key? Encryption is mandatory in v6?
  let payload = encode(Payload(payload: payload, src: src, dst: pubKey,
                               symKey: symKey, padding: padding))
  if payload.isSome():
    # Expiry includes the mining budget so the envelope is not already
    # near-expired once PoW is done.
    var env = Envelope(expiry:epochTime().uint32 + ttl + powTime.uint32,
                       ttl: ttl, topic: topic, data: payload.get(), nonce: 0)

    # Allow lightnode to post only direct p2p messages
    if targetPeer.isSome():
      return node.sendP2PMessage(targetPeer.get(), env)
    elif not node.protocolState(Whisper).config.isLightNode:
      # XXX: make this non blocking or not?
      # In its current blocking state, it could be noticed by a peer that no
      # messages are send for a while, and thus that mining PoW is done, and
      # that next messages contains a message originated from this peer
      # zah: It would be hard to execute this in a background thread at the
      # moment. We'll need a way to send custom "tasks" to the async message
      # loop (e.g. AD2 support for AsyncChannels).
      env.nonce = env.minePow(powTime)
      return node.sendMessage(env)
    else:
      error "Light node not allowed to post messages"
      return false
  else:
    error "Encoding of payload failed"
    return false
|
||||||
|
|
||||||
|
proc subscribeFilter*(node: EthereumNode, filter: Filter,
                      handler:FilterMsgHandler = nil): string =
  ## Installs `filter` on this node and returns the generated filter id.
  node.protocolState(Whisper).filters.subscribeFilter(filter, handler)
|
||||||
|
|
||||||
|
proc unsubscribeFilter*(node: EthereumNode, filterId: string): bool =
  ## Removes filter `filterId`; returns false when no such filter exists.
  var removed: Filter
  node.protocolState(Whisper).filters.take(filterId, removed)
|
||||||
|
|
||||||
|
proc getFilterMessages*(node: EthereumNode, filterId: string): seq[ReceivedMessage] =
  ## Drains and returns the queued messages of filter `filterId`.
  node.protocolState(Whisper).filters.getFilterMessages(filterId)
|
||||||
|
|
||||||
|
proc filtersToBloom*(node: EthereumNode): Bloom =
  ## Combined bloom filter over all installed filters' topics.
  node.protocolState(Whisper).filters.toBloom()
|
||||||
|
|
||||||
|
proc setPowRequirement*(node: EthereumNode, powReq: float64) {.async.} =
  ## Updates the local PoW requirement and broadcasts it to all Whisper peers.
  # NOTE: do we need a tolerance of old PoW for some time?
  node.protocolState(Whisper).config.powRequirement = powReq
  var sends: seq[Future[void]] = @[]
  for p in node.peers(Whisper):
    # PoW is transmitted bit-cast into a uint.
    sends.add(p.powRequirement(cast[uint](powReq)))

  await all(sends)
|
||||||
|
|
||||||
|
proc setBloomFilter*(node: EthereumNode, bloom: Bloom) {.async.} =
  ## Updates the local bloom filter and broadcasts it to all Whisper peers.
  # NOTE: do we need a tolerance of old bloom filter for some time?
  node.protocolState(Whisper).config.bloom = bloom
  var sends: seq[Future[void]] = @[]
  for p in node.peers(Whisper):
    sends.add(p.bloomFilterExchange(@bloom))

  await all(sends)
|
||||||
|
|
||||||
|
proc setMaxMessageSize*(node: EthereumNode, size: uint32): bool =
  ## Sets the maximum accepted message size; values above the protocol
  ## default are refused (returns false).
  if size > defaultMaxMsgSize:
    error "size > maxMsgSize"
    return false
  node.protocolState(Whisper).config.maxMsgSize = size
  true
|
||||||
|
|
||||||
|
proc setPeerTrusted*(node: EthereumNode, peerId: NodeId): bool =
  ## Marks the connected peer with `peerId` as trusted, allowing it to send
  ## direct p2p messages. Returns true when the peer was found.
  for p in node.peers(Whisper):
    if p.remote.id == peerId:
      p.state(Whisper).trusted = true
      return true
|
||||||
|
|
||||||
|
# NOTE: Should be run before connection is made with peers
|
||||||
|
proc setLightNode*(node: EthereumNode, isLightNode: bool) =
  ## Toggles light-node mode; should be run before peers are connected.
  node.protocolState(Whisper).config.isLightNode = isLightNode
|
||||||
|
|
||||||
|
# NOTE: Should be run before connection is made with peers
|
||||||
|
proc configureWhisper*(node: EthereumNode, config: WhisperConfig) =
  ## Replaces the whole Whisper config; should be run before peers connect.
  node.protocolState(Whisper).config = config
|
||||||
|
|
||||||
|
# Not something that should be run in normal circumstances
|
||||||
|
proc resetMessageQueue*(node: EthereumNode) =
  ## Discards all queued messages; not something to run in normal operation.
  node.protocolState(Whisper).queue = initQueue(defaultQueueCapacity)
|
|
@ -0,0 +1,235 @@
|
||||||
|
#
|
||||||
|
# Ethereum P2P
|
||||||
|
# (c) Copyright 2018
|
||||||
|
# Status Research & Development GmbH
|
||||||
|
#
|
||||||
|
# Licensed under either of
|
||||||
|
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||||
|
# MIT license (LICENSE-MIT)
|
||||||
|
#
|
||||||
|
|
||||||
|
## This module implements RLPx cryptography
|
||||||
|
|
||||||
|
import ranges/stackarrays, eth/rlp/types, nimcrypto
|
||||||
|
from auth import ConnectionSecret
|
||||||
|
|
||||||
|
const
  RlpHeaderLength* = 16
  RlpMacLength* = 16
  # Fix: largest value that fits the 3-byte frame-size field is 2^24 - 1,
  # i.e. `(not uint32(0)) shr 8` == 0xFFFFFF. The previous `shl 8`
  # (0xFFFFFF00) let messages larger than 2^24 - 1 bytes pass the overflow
  # check in `encryptMsg` and get silently truncated when written into the
  # 24-bit header size.
  maxUInt24 = (not uint32(0)) shr 8
|
||||||
|
|
||||||
|
type
  SecretState* = object
    ## Object represents current encryption/decryption context.
    aesenc*: CTR[aes256] # frame cipher, egress direction
    aesdec*: CTR[aes256] # frame cipher, ingress direction
    macenc*: ECB[aes256] # cipher used to whiten MAC seeds
    emac*: keccak256 # running egress MAC state
    imac*: keccak256 # running ingress MAC state

  RlpxStatus* = enum
    Success, ## Operation was successful
    IncorrectMac, ## MAC verification failed
    BufferOverrun, ## Buffer overrun error
    IncompleteError, ## Data incomplete error
    IncorrectArgs ## Incorrect arguments

  RlpxHeader* = array[16, byte] # fixed-size RLPx frame header

|
||||||
|
|
||||||
|
proc roundup16*(x: int): int {.inline.} =
  ## Rounds `x` up to the next multiple of 16 (the AES block size); returns
  ## `x` unchanged when it is already a multiple of 16.
  ## (Fixes the previously truncated doc comment.)
  let rem = x and 15
  if rem != 0:
    result = x + 16 - rem
  else:
    result = x
|
||||||
|
|
||||||
|
# Shorthand: openarray view of `c` elements of `a` starting at offset `b`.
template toa(a, b, c: untyped): untyped =
  toOpenArray((a), (b), (b) + (c) - 1)
|
||||||
|
|
||||||
|
proc sxor[T](a: var openarray[T], b: openarray[T]) {.inline.} =
  ## In-place XOR: `a[i] = a[i] xor b[i]` for every index.
  ## Both arrays must have the same length.
  assert(len(a) == len(b))
  for idx in 0 ..< a.len:
    a[idx] = a[idx] xor b[idx]
|
||||||
|
|
||||||
|
proc initSecretState*(secrets: ConnectionSecret, context: var SecretState) =
  ## Initialized `context` with values from `secrets`.

  # FIXME: Yes, the encryption is insecure,
  # see: https://github.com/ethereum/devp2p/issues/32
  # https://github.com/ethereum/py-evm/blob/master/p2p/peer.py#L159-L160
  # Zero IV; both directions share the same AES key per the RLPx spec.
  var iv: array[context.aesenc.sizeBlock, byte]
  context.aesenc.init(secrets.aesKey, iv)
  context.aesdec = context.aesenc
  context.macenc.init(secrets.macKey)
  context.emac = secrets.egressMac
  context.imac = secrets.ingressMac
|
||||||
|
|
||||||
|
template encryptedLength*(size: int): int =
  ## Returns the number of bytes used by the entire frame of a
  ## message with size `size`:
  ## header + header MAC + padded body + body MAC.
  RlpHeaderLength + roundup16(size) + 2 * RlpMacLength
|
||||||
|
|
||||||
|
template decryptedLength*(size: int): int =
  ## Returns size of decrypted message for body with length `size`
  ## (the body is padded to a multiple of the AES block size).
  roundup16(size)
|
||||||
|
|
||||||
|
proc encrypt*(c: var SecretState, header: openarray[byte],
              frame: openarray[byte],
              output: var openarray[byte]): RlpxStatus =
  ## Encrypts `header` and `frame` using SecretState `c` context and store
  ## result into `output`.
  ##
  ## `header` must be exactly `RlpHeaderLength` length.
  ## `frame` must not be zero length.
  ## `output` must be at least `encryptedLength(len(frame))` length.
  ##
  ## Output layout: header ciphertext | header MAC | frame ciphertext
  ## (padded to 16) | frame MAC. The inline `# ...` comments mirror the
  ## reference py-evm implementation this was ported from.
  var
    tmpmac: keccak256
    aes: array[RlpHeaderLength, byte]
  let length = encryptedLength(len(frame))
  let frameLength = roundup16(len(frame))
  let headerMacPos = RlpHeaderLength
  let framePos = RlpHeaderLength + RlpMacLength
  let frameMacPos = RlpHeaderLength * 2 + frameLength
  if len(header) != RlpHeaderLength or len(frame) == 0 or length != len(output):
    return IncorrectArgs
  # header_ciphertext = self.aes_enc.update(header)
  c.aesenc.encrypt(header, toa(output, 0, RlpHeaderLength))
  # mac_secret = self.egress_mac.digest()[:HEADER_LEN]
  # keccak `finish` is destructive, so a copy of the running MAC is taken.
  tmpmac = c.emac
  var macsec = tmpmac.finish()
  # self.egress_mac.update(sxor(self.mac_enc(mac_secret), header_ciphertext))
  c.macenc.encrypt(toa(macsec.data, 0, RlpHeaderLength), aes)
  sxor(aes, toa(output, 0, RlpHeaderLength))
  c.emac.update(aes)
  burnMem(aes)
  # header_mac = self.egress_mac.digest()[:HEADER_LEN]
  tmpmac = c.emac
  var headerMac = tmpmac.finish()
  # frame_ciphertext = self.aes_enc.update(frame)
  # The frame is encrypted in place inside `output` (padding bytes beyond
  # len(frame) are whatever `output` held — callers pass zeroed buffers).
  copyMem(addr output[framePos], unsafeAddr frame[0], len(frame))
  c.aesenc.encrypt(toa(output, 32, frameLength), toa(output, 32, frameLength))
  # self.egress_mac.update(frame_ciphertext)
  c.emac.update(toa(output, 32, frameLength))
  # fmac_seed = self.egress_mac.digest()[:HEADER_LEN]
  tmpmac = c.emac
  var seed = tmpmac.finish()
  # mac_secret = self.egress_mac.digest()[:HEADER_LEN]
  macsec = seed
  # self.egress_mac.update(sxor(self.mac_enc(mac_secret), fmac_seed))
  c.macenc.encrypt(toa(macsec.data, 0, RlpHeaderLength), aes)
  sxor(aes, toa(seed.data, 0, RlpHeaderLength))
  c.emac.update(aes)
  burnMem(aes)
  # frame_mac = self.egress_mac.digest()[:HEADER_LEN]
  tmpmac = c.emac
  var frameMac = tmpmac.finish()
  tmpmac.clear()
  # return header_ciphertext + header_mac + frame_ciphertext + frame_mac
  copyMem(addr output[headerMacPos], addr headerMac.data[0], RlpHeaderLength)
  copyMem(addr output[frameMacPos], addr frameMac.data[0], RlpHeaderLength)
  result = Success
|
||||||
|
|
||||||
|
proc encryptMsg*(msg: openarray[byte], secrets: var SecretState): seq[byte] =
  ## Wraps `msg` into a full encrypted RLPx frame (header + MACs + body)
  ## and returns the freshly allocated ciphertext buffer.
  ## Raises OverflowError when `msg` does not fit the 24-bit size field.
  var header: RlpxHeader

  if uint32(msg.len) > maxUInt24:
    raise newException(OverflowError, "RLPx message size exceeds limit")

  # write the frame size in the first 3 bytes of the header
  header[0] = byte((msg.len shr 16) and 0xFF)
  header[1] = byte((msg.len shr 8) and 0xFF)
  header[2] = byte(msg.len and 0xFF)

  # XXX:
  # This would be safer if we use a thread-local seq for the temporary buffer
  result = newSeq[byte](encryptedLength(msg.len))
  let s = encrypt(secrets, header, msg, result)
  assert s == Success
|
||||||
|
|
||||||
|
proc getBodySize*(a: RlpxHeader): int =
  ## Reads the 24-bit big-endian body size stored in the first three
  ## header bytes.
  result = int(a[0])
  result = (result shl 8) or int(a[1])
  result = (result shl 8) or int(a[2])
|
||||||
|
|
||||||
|
proc decryptHeader*(c: var SecretState, data: openarray[byte],
                    output: var openarray[byte]): RlpxStatus =
  ## Decrypts header `data` using SecretState `c` context and store
  ## result into `output`.
  ##
  ## `header` must be exactly `RlpHeaderLength + RlpMacLength` length.
  ## `output` must be at least `RlpHeaderLength` length.
  ##
  ## The inline `# ...` comments mirror the reference py-evm implementation.
  var
    tmpmac: keccak256
    aes: array[RlpHeaderLength, byte]

  if len(data) != RlpHeaderLength + RlpMacLength:
    return IncompleteError
  if len(output) < RlpHeaderLength:
    return IncorrectArgs
  # mac_secret = self.ingress_mac.digest()[:HEADER_LEN]
  # keccak `finish` is destructive, so a copy of the running MAC is taken.
  tmpmac = c.imac
  var macsec = tmpmac.finish()
  # aes = self.mac_enc(mac_secret)[:HEADER_LEN]
  c.macenc.encrypt(toa(macsec.data, 0, RlpHeaderLength), aes)
  # self.ingress_mac.update(sxor(aes, header_ciphertext))
  sxor(aes, toa(data, 0, RlpHeaderLength))
  c.imac.update(aes)
  burnMem(aes)
  # expected_header_mac = self.ingress_mac.digest()[:HEADER_LEN]
  tmpmac = c.imac
  var expectMac = tmpmac.finish()
  # if not bytes_eq(expected_header_mac, header_mac):
  # NOTE(review): equalMem is not a constant-time comparison; consider a
  # constant-time compare for MAC verification.
  let headerMacPos = RlpHeaderLength
  if not equalMem(cast[pointer](unsafeAddr data[headerMacPos]),
                  cast[pointer](addr expectMac.data[0]), RlpMacLength):
    result = IncorrectMac
  else:
    # return self.aes_dec.update(header_ciphertext)
    c.aesdec.decrypt(toa(data, 0, RlpHeaderLength), output)
    result = Success
|
||||||
|
|
||||||
|
proc decryptHeaderAndGetMsgSize*(c: var SecretState,
                                 encryptedHeader: openarray[byte],
                                 outSize: var int): RlpxStatus =
  ## Decrypts an RLPx frame header and, on success, writes the announced
  ## body size into `outSize`. Returns the status of the header decryption.
  var plainHeader: RlpxHeader
  result = decryptHeader(c, encryptedHeader, plainHeader)
  if result == Success:
    outSize = plainHeader.getBodySize
|
||||||
|
|
||||||
|
proc decryptBody*(c: var SecretState, data: openarray[byte], bodysize: int,
                  output: var openarray[byte], outlen: var int): RlpxStatus =
  ## Decrypts body `data` using SecretState `c` context and store
  ## result into `output`.
  ##
  ## `data` must be at least `roundup16(bodysize) + RlpMacLength` length.
  ## `output` must be at least `roundup16(bodysize)` length.
  ##
  ## On success completion `outlen` will hold actual size of decrypted body.
  ##
  ## The inline `# ...` comments mirror the reference py-evm implementation.
  var
    tmpmac: keccak256
    aes: array[RlpHeaderLength, byte]
  outlen = 0
  let rsize = roundup16(bodysize)
  if len(data) < rsize + RlpMacLength:
    return IncompleteError
  if len(output) < rsize:
    return IncorrectArgs
  # self.ingress_mac.update(frame_ciphertext)
  c.imac.update(toa(data, 0, rsize))
  # keccak `finish` is destructive, so a copy of the running MAC is taken.
  tmpmac = c.imac
  # fmac_seed = self.ingress_mac.digest()[:MAC_LEN]
  var seed = tmpmac.finish()
  # self.ingress_mac.update(sxor(self.mac_enc(fmac_seed), fmac_seed))
  c.macenc.encrypt(toa(seed.data, 0, RlpHeaderLength), aes)
  sxor(aes, toa(seed.data, 0, RlpHeaderLength))
  c.imac.update(aes)
  # expected_frame_mac = self.ingress_mac.digest()[:MAC_LEN]
  tmpmac = c.imac
  var expectMac = tmpmac.finish()
  # NOTE(review): equalMem is not a constant-time comparison; consider a
  # constant-time compare for MAC verification.
  let bodyMacPos = rsize
  if not equalMem(cast[pointer](unsafeAddr data[bodyMacPos]),
                  cast[pointer](addr expectMac.data[0]), RlpMacLength):
    result = IncorrectMac
  else:
    c.aesdec.decrypt(toa(data, 0, rsize), output)
    outlen = bodysize
    result = Success
|
|
@ -0,0 +1,43 @@
|
||||||
|
import times, asyncdispatch2
|
||||||
|
|
||||||
|
type
  FullNodeSyncer* = ref object
    ## Syncer combining fast-sync catch-up with regular sync (WIP port
    ## from py-evm; see `run` below).
    chaindb: ChainDB # canonical chain database — type not declared in this
                     # file; presumably imported elsewhere — TODO confirm
  FastChainSyncer = ref object # placeholder, no fields yet
  RegularChainSyncer = ref object # placeholder, no fields yet
|
||||||
|
|
||||||
|
# How old (in seconds) must our local head be to cause us to start with a fast-sync before we
|
||||||
|
# switch to regular-sync.
|
||||||
|
const FAST_SYNC_CUTOFF = 60 * 60 * 24
|
||||||
|
|
||||||
|
|
||||||
|
proc run(s: FullNodeSyncer) {.async.} =
  ## Full-node sync entry point: fast-sync first when the local head is
  ## older than FAST_SYNC_CUTOFF, then switch to regular sync.
  ## NOTE(review): everything below the first `if` body is still
  ## untranslated py-evm Python (`self.*`, snake_case attributes, `not in`,
  ## reassignment of a `let`) and cannot compile as Nim — this proc needs
  ## porting before this module can build.
  let head = await s.chaindb.getCanonicalHead()

  # We're still too slow at block processing, so if our local head is older than
  # FAST_SYNC_CUTOFF we first do a fast-sync run to catch up with the rest of the network.
  # See https://github.com/ethereum/py-evm/issues/654 for more details
  if head.timestamp < epochTime() - FAST_SYNC_CUTOFF:
    # Fast-sync chain data.
    self.logger.info("Starting fast-sync; current head: #%d", head.block_number)
    chain_syncer = FastChainSyncer(self.chaindb, self.peer_pool, self.cancel_token)
    await chain_syncer.run()

    # Ensure we have the state for our current head.
    head = await self.wait(self.chaindb.coro_get_canonical_head())
    if head.state_root != BLANK_ROOT_HASH and head.state_root not in self.base_db:
      self.logger.info(
        "Missing state for current head (#%d), downloading it", head.block_number)
      downloader = StateDownloader(
        self.base_db, head.state_root, self.peer_pool, self.cancel_token)
      await downloader.run()

    # Now, loop forever, fetching missing blocks and applying them.
    self.logger.info("Starting regular sync; current head: #%d", head.block_number)
    # This is a bit of a hack, but self.chain is stuck in the past as during the fast-sync we
    # did not use it to import the blocks, so we need this to get a Chain instance with our
    # latest head so that we can start importing blocks.
    new_chain = type(self.chain)(self.base_db)
    chain_syncer = RegularChainSyncer(
      new_chain, self.chaindb, self.peer_pool, self.cancel_token)
    await chain_syncer.run()
|
|
@ -7,7 +7,7 @@
|
||||||
#
|
#
|
||||||
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
||||||
|
|
||||||
import ../src/eth_keys,
|
import eth/keys,
|
||||||
./config
|
./config
|
||||||
|
|
||||||
import unittest
|
import unittest
|
||||||
|
|
|
@ -0,0 +1,4 @@
|
||||||
|
import
|
||||||
|
testecies, testauth, testcrypt, tshh,
|
||||||
|
les/test_flow_control
|
||||||
|
|
|
@ -0,0 +1,4 @@
|
||||||
|
--threads:on
|
||||||
|
--path:"$projectDir/../.."
|
||||||
|
--d:testing
|
||||||
|
|
|
@ -0,0 +1,4 @@
|
||||||
|
--threads:on
|
||||||
|
--path:"$projectDir/../../.."
|
||||||
|
--d:testing
|
||||||
|
|
|
@ -0,0 +1,5 @@
|
||||||
|
import
|
||||||
|
eth/p2p/rlpx_protocols/les/flow_control
|
||||||
|
|
||||||
|
flow_control.tests()
|
||||||
|
|
|
@ -0,0 +1,190 @@
|
||||||
|
#
|
||||||
|
# Ethereum P2P
|
||||||
|
# (c) Copyright 2018
|
||||||
|
# Status Research & Development GmbH
|
||||||
|
#
|
||||||
|
# Licensed under either of
|
||||||
|
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||||
|
# MIT license (LICENSE-MIT)
|
||||||
|
|
||||||
|
import
|
||||||
|
sequtils, options, strutils, parseopt, asyncdispatch2,
|
||||||
|
eth/[keys, rlp, p2p], eth/p2p/rlpx_protocols/[whisper_protocol],
|
||||||
|
eth/p2p/[discovery, enode, peer_pool]
|
||||||
|
|
||||||
|
const
|
||||||
|
DefaultListeningPort = 30303
|
||||||
|
Usage = """Usage:
|
||||||
|
tssh_client [options]
|
||||||
|
Options:
|
||||||
|
-p --port Listening port
|
||||||
|
--post Post messages
|
||||||
|
--watch Install filters
|
||||||
|
--mainnet Connect to main network (default local private)
|
||||||
|
--local Only local loopback
|
||||||
|
--help Display this help and exit"""
|
||||||
|
|
||||||
|
DockerBootnode = "enode://f41f87f084ed7df4a9fd0833e395f49c89764462d3c4bc16d061a3ae5e3e34b79eb47d61c2f62db95ff32ae8e20965e25a3c9d9b8dbccaa8e8d77ac6fc8efc06@172.17.0.2:30301"
|
||||||
|
# bootnodes taken from go-ethereum
|
||||||
|
MainBootnodes* = [
|
||||||
|
"enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303",
|
||||||
|
"enode://3f1d12044546b76342d59d4a05532c14b85aa669704bfe1f864fe079415aa2c02d743e03218e57a33fb94523adb54032871a6c51b2cc5514cb7c7e35b3ed0a99@13.93.211.84:30303",
|
||||||
|
"enode://78de8a0916848093c73790ead81d1928bec737d565119932b98c6b100d944b7a95e94f847f689fc723399d2e31129d182f7ef3863f2b4c820abbf3ab2722344d@191.235.84.50:30303",
|
||||||
|
"enode://158f8aab45f6d19c6cbf4a089c2670541a8da11978a2f90dbf6a502a4a3bab80d288afdbeb7ec0ef6d92de563767f3b1ea9e8e334ca711e9f8e2df5a0385e8e6@13.75.154.138:30303",
|
||||||
|
"enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303",
|
||||||
|
"enode://979b7fa28feeb35a4741660a16076f1943202cb72b6af70d327f053e248bab9ba81760f39d0701ef1d8f89cc1fbd2cacba0710a12cd5314d5e0c9021aa3637f9@5.1.83.226:30303",
|
||||||
|
]
|
||||||
|
# Whisper nodes taken from:
|
||||||
|
# https://github.com/status-im/status-react/blob/80aa0e92864c638777a45c3f2aeb66c3ae7c0b2e/resources/config/fleets.json
|
||||||
|
# These are probably not on the main network?
|
||||||
|
WhisperNodes = [
|
||||||
|
"enode://66ba15600cda86009689354c3a77bdf1a97f4f4fb3ab50ffe34dbc904fac561040496828397be18d9744c75881ffc6ac53729ddbd2cdbdadc5f45c400e2622f7@206.189.243.176:30305",
|
||||||
|
"enode://0440117a5bc67c2908fad94ba29c7b7f2c1536e96a9df950f3265a9566bf3a7306ea8ab5a1f9794a0a641dcb1e4951ce7c093c61c0d255f4ed5d2ed02c8fce23@35.224.15.65:30305",
|
||||||
|
"enode://a80eb084f6bf3f98bf6a492fd6ba3db636986b17643695f67f543115d93d69920fb72e349e0c617a01544764f09375bb85f452b9c750a892d01d0e627d9c251e@47.89.16.125:30305",
|
||||||
|
"enode://4ea35352702027984a13274f241a56a47854a7fd4b3ba674a596cff917d3c825506431cf149f9f2312a293bb7c2b1cca55db742027090916d01529fe0729643b@206.189.243.178:30305",
|
||||||
|
"enode://552942cc4858073102a6bcd0df9fe4de6d9fc52ddf7363e8e0746eba21b0f98fb37e8270bc629f72cfe29e0b3522afaf51e309a05998736e2c0dad5288991148@130.211.215.133:30305",
|
||||||
|
"enode://aa97756bc147d74be6d07adfc465266e17756339d3d18591f4be9d1b2e80b86baf314aed79adbe8142bcb42bc7bc40e83ee3bbd0b82548e595bf855d548906a1@47.52.188.241:30305",
|
||||||
|
"enode://ce559a37a9c344d7109bd4907802dd690008381d51f658c43056ec36ac043338bd92f1ac6043e645b64953b06f27202d679756a9c7cf62fdefa01b2e6ac5098e@206.189.243.179:30305",
|
||||||
|
"enode://b33dc678589931713a085d29f9dc0efee1783dacce1d13696eb5d3a546293198470d97822c40b187336062b39fd3464e9807858109752767d486ea699a6ab3de@35.193.151.184:30305",
|
||||||
|
"enode://f34451823b173dc5f2ac0eec1668fdb13dba9452b174249a7e0272d6dce16fb811a01e623300d1b7a67c240ae052a462bff3f60e4a05e4c4bd23cc27dea57051@47.52.173.66:30305",
|
||||||
|
"enode://4e0a8db9b73403c9339a2077e911851750fc955db1fc1e09f81a4a56725946884dd5e4d11258eac961f9078a393c45bcab78dd0e3bc74e37ce773b3471d2e29c@206.189.243.171:30305",
|
||||||
|
"enode://eb4cc33c1948b1f4b9cb8157757645d78acd731cc8f9468ad91cef8a7023e9c9c62b91ddab107043aabc483742ac15cb4372107b23962d3bfa617b05583f2260@146.148.66.209:30305",
|
||||||
|
"enode://7c80e37f324bbc767d890e6381854ef9985d33940285413311e8b5927bf47702afa40cd5d34be9aa6183ac467009b9545e24b0d0bc54ef2b773547bb8c274192@47.91.155.62:30305",
|
||||||
|
"enode://a8bddfa24e1e92a82609b390766faa56cf7a5eef85b22a2b51e79b333c8aaeec84f7b4267e432edd1cf45b63a3ad0fc7d6c3a16f046aa6bc07ebe50e80b63b8c@206.189.243.172:30305",
|
||||||
|
"enode://c7e00e5a333527c009a9b8f75659d9e40af8d8d896ebaa5dbdd46f2c58fc010e4583813bc7fc6da98fcf4f9ca7687d37ced8390330ef570d30b5793692875083@35.192.123.253:30305",
|
||||||
|
"enode://4b2530d045b1d9e0e45afa7c008292744fe77675462090b4001f85faf03b87aa79259c8a3d6d64f815520ac76944e795cbf32ff9e2ce9ba38f57af00d1cc0568@47.90.29.122:30305",
|
||||||
|
"enode://887cbd92d95afc2c5f1e227356314a53d3d18855880ac0509e0c0870362aee03939d4074e6ad31365915af41d34320b5094bfcc12a67c381788cd7298d06c875@206.189.243.177:30305",
|
||||||
|
"enode://2af8f4f7a0b5aabaf49eb72b9b59474b1b4a576f99a869e00f8455928fa242725864c86bdff95638a8b17657040b21771a7588d18b0f351377875f5b46426594@35.232.187.4:30305",
|
||||||
|
"enode://76ee16566fb45ca7644c8dec7ac74cadba3bfa0b92c566ad07bcb73298b0ffe1315fd787e1f829e90dba5cd3f4e0916e069f14e50e9cbec148bead397ac8122d@47.91.226.75:30305",
|
||||||
|
"enode://2b01955d7e11e29dce07343b456e4e96c081760022d1652b1c4b641eaf320e3747871870fa682e9e9cfb85b819ce94ed2fee1ac458904d54fd0b97d33ba2c4a4@206.189.240.70:30305",
|
||||||
|
"enode://19872f94b1e776da3a13e25afa71b47dfa99e658afd6427ea8d6e03c22a99f13590205a8826443e95a37eee1d815fc433af7a8ca9a8d0df7943d1f55684045b7@35.238.60.236:30305"
|
||||||
|
]
|
||||||
|
|
||||||
|
type
|
||||||
|
ShhConfig* = object
|
||||||
|
listeningPort*: int
|
||||||
|
post*: bool
|
||||||
|
watch*: bool
|
||||||
|
main*: bool
|
||||||
|
local*: bool
|
||||||
|
|
||||||
|
proc processArguments*(): ShhConfig =
|
||||||
|
var opt = initOptParser()
|
||||||
|
var length = 0
|
||||||
|
for kind, key, value in opt.getopt():
|
||||||
|
case kind
|
||||||
|
of cmdArgument:
|
||||||
|
echo key
|
||||||
|
of cmdLongOption, cmdShortOption:
|
||||||
|
inc(length)
|
||||||
|
case key.toLowerAscii()
|
||||||
|
of "help", "h": quit(Usage, QuitSuccess)
|
||||||
|
of "port", "p":
|
||||||
|
result.listeningPort = value.parseInt
|
||||||
|
of "post":
|
||||||
|
result.post = true
|
||||||
|
of "watch":
|
||||||
|
result.watch = true
|
||||||
|
of "mainnet":
|
||||||
|
result.main = true
|
||||||
|
of "local":
|
||||||
|
result.local = true
|
||||||
|
else: quit(Usage)
|
||||||
|
of cmdEnd:
|
||||||
|
quit(Usage)
|
||||||
|
|
||||||
|
let config = processArguments()
|
||||||
|
|
||||||
|
var port: Port
|
||||||
|
var address: Address
|
||||||
|
var netId: uint
|
||||||
|
|
||||||
|
# config
|
||||||
|
if config.listeningPort != 0:
|
||||||
|
port = Port(config.listeningPort)
|
||||||
|
else:
|
||||||
|
port = Port(DefaultListeningPort)
|
||||||
|
if config.local:
|
||||||
|
address = Address(udpPort: port, tcpPort: port, ip: parseIpAddress("127.0.0.1"))
|
||||||
|
else:
|
||||||
|
address = Address(udpPort: port, tcpPort: port, ip: parseIpAddress("0.0.0.0"))
|
||||||
|
if config.main:
|
||||||
|
netId = 1
|
||||||
|
else:
|
||||||
|
netId = 15
|
||||||
|
|
||||||
|
let keys = newKeyPair()
|
||||||
|
var node = newEthereumNode(keys, address, netId, nil, addAllCapabilities = false)
|
||||||
|
node.addCapability Whisper
|
||||||
|
|
||||||
|
# lets prepare some prearranged keypairs
|
||||||
|
let encPrivateKey = initPrivateKey("5dc5381cae54ba3174dc0d46040fe11614d0cc94d41185922585198b4fcef9d3")
|
||||||
|
let encPublicKey = encPrivateKey.getPublicKey()
|
||||||
|
let signPrivateKey = initPrivateKey("365bda0757d22212b04fada4b9222f8c3da59b49398fa04cf612481cd893b0a3")
|
||||||
|
let signPublicKey = signPrivateKey.getPublicKey()
|
||||||
|
var symKey: SymKey
|
||||||
|
# To test with geth: all 0's key is invalid in geth console
|
||||||
|
symKey[31] = 1
|
||||||
|
let topic = [byte 0x12, 0, 0, 0]
|
||||||
|
|
||||||
|
if config.main:
|
||||||
|
var bootnodes: seq[ENode] = @[]
|
||||||
|
for nodeId in MainBootnodes:
|
||||||
|
var bootnode: ENode
|
||||||
|
discard initENode(nodeId, bootnode)
|
||||||
|
bootnodes.add(bootnode)
|
||||||
|
|
||||||
|
asyncCheck node.connectToNetwork(bootnodes, true, true)
|
||||||
|
# main network has mostly non SHH nodes, so we connect directly to SHH nodes
|
||||||
|
for nodeId in WhisperNodes:
|
||||||
|
var whisperENode: ENode
|
||||||
|
discard initENode(nodeId, whisperENode)
|
||||||
|
var whisperNode = newNode(whisperENode)
|
||||||
|
asyncCheck node.peerPool.connectToNode(whisperNode)
|
||||||
|
else:
|
||||||
|
var bootENode: ENode
|
||||||
|
discard initENode(DockerBootNode, bootENode)
|
||||||
|
waitFor node.connectToNetwork(@[bootENode], true, true)
|
||||||
|
|
||||||
|
if config.watch:
|
||||||
|
proc handler(msg: ReceivedMessage) =
|
||||||
|
echo msg.decoded.payload.repr
|
||||||
|
|
||||||
|
# filter encrypted asym
|
||||||
|
discard node.subscribeFilter(newFilter(privateKey = some(encPrivateKey),
|
||||||
|
topics = @[topic]),
|
||||||
|
handler)
|
||||||
|
# filter encrypted asym + signed
|
||||||
|
discard node.subscribeFilter(newFilter(some(signPublicKey),
|
||||||
|
privateKey = some(encPrivateKey),
|
||||||
|
topics = @[topic]),
|
||||||
|
handler)
|
||||||
|
# filter encrypted sym
|
||||||
|
discard node.subscribeFilter(newFilter(symKey = some(symKey),
|
||||||
|
topics = @[topic]),
|
||||||
|
handler)
|
||||||
|
# filter encrypted sym + signed
|
||||||
|
discard node.subscribeFilter(newFilter(some(signPublicKey),
|
||||||
|
symKey = some(symKey),
|
||||||
|
topics = @[topic]),
|
||||||
|
handler)
|
||||||
|
|
||||||
|
if config.post:
|
||||||
|
# encrypted asym
|
||||||
|
discard node.postMessage(some(encPublicKey), ttl = 5, topic = topic,
|
||||||
|
payload = repeat(byte 65, 10))
|
||||||
|
poll()
|
||||||
|
# # encrypted asym + signed
|
||||||
|
discard node.postMessage(some(encPublicKey), src = some(signPrivateKey),
|
||||||
|
ttl = 5, topic = topic, payload = repeat(byte 66, 10))
|
||||||
|
poll()
|
||||||
|
# # encrypted sym
|
||||||
|
discard node.postMessage(symKey = some(symKey), ttl = 5, topic = topic,
|
||||||
|
payload = repeat(byte 67, 10))
|
||||||
|
poll()
|
||||||
|
# # encrypted sym + signed
|
||||||
|
discard node.postMessage(symKey = some(symKey), src = some(signPrivateKey),
|
||||||
|
ttl = 5, topic = topic, payload = repeat(byte 68, 10))
|
||||||
|
|
||||||
|
while true:
|
||||||
|
poll()
|
|
@ -0,0 +1,468 @@
|
||||||
|
#
|
||||||
|
# Ethereum P2P
|
||||||
|
# (c) Copyright 2018
|
||||||
|
# Status Research & Development GmbH
|
||||||
|
#
|
||||||
|
# See the file "LICENSE", included in this
|
||||||
|
# distribution, for details about the copyright.
|
||||||
|
#
|
||||||
|
|
||||||
|
import unittest
|
||||||
|
import eth/keys, nimcrypto/[utils, keccak]
|
||||||
|
import eth/p2p/auth
|
||||||
|
|
||||||
|
# This was generated by `print` actual auth message generated by
|
||||||
|
# https://github.com/ethereum/py-evm/blob/master/tests/p2p/test_auth.py
|
||||||
|
const pyevmAuth = """
|
||||||
|
22034ad2e7545e2b0bf02ecb1e40db478dfbbf7aeecc834aec2523eb2b7e74ee
|
||||||
|
77ba40c70a83bfe9f2ab91f0131546dcf92c3ee8282d9907fee093017fd0302d
|
||||||
|
0034fdb5419558137e0d44cd13d319afe5629eeccb47fd9dfe55cc6089426e46
|
||||||
|
cc762dd8a0636e07a54b31169eba0c7a20a1ac1ef68596f1f283b5c676bae406
|
||||||
|
4abfcce24799d09f67e392632d3ffdc12e3d6430dcb0ea19c318343ffa7aae74
|
||||||
|
d4cd26fecb93657d1cd9e9eaf4f8be720b56dd1d39f190c4e1c6b7ec66f077bb
|
||||||
|
1100"""
|
||||||
|
|
||||||
|
# This data comes from https://gist.github.com/fjl/3a78780d17c755d22df2
|
||||||
|
const data = [
|
||||||
|
("initiator_private_key",
|
||||||
|
"5e173f6ac3c669587538e7727cf19b782a4f2fda07c1eaa662c593e5e85e3051"),
|
||||||
|
("receiver_private_key",
|
||||||
|
"c45f950382d542169ea207959ee0220ec1491755abe405cd7498d6b16adb6df8"),
|
||||||
|
("initiator_ephemeral_private_key",
|
||||||
|
"19c2185f4f40634926ebed3af09070ca9e029f2edd5fae6253074896205f5f6c"),
|
||||||
|
("receiver_ephemeral_private_key",
|
||||||
|
"d25688cf0ab10afa1a0e2dba7853ed5f1e5bf1c631757ed4e103b593ff3f5620"),
|
||||||
|
("auth_plaintext",
|
||||||
|
"""884c36f7ae6b406637c1f61b2f57e1d2cab813d24c6559aaf843c3f48962f32f
|
||||||
|
46662c066d39669b7b2e3ba14781477417600e7728399278b1b5d801a519aa57
|
||||||
|
0034fdb5419558137e0d44cd13d319afe5629eeccb47fd9dfe55cc6089426e46
|
||||||
|
cc762dd8a0636e07a54b31169eba0c7a20a1ac1ef68596f1f283b5c676bae406
|
||||||
|
4abfcce24799d09f67e392632d3ffdc12e3d6430dcb0ea19c318343ffa7aae74
|
||||||
|
d4cd26fecb93657d1cd9e9eaf4f8be720b56dd1d39f190c4e1c6b7ec66f077bb
|
||||||
|
1100"""),
|
||||||
|
("authresp_plaintext",
|
||||||
|
"""802b052f8b066640bba94a4fc39d63815c377fced6fcb84d27f791c9921ddf3e
|
||||||
|
9bf0108e298f490812847109cbd778fae393e80323fd643209841a3b7f110397
|
||||||
|
f37ec61d84cea03dcc5e8385db93248584e8af4b4d1c832d8c7453c0089687a7
|
||||||
|
00"""),
|
||||||
|
("auth_ciphertext",
|
||||||
|
"""04a0274c5951e32132e7f088c9bdfdc76c9d91f0dc6078e848f8e3361193dbdc
|
||||||
|
43b94351ea3d89e4ff33ddcefbc80070498824857f499656c4f79bbd97b6c51a
|
||||||
|
514251d69fd1785ef8764bd1d262a883f780964cce6a14ff206daf1206aa073a
|
||||||
|
2d35ce2697ebf3514225bef186631b2fd2316a4b7bcdefec8d75a1025ba2c540
|
||||||
|
4a34e7795e1dd4bc01c6113ece07b0df13b69d3ba654a36e35e69ff9d482d88d
|
||||||
|
2f0228e7d96fe11dccbb465a1831c7d4ad3a026924b182fc2bdfe016a6944312
|
||||||
|
021da5cc459713b13b86a686cf34d6fe6615020e4acf26bf0d5b7579ba813e77
|
||||||
|
23eb95b3cef9942f01a58bd61baee7c9bdd438956b426a4ffe238e61746a8c93
|
||||||
|
d5e10680617c82e48d706ac4953f5e1c4c4f7d013c87d34a06626f498f34576d
|
||||||
|
c017fdd3d581e83cfd26cf125b6d2bda1f1d56"""),
|
||||||
|
("authresp_ciphertext",
|
||||||
|
"""049934a7b2d7f9af8fd9db941d9da281ac9381b5740e1f64f7092f3588d4f87f
|
||||||
|
5ce55191a6653e5e80c1c5dd538169aa123e70dc6ffc5af1827e546c0e958e42
|
||||||
|
dad355bcc1fcb9cdf2cf47ff524d2ad98cbf275e661bf4cf00960e74b5956b79
|
||||||
|
9771334f426df007350b46049adb21a6e78ab1408d5e6ccde6fb5e69f0f4c92b
|
||||||
|
b9c725c02f99fa72b9cdc8dd53cff089e0e73317f61cc5abf6152513cb7d833f
|
||||||
|
09d2851603919bf0fbe44d79a09245c6e8338eb502083dc84b846f2fee1cc310
|
||||||
|
d2cc8b1b9334728f97220bb799376233e113"""),
|
||||||
|
("ecdhe_shared_secret",
|
||||||
|
"e3f407f83fc012470c26a93fdff534100f2c6f736439ce0ca90e9914f7d1c381"),
|
||||||
|
("initiator_nonce",
|
||||||
|
"cd26fecb93657d1cd9e9eaf4f8be720b56dd1d39f190c4e1c6b7ec66f077bb11"),
|
||||||
|
("receiver_nonce",
|
||||||
|
"f37ec61d84cea03dcc5e8385db93248584e8af4b4d1c832d8c7453c0089687a7"),
|
||||||
|
("aes_secret",
|
||||||
|
"c0458fa97a5230830e05f4f20b7c755c1d4e54b1ce5cf43260bb191eef4e418d"),
|
||||||
|
("mac_secret",
|
||||||
|
"48c938884d5067a1598272fcddaa4b833cd5e7d92e8228c0ecdfabbe68aef7f1"),
|
||||||
|
("token",
|
||||||
|
"3f9ec2592d1554852b1f54d228f042ed0a9310ea86d038dc2b401ba8cd7fdac4"),
|
||||||
|
("initial_egress_MAC",
|
||||||
|
"09771e93b1a6109e97074cbe2d2b0cf3d3878efafe68f53c41bb60c0ec49097e"),
|
||||||
|
("initial_ingress_MAC",
|
||||||
|
"75823d96e23136c89666ee025fb21a432be906512b3dd4a3049e898adb433847"),
|
||||||
|
("initiator_hello_packet",
|
||||||
|
"""6ef23fcf1cec7312df623f9ae701e63b550cdb8517fefd8dd398fc2acd1d935e
|
||||||
|
6e0434a2b96769078477637347b7b01924fff9ff1c06df2f804df3b0402bbb9f
|
||||||
|
87365b3c6856b45e1e2b6470986813c3816a71bff9d69dd297a5dbd935ab578f
|
||||||
|
6e5d7e93e4506a44f307c332d95e8a4b102585fd8ef9fc9e3e055537a5cec2e9"""),
|
||||||
|
("receiver_hello_packet",
|
||||||
|
"""6ef23fcf1cec7312df623f9ae701e63be36a1cdd1b19179146019984f3625d4a
|
||||||
|
6e0434a2b96769050577657247b7b02bc6c314470eca7e3ef650b98c83e9d7dd
|
||||||
|
4830b3f718ff562349aead2530a8d28a8484604f92e5fced2c6183f304344ab0
|
||||||
|
e7c301a0c05559f4c25db65e36820b4b909a226171a60ac6cb7beea09376d6d8""")
|
||||||
|
]
|
||||||
|
|
||||||
|
# Thies test vectors was copied from EIP8 specfication
|
||||||
|
# https://github.com/ethereum/EIPs/blob/master/EIPS/eip-8.md
|
||||||
|
const eip8data = [
|
||||||
|
("initiator_private_key",
|
||||||
|
"49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee"),
|
||||||
|
("receiver_private_key",
|
||||||
|
"b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291"),
|
||||||
|
("initiator_ephemeral_private_key",
|
||||||
|
"869d6ecf5211f1cc60418a13b9d870b22959d0c16f02bec714c960dd2298a32d"),
|
||||||
|
("receiver_ephemeral_private_key",
|
||||||
|
"e238eb8e04fee6511ab04c6dd3c89ce097b11f25d584863ac2b6d5b35b1847e4"),
|
||||||
|
("initiator_nonce",
|
||||||
|
"7e968bba13b6c50e2c4cd7f241cc0d64d1ac25c7f5952df231ac6a2bda8ee5d6"),
|
||||||
|
("receiver_nonce",
|
||||||
|
"559aead08264d5795d3909718cdd05abd49572e84fe55590eef31a88a08fdffd"),
|
||||||
|
("auth_ciphertext_v4",
|
||||||
|
"""048ca79ad18e4b0659fab4853fe5bc58eb83992980f4c9cc147d2aa31532efd29
|
||||||
|
a3d3dc6a3d89eaf913150cfc777ce0ce4af2758bf4810235f6e6ceccfee1acc6b
|
||||||
|
22c005e9e3a49d6448610a58e98744ba3ac0399e82692d67c1f58849050b3024e
|
||||||
|
21a52c9d3b01d871ff5f210817912773e610443a9ef142e91cdba0bd77b5fdf07
|
||||||
|
69b05671fc35f83d83e4d3b0b000c6b2a1b1bba89e0fc51bf4e460df3105c444f
|
||||||
|
14be226458940d6061c296350937ffd5e3acaceeaaefd3c6f74be8e23e0f45163
|
||||||
|
cc7ebd76220f0128410fd05250273156d548a414444ae2f7dea4dfca2d43c057a
|
||||||
|
db701a715bf59f6fb66b2d1d20f2c703f851cbf5ac47396d9ca65b6260bd141ac
|
||||||
|
4d53e2de585a73d1750780db4c9ee4cd4d225173a4592ee77e2bd94d0be3691f3
|
||||||
|
b406f9bba9b591fc63facc016bfa8"""),
|
||||||
|
("auth_ciphertext_eip8",
|
||||||
|
"""01b304ab7578555167be8154d5cc456f567d5ba302662433674222360f08d5f15
|
||||||
|
34499d3678b513b0fca474f3a514b18e75683032eb63fccb16c156dc6eb2c0b15
|
||||||
|
93f0d84ac74f6e475f1b8d56116b849634a8c458705bf83a626ea0384d4d7341a
|
||||||
|
ae591fae42ce6bd5c850bfe0b999a694a49bbbaf3ef6cda61110601d3b4c02ab6
|
||||||
|
c30437257a6e0117792631a4b47c1d52fc0f8f89caadeb7d02770bf999cc147d2
|
||||||
|
df3b62e1ffb2c9d8c125a3984865356266bca11ce7d3a688663a51d82defaa8aa
|
||||||
|
d69da39ab6d5470e81ec5f2a7a47fb865ff7cca21516f9299a07b1bc63ba56c7a
|
||||||
|
1a892112841ca44b6e0034dee70c9adabc15d76a54f443593fafdc3b27af80597
|
||||||
|
03f88928e199cb122362a4b35f62386da7caad09c001edaeb5f8a06d2b26fb6cb
|
||||||
|
93c52a9fca51853b68193916982358fe1e5369e249875bb8d0d0ec36f917bc5e1
|
||||||
|
eafd5896d46bd61ff23f1a863a8a8dcd54c7b109b771c8e61ec9c8908c733c026
|
||||||
|
3440e2aa067241aaa433f0bb053c7b31a838504b148f570c0ad62837129e54767
|
||||||
|
8c5190341e4f1693956c3bf7678318e2d5b5340c9e488eefea198576344afbdf6
|
||||||
|
6db5f51204a6961a63ce072c8926c"""),
|
||||||
|
("auth_ciphertext_eip8_3f",
|
||||||
|
"""01b8044c6c312173685d1edd268aa95e1d495474c6959bcdd10067ba4c9013df9
|
||||||
|
e40ff45f5bfd6f72471f93a91b493f8e00abc4b80f682973de715d77ba3a005a2
|
||||||
|
42eb859f9a211d93a347fa64b597bf280a6b88e26299cf263b01b8dfdb7122784
|
||||||
|
64fd1c25840b995e84d367d743f66c0e54a586725b7bbf12acca27170ae3283c1
|
||||||
|
073adda4b6d79f27656993aefccf16e0d0409fe07db2dc398a1b7e8ee93bcd181
|
||||||
|
485fd332f381d6a050fba4c7641a5112ac1b0b61168d20f01b479e19adf7fdbfa
|
||||||
|
0905f63352bfc7e23cf3357657455119d879c78d3cf8c8c06375f3f7d4861aa02
|
||||||
|
a122467e069acaf513025ff196641f6d2810ce493f51bee9c966b15c504350535
|
||||||
|
0392b57645385a18c78f14669cc4d960446c17571b7c5d725021babbcd786957f
|
||||||
|
3d17089c084907bda22c2b2675b4378b114c601d858802a55345a15116bc61da4
|
||||||
|
193996187ed70d16730e9ae6b3bb8787ebcaea1871d850997ddc08b4f4ea668fb
|
||||||
|
f37407ac044b55be0908ecb94d4ed172ece66fd31bfdadf2b97a8bc690163ee11
|
||||||
|
f5b575a4b44e36e2bfb2f0fce91676fd64c7773bac6a003f481fddd0bae0a1f31
|
||||||
|
aa27504e2a533af4cef3b623f4791b2cca6d490"""),
|
||||||
|
("authack_ciphertext_v4",
|
||||||
|
"""049f8abcfa9c0dc65b982e98af921bc0ba6e4243169348a236abe9df5f93aa69d
|
||||||
|
99cadddaa387662b0ff2c08e9006d5a11a278b1b3331e5aaabf0a32f01281b6f4
|
||||||
|
ede0e09a2d5f585b26513cb794d9635a57563921c04a9090b4f14ee42be1a5461
|
||||||
|
049af4ea7a7f49bf4c97a352d39c8d02ee4acc416388c1c66cec761d2bc1c72da
|
||||||
|
6ba143477f049c9d2dde846c252c111b904f630ac98e51609b3b1f58168ddca65
|
||||||
|
05b7196532e5f85b259a20c45e1979491683fee108e9660edbf38f3add489ae73
|
||||||
|
e3dda2c71bd1497113d5c755e942d1"""),
|
||||||
|
("authack_ciphertext_eip8",
|
||||||
|
"""01ea0451958701280a56482929d3b0757da8f7fbe5286784beead59d95089c217
|
||||||
|
c9b917788989470b0e330cc6e4fb383c0340ed85fab836ec9fb8a49672712aeab
|
||||||
|
bdfd1e837c1ff4cace34311cd7f4de05d59279e3524ab26ef753a0095637ac88f
|
||||||
|
2b499b9914b5f64e143eae548a1066e14cd2f4bd7f814c4652f11b254f8a2d019
|
||||||
|
1e2f5546fae6055694aed14d906df79ad3b407d94692694e259191cde171ad542
|
||||||
|
fc588fa2b7333313d82a9f887332f1dfc36cea03f831cb9a23fea05b33deb999e
|
||||||
|
85489e645f6aab1872475d488d7bd6c7c120caf28dbfc5d6833888155ed69d34d
|
||||||
|
bdc39c1f299be1057810f34fbe754d021bfca14dc989753d61c413d261934e1a9
|
||||||
|
c67ee060a25eefb54e81a4d14baff922180c395d3f998d70f46f6b58306f96962
|
||||||
|
7ae364497e73fc27f6d17ae45a413d322cb8814276be6ddd13b885b201b943213
|
||||||
|
656cde498fa0e9ddc8e0b8f8a53824fbd82254f3e2c17e8eaea009c38b4aa0a3f
|
||||||
|
306e8797db43c25d68e86f262e564086f59a2fc60511c42abfb3057c247a8a8fe
|
||||||
|
4fb3ccbadde17514b7ac8000cdb6a912778426260c47f38919a91f25f4b5ffb45
|
||||||
|
5d6aaaf150f7e5529c100ce62d6d92826a71778d809bdf60232ae21ce8a437eca
|
||||||
|
8223f45ac37f6487452ce626f549b3b5fdee26afd2072e4bc75833c2464c80524
|
||||||
|
6155289f4"""),
|
||||||
|
("authack_ciphertext_eip8_3f",
|
||||||
|
"""01f004076e58aae772bb101ab1a8e64e01ee96e64857ce82b1113817c6cdd52c0
|
||||||
|
9d26f7b90981cd7ae835aeac72e1573b8a0225dd56d157a010846d888dac7464b
|
||||||
|
af53f2ad4e3d584531fa203658fab03a06c9fd5e35737e417bc28c1cbf5e5dfc6
|
||||||
|
66de7090f69c3b29754725f84f75382891c561040ea1ddc0d8f381ed1b9d0d4ad
|
||||||
|
2a0ec021421d847820d6fa0ba66eaf58175f1b235e851c7e2124069fbc202888d
|
||||||
|
db3ac4d56bcbd1b9b7eab59e78f2e2d400905050f4a92dec1c4bdf797b3fc9b2f
|
||||||
|
8e84a482f3d800386186712dae00d5c386ec9387a5e9c9a1aca5a573ca91082c7
|
||||||
|
d68421f388e79127a5177d4f8590237364fd348c9611fa39f78dcdceee3f390f0
|
||||||
|
7991b7b47e1daa3ebcb6ccc9607811cb17ce51f1c8c2c5098dbdd28fca547b3f5
|
||||||
|
8c01a424ac05f869f49c6a34672ea2cbbc558428aa1fe48bbfd61158b1b735a65
|
||||||
|
d99f21e70dbc020bfdface9f724a0d1fb5895db971cc81aa7608baa0920abb0a5
|
||||||
|
65c9c436e2fd13323428296c86385f2384e408a31e104670df0791d93e743a3a5
|
||||||
|
194ee6b076fb6323ca593011b7348c16cf58f66b9633906ba54a2ee803187344b
|
||||||
|
394f75dd2e663a57b956cb830dd7a908d4f39a2336a61ef9fda549180d4ccde21
|
||||||
|
514d117b6c6fd07a9102b5efe710a32af4eeacae2cb3b1dec035b9593b48b9d3c
|
||||||
|
a4c13d245d5f04169b0b1"""),
|
||||||
|
("auth2ack2_aes_secret",
|
||||||
|
"80e8632c05fed6fc2a13b0f8d31a3cf645366239170ea067065aba8e28bac487"),
|
||||||
|
("auth2ack2_mac_secret",
|
||||||
|
"2ea74ec5dae199227dff1af715362700e989d889d7a493cb0639691efb8e5f98"),
|
||||||
|
("auth2ack2_ingress_message", "foo"),
|
||||||
|
("auth2ack2_ingress_mac",
|
||||||
|
"0c7ec6340062cc46f5e9f1e3cf86f8c8c403c5a0964f5df0ebd34a75ddc86db5")
|
||||||
|
]
|
||||||
|
|
||||||
|
proc testValue(s: string): string =
|
||||||
|
for item in data:
|
||||||
|
if item[0] == s:
|
||||||
|
result = item[1]
|
||||||
|
break
|
||||||
|
|
||||||
|
proc testE8Value(s: string): string =
|
||||||
|
for item in eip8data:
|
||||||
|
if item[0] == s:
|
||||||
|
result = item[1]
|
||||||
|
break
|
||||||
|
|
||||||
|
suite "Ethereum P2P handshake test suite":
|
||||||
|
|
||||||
|
block:
|
||||||
|
proc newTestHandshake(flags: set[HandshakeFlag]): Handshake =
|
||||||
|
result = newHandshake(flags)
|
||||||
|
if Initiator in flags:
|
||||||
|
result.host.seckey = initPrivateKey(testValue("initiator_private_key"))
|
||||||
|
result.host.pubkey = result.host.seckey.getPublicKey()
|
||||||
|
let epki = testValue("initiator_ephemeral_private_key")
|
||||||
|
result.ephemeral.seckey = initPrivateKey(epki)
|
||||||
|
result.ephemeral.pubkey = result.ephemeral.seckey.getPublicKey()
|
||||||
|
let nonce = fromHex(stripSpaces(testValue("initiator_nonce")))
|
||||||
|
result.initiatorNonce[0..^1] = nonce[0..^1]
|
||||||
|
elif Responder in flags:
|
||||||
|
result.host.seckey = initPrivateKey(testValue("receiver_private_key"))
|
||||||
|
result.host.pubkey = result.host.seckey.getPublicKey()
|
||||||
|
let epkr = testValue("receiver_ephemeral_private_key")
|
||||||
|
result.ephemeral.seckey = initPrivateKey(epkr)
|
||||||
|
result.ephemeral.pubkey = result.ephemeral.seckey.getPublicKey()
|
||||||
|
let nonce = fromHex(stripSpaces(testValue("receiver_nonce")))
|
||||||
|
result.responderNonce[0..^1] = nonce[0..^1]
|
||||||
|
|
||||||
|
test "Create plain auth message":
|
||||||
|
var initiator = newTestHandshake({Initiator})
|
||||||
|
var responder = newTestHandshake({Responder})
|
||||||
|
var m0 = newSeq[byte](initiator.authSize(false))
|
||||||
|
var k0 = 0
|
||||||
|
check:
|
||||||
|
initiator.authMessage(responder.host.pubkey,
|
||||||
|
m0, k0, 0, false) == AuthStatus.Success
|
||||||
|
var expect1 = fromHex(stripSpaces(testValue("auth_plaintext")))
|
||||||
|
var expect2 = fromHex(stripSpaces(pyevmAuth))
|
||||||
|
check:
|
||||||
|
m0[65..^1] == expect1[65..^1]
|
||||||
|
m0[0..^1] == expect2[0..^1]
|
||||||
|
|
||||||
|
test "Auth message decoding":
|
||||||
|
var initiator = newTestHandshake({Initiator})
|
||||||
|
var responder = newTestHandshake({Responder})
|
||||||
|
var m0 = newSeq[byte](initiator.authSize())
|
||||||
|
var k0 = 0
|
||||||
|
let remoteEPubkey0 = initiator.ephemeral.pubkey.data
|
||||||
|
let remoteHPubkey0 = initiator.host.pubkey.data
|
||||||
|
check:
|
||||||
|
initiator.authMessage(responder.host.pubkey,
|
||||||
|
m0, k0) == AuthStatus.Success
|
||||||
|
responder.decodeAuthMessage(m0) == AuthStatus.Success
|
||||||
|
responder.initiatorNonce[0..^1] == initiator.initiatorNonce[0..^1]
|
||||||
|
responder.remoteEPubkey.data[0..^1] == remoteEPubkey0[0..^1]
|
||||||
|
responder.remoteHPubkey.data[0..^1] == remoteHPubkey0[0..^1]
|
||||||
|
|
||||||
|
test "ACK message expectation":
|
||||||
|
var initiator = newTestHandshake({Initiator})
|
||||||
|
var responder = newTestHandshake({Responder})
|
||||||
|
var m0 = newSeq[byte](initiator.authSize())
|
||||||
|
var m1 = newSeq[byte](responder.ackSize(false))
|
||||||
|
var k0 = 0
|
||||||
|
var k1 = 0
|
||||||
|
var expect0 = fromHex(stripSpaces(testValue("authresp_plaintext")))
|
||||||
|
check:
|
||||||
|
initiator.authMessage(responder.host.pubkey,
|
||||||
|
m0, k0) == AuthStatus.Success
|
||||||
|
responder.decodeAuthMessage(m0) == AuthStatus.Success
|
||||||
|
responder.ackMessage(m1, k1, 0, false) == AuthStatus.Success
|
||||||
|
m1 == expect0
|
||||||
|
responder.initiatorNonce == initiator.initiatorNonce
|
||||||
|
|
||||||
|
test "ACK message decoding":
|
||||||
|
var initiator = newTestHandshake({Initiator})
|
||||||
|
var responder = newTestHandshake({Responder})
|
||||||
|
var m0 = newSeq[byte](initiator.authSize())
|
||||||
|
var m1 = newSeq[byte](responder.ackSize())
|
||||||
|
var k0 = 0
|
||||||
|
var k1 = 0
|
||||||
|
check:
|
||||||
|
initiator.authMessage(responder.host.pubkey,
|
||||||
|
m0, k0) == AuthStatus.Success
|
||||||
|
responder.decodeAuthMessage(m0) == AuthStatus.Success
|
||||||
|
responder.ackMessage(m1, k1) == AuthStatus.Success
|
||||||
|
initiator.decodeAckMessage(m1) == AuthStatus.Success
|
||||||
|
let remoteEPubkey0 = responder.ephemeral.pubkey.data
|
||||||
|
let remoteHPubkey0 = responder.host.pubkey.data
|
||||||
|
check:
|
||||||
|
initiator.remoteEPubkey.data[0..^1] == remoteEPubkey0[0..^1]
|
||||||
|
initiator.remoteHPubkey.data[0..^1] == remoteHPubkey0[0..^1]
|
||||||
|
initiator.responderNonce == responder.responderNonce
|
||||||
|
|
||||||
|
test "Check derived secrets":
|
||||||
|
var initiator = newTestHandshake({Initiator})
|
||||||
|
var responder = newTestHandshake({Responder})
|
||||||
|
var authm = fromHex(stripSpaces(testValue("auth_ciphertext")))
|
||||||
|
var ackm = fromHex(stripSpaces(testValue("authresp_ciphertext")))
|
||||||
|
var taes = fromHex(stripSpaces(testValue("aes_secret")))
|
||||||
|
var tmac = fromHex(stripSpaces(testValue("mac_secret")))
|
||||||
|
var temac = fromHex(stripSpaces(testValue("initial_egress_MAC")))
|
||||||
|
var timac = fromHex(stripSpaces(testValue("initial_ingress_MAC")))
|
||||||
|
var csecInitiator: ConnectionSecret
|
||||||
|
var csecResponder: ConnectionSecret
|
||||||
|
check:
|
||||||
|
responder.decodeAuthMessage(authm) == AuthStatus.Success
|
||||||
|
initiator.decodeAckMessage(ackm) == AuthStatus.Success
|
||||||
|
initiator.getSecrets(authm, ackm, csecInitiator) == AuthStatus.Success
|
||||||
|
responder.getSecrets(authm, ackm, csecResponder) == AuthStatus.Success
|
||||||
|
csecInitiator.aesKey == csecResponder.aesKey
|
||||||
|
csecInitiator.macKey == csecResponder.macKey
|
||||||
|
taes[0..^1] == csecInitiator.aesKey[0..^1]
|
||||||
|
tmac[0..^1] == csecInitiator.macKey[0..^1]
|
||||||
|
let iemac = csecInitiator.egressMac.finish()
|
||||||
|
let iimac = csecInitiator.ingressMac.finish()
|
||||||
|
let remac = csecResponder.egressMac.finish()
|
||||||
|
let rimac = csecResponder.ingressMac.finish()
|
||||||
|
check:
|
||||||
|
iemac.data[0..^1] == temac[0..^1]
|
||||||
|
iimac.data[0..^1] == timac[0..^1]
|
||||||
|
remac.data[0..^1] == timac[0..^1]
|
||||||
|
rimac.data[0..^1] == temac[0..^1]
|
||||||
|
|
||||||
|
block:
|
||||||
|
proc newTestHandshake(flags: set[HandshakeFlag]): Handshake =
|
||||||
|
result = newHandshake(flags)
|
||||||
|
if Initiator in flags:
|
||||||
|
result.host.seckey = initPrivateKey(testE8Value("initiator_private_key"))
|
||||||
|
result.host.pubkey = result.host.seckey.getPublicKey()
|
||||||
|
let esec = testE8Value("initiator_ephemeral_private_key")
|
||||||
|
result.ephemeral.seckey = initPrivateKey(esec)
|
||||||
|
result.ephemeral.pubkey = result.ephemeral.seckey.getPublicKey()
|
||||||
|
let nonce = fromHex(stripSpaces(testE8Value("initiator_nonce")))
|
||||||
|
result.initiatorNonce[0..^1] = nonce[0..^1]
|
||||||
|
elif Responder in flags:
|
||||||
|
result.host.seckey = initPrivateKey(testE8Value("receiver_private_key"))
|
||||||
|
result.host.pubkey = result.host.seckey.getPublicKey()
|
||||||
|
let esec = testE8Value("receiver_ephemeral_private_key")
|
||||||
|
result.ephemeral.seckey = initPrivateKey(esec)
|
||||||
|
result.ephemeral.pubkey = result.ephemeral.seckey.getPublicKey()
|
||||||
|
let nonce = fromHex(stripSpaces(testE8Value("receiver_nonce")))
|
||||||
|
result.responderNonce[0..^1] = nonce[0..^1]
|
||||||
|
|
||||||
|
test "AUTH/ACK v4 test vectors": # auth/ack v4
|
||||||
|
var initiator = newTestHandshake({Initiator})
|
||||||
|
var responder = newTestHandshake({Responder})
|
||||||
|
var m0 = fromHex(stripSpaces(testE8Value("auth_ciphertext_v4")))
|
||||||
|
check:
|
||||||
|
responder.decodeAuthMessage(m0) == AuthStatus.Success
|
||||||
|
responder.initiatorNonce[0..^1] == initiator.initiatorNonce[0..^1]
|
||||||
|
let remoteEPubkey0 = initiator.ephemeral.pubkey.data
|
||||||
|
let remoteHPubkey0 = initiator.host.pubkey.data
|
||||||
|
check:
|
||||||
|
responder.remoteEPubkey.data[0..^1] == remoteEPubkey0[0..^1]
|
||||||
|
responder.remoteHPubkey.data[0..^1] == remoteHPubkey0[0..^1]
|
||||||
|
var m1 = fromHex(stripSpaces(testE8Value("authack_ciphertext_v4")))
|
||||||
|
check initiator.decodeAckMessage(m1) == AuthStatus.Success
|
||||||
|
let remoteEPubkey1 = responder.ephemeral.pubkey.data
|
||||||
|
check:
|
||||||
|
initiator.remoteEPubkey.data[0..^1] == remoteEPubkey1[0..^1]
|
||||||
|
initiator.responderNonce[0..^1] == responder.responderNonce[0..^1]
|
||||||
|
|
||||||
|
test "AUTH/ACK EIP-8 test vectors":
|
||||||
|
var initiator = newTestHandshake({Initiator})
|
||||||
|
var responder = newTestHandshake({Responder})
|
||||||
|
var m0 = fromHex(stripSpaces(testE8Value("auth_ciphertext_eip8")))
|
||||||
|
check:
|
||||||
|
responder.decodeAuthMessage(m0) == AuthStatus.Success
|
||||||
|
responder.initiatorNonce[0..^1] == initiator.initiatorNonce[0..^1]
|
||||||
|
let remoteEPubkey0 = initiator.ephemeral.pubkey.data
|
||||||
|
check responder.remoteEPubkey.data[0..^1] == remoteEPubkey0[0..^1]
|
||||||
|
let remoteHPubkey0 = initiator.host.pubkey.data
|
||||||
|
check responder.remoteHPubkey.data[0..^1] == remoteHPubkey0[0..^1]
|
||||||
|
var m1 = fromHex(stripSpaces(testE8Value("authack_ciphertext_eip8")))
|
||||||
|
check initiator.decodeAckMessage(m1) == AuthStatus.Success
|
||||||
|
let remoteEPubkey1 = responder.ephemeral.pubkey.data
|
||||||
|
check:
|
||||||
|
initiator.remoteEPubkey.data[0..^1] == remoteEPubkey1[0..^1]
|
||||||
|
initiator.responderNonce[0..^1] == responder.responderNonce[0..^1]
|
||||||
|
var taes = fromHex(stripSpaces(testE8Value("auth2ack2_aes_secret")))
|
||||||
|
var tmac = fromHex(stripSpaces(testE8Value("auth2ack2_mac_secret")))
|
||||||
|
var csecInitiator: ConnectionSecret
|
||||||
|
var csecResponder: ConnectionSecret
|
||||||
|
check:
|
||||||
|
int(initiator.version) == 4
|
||||||
|
int(responder.version) == 4
|
||||||
|
initiator.getSecrets(m0, m1, csecInitiator) == AuthStatus.Success
|
||||||
|
responder.getSecrets(m0, m1, csecResponder) == AuthStatus.Success
|
||||||
|
csecInitiator.aesKey == csecResponder.aesKey
|
||||||
|
csecInitiator.macKey == csecResponder.macKey
|
||||||
|
taes[0..^1] == csecInitiator.aesKey[0..^1]
|
||||||
|
tmac[0..^1] == csecInitiator.macKey[0..^1]
|
||||||
|
|
||||||
|
test "AUTH/ACK EIP-8 with additional fields test vectors":
|
||||||
|
var initiator = newTestHandshake({Initiator})
|
||||||
|
var responder = newTestHandshake({Responder})
|
||||||
|
var m0 = fromHex(stripSpaces(testE8Value("auth_ciphertext_eip8_3f")))
|
||||||
|
check:
|
||||||
|
responder.decodeAuthMessage(m0) == AuthStatus.Success
|
||||||
|
responder.initiatorNonce[0..^1] == initiator.initiatorNonce[0..^1]
|
||||||
|
let remoteEPubkey0 = initiator.ephemeral.pubkey.data
|
||||||
|
let remoteHPubkey0 = initiator.host.pubkey.data
|
||||||
|
check:
|
||||||
|
responder.remoteEPubkey.data[0..^1] == remoteEPubkey0[0..^1]
|
||||||
|
responder.remoteHPubkey.data[0..^1] == remoteHPubkey0[0..^1]
|
||||||
|
var m1 = fromHex(stripSpaces(testE8Value("authack_ciphertext_eip8_3f")))
|
||||||
|
check initiator.decodeAckMessage(m1) == AuthStatus.Success
|
||||||
|
let remoteEPubkey1 = responder.ephemeral.pubkey.data
|
||||||
|
check:
|
||||||
|
int(initiator.version) == 57
|
||||||
|
int(responder.version) == 56
|
||||||
|
initiator.remoteEPubkey.data[0..^1] == remoteEPubkey1[0..^1]
|
||||||
|
initiator.responderNonce[0..^1] == responder.responderNonce[0..^1]
|
||||||
|
|
||||||
|
test "100 AUTH/ACK EIP-8 handshakes":
|
||||||
|
for i in 1..100:
|
||||||
|
var initiator = newTestHandshake({Initiator, EIP8})
|
||||||
|
var responder = newTestHandshake({Responder})
|
||||||
|
var m0 = newSeq[byte](initiator.authSize())
|
||||||
|
var csecInitiator: ConnectionSecret
|
||||||
|
var csecResponder: ConnectionSecret
|
||||||
|
var k0 = 0
|
||||||
|
var k1 = 0
|
||||||
|
check initiator.authMessage(responder.host.pubkey,
|
||||||
|
m0, k0) == AuthStatus.Success
|
||||||
|
m0.setLen(k0)
|
||||||
|
check responder.decodeAuthMessage(m0) == AuthStatus.Success
|
||||||
|
check (EIP8 in responder.flags) == true
|
||||||
|
var m1 = newSeq[byte](responder.ackSize())
|
||||||
|
check responder.ackMessage(m1, k1) == AuthStatus.Success
|
||||||
|
m1.setLen(k1)
|
||||||
|
check initiator.decodeAckMessage(m1) == AuthStatus.Success
|
||||||
|
check:
|
||||||
|
initiator.getSecrets(m0, m1, csecInitiator) == AuthStatus.Success
|
||||||
|
responder.getSecrets(m0, m1, csecResponder) == AuthStatus.Success
|
||||||
|
csecInitiator.aesKey == csecResponder.aesKey
|
||||||
|
csecInitiator.macKey == csecResponder.macKey
|
||||||
|
|
||||||
|
test "100 AUTH/ACK V4 handshakes":
|
||||||
|
for i in 1..100:
|
||||||
|
var initiator = newTestHandshake({Initiator})
|
||||||
|
var responder = newTestHandshake({Responder})
|
||||||
|
var m0 = newSeq[byte](initiator.authSize())
|
||||||
|
var csecInitiator: ConnectionSecret
|
||||||
|
var csecResponder: ConnectionSecret
|
||||||
|
var k0 = 0
|
||||||
|
var k1 = 0
|
||||||
|
check initiator.authMessage(responder.host.pubkey,
|
||||||
|
m0, k0) == AuthStatus.Success
|
||||||
|
m0.setLen(k0)
|
||||||
|
check responder.decodeAuthMessage(m0) == AuthStatus.Success
|
||||||
|
var m1 = newSeq[byte](responder.ackSize())
|
||||||
|
check responder.ackMessage(m1, k1) == AuthStatus.Success
|
||||||
|
m1.setLen(k1)
|
||||||
|
check initiator.decodeAckMessage(m1) == AuthStatus.Success
|
||||||
|
|
||||||
|
check:
|
||||||
|
initiator.getSecrets(m0, m1, csecInitiator) == AuthStatus.Success
|
||||||
|
responder.getSecrets(m0, m1, csecResponder) == AuthStatus.Success
|
||||||
|
csecInitiator.aesKey == csecResponder.aesKey
|
||||||
|
csecInitiator.macKey == csecResponder.macKey
|
|
@ -0,0 +1,255 @@
|
||||||
|
#
|
||||||
|
# Ethereum P2P
|
||||||
|
# (c) Copyright 2018
|
||||||
|
# Status Research & Development GmbH
|
||||||
|
#
|
||||||
|
# See the file "LICENSE", included in this
|
||||||
|
# distribution, for details about the copyright.
|
||||||
|
#
|
||||||
|
|
||||||
|
import unittest
|
||||||
|
import eth/keys, nimcrypto/[utils, sysrand, keccak]
|
||||||
|
import eth/p2p/[auth, rlpxcrypt]
|
||||||
|
|
||||||
|
const data = [
|
||||||
|
("initiator_private_key",
|
||||||
|
"5e173f6ac3c669587538e7727cf19b782a4f2fda07c1eaa662c593e5e85e3051"),
|
||||||
|
("receiver_private_key",
|
||||||
|
"c45f950382d542169ea207959ee0220ec1491755abe405cd7498d6b16adb6df8"),
|
||||||
|
("initiator_ephemeral_private_key",
|
||||||
|
"19c2185f4f40634926ebed3af09070ca9e029f2edd5fae6253074896205f5f6c"),
|
||||||
|
("receiver_ephemeral_private_key",
|
||||||
|
"d25688cf0ab10afa1a0e2dba7853ed5f1e5bf1c631757ed4e103b593ff3f5620"),
|
||||||
|
("auth_plaintext",
|
||||||
|
"""884c36f7ae6b406637c1f61b2f57e1d2cab813d24c6559aaf843c3f48962f32f
|
||||||
|
46662c066d39669b7b2e3ba14781477417600e7728399278b1b5d801a519aa57
|
||||||
|
0034fdb5419558137e0d44cd13d319afe5629eeccb47fd9dfe55cc6089426e46
|
||||||
|
cc762dd8a0636e07a54b31169eba0c7a20a1ac1ef68596f1f283b5c676bae406
|
||||||
|
4abfcce24799d09f67e392632d3ffdc12e3d6430dcb0ea19c318343ffa7aae74
|
||||||
|
d4cd26fecb93657d1cd9e9eaf4f8be720b56dd1d39f190c4e1c6b7ec66f077bb
|
||||||
|
1100"""),
|
||||||
|
("authresp_plaintext",
|
||||||
|
"""802b052f8b066640bba94a4fc39d63815c377fced6fcb84d27f791c9921ddf3e
|
||||||
|
9bf0108e298f490812847109cbd778fae393e80323fd643209841a3b7f110397
|
||||||
|
f37ec61d84cea03dcc5e8385db93248584e8af4b4d1c832d8c7453c0089687a7
|
||||||
|
00"""),
|
||||||
|
("auth_ciphertext",
|
||||||
|
"""04a0274c5951e32132e7f088c9bdfdc76c9d91f0dc6078e848f8e3361193dbdc
|
||||||
|
43b94351ea3d89e4ff33ddcefbc80070498824857f499656c4f79bbd97b6c51a
|
||||||
|
514251d69fd1785ef8764bd1d262a883f780964cce6a14ff206daf1206aa073a
|
||||||
|
2d35ce2697ebf3514225bef186631b2fd2316a4b7bcdefec8d75a1025ba2c540
|
||||||
|
4a34e7795e1dd4bc01c6113ece07b0df13b69d3ba654a36e35e69ff9d482d88d
|
||||||
|
2f0228e7d96fe11dccbb465a1831c7d4ad3a026924b182fc2bdfe016a6944312
|
||||||
|
021da5cc459713b13b86a686cf34d6fe6615020e4acf26bf0d5b7579ba813e77
|
||||||
|
23eb95b3cef9942f01a58bd61baee7c9bdd438956b426a4ffe238e61746a8c93
|
||||||
|
d5e10680617c82e48d706ac4953f5e1c4c4f7d013c87d34a06626f498f34576d
|
||||||
|
c017fdd3d581e83cfd26cf125b6d2bda1f1d56"""),
|
||||||
|
("authresp_ciphertext",
|
||||||
|
"""049934a7b2d7f9af8fd9db941d9da281ac9381b5740e1f64f7092f3588d4f87f
|
||||||
|
5ce55191a6653e5e80c1c5dd538169aa123e70dc6ffc5af1827e546c0e958e42
|
||||||
|
dad355bcc1fcb9cdf2cf47ff524d2ad98cbf275e661bf4cf00960e74b5956b79
|
||||||
|
9771334f426df007350b46049adb21a6e78ab1408d5e6ccde6fb5e69f0f4c92b
|
||||||
|
b9c725c02f99fa72b9cdc8dd53cff089e0e73317f61cc5abf6152513cb7d833f
|
||||||
|
09d2851603919bf0fbe44d79a09245c6e8338eb502083dc84b846f2fee1cc310
|
||||||
|
d2cc8b1b9334728f97220bb799376233e113"""),
|
||||||
|
("ecdhe_shared_secret",
|
||||||
|
"e3f407f83fc012470c26a93fdff534100f2c6f736439ce0ca90e9914f7d1c381"),
|
||||||
|
("initiator_nonce",
|
||||||
|
"cd26fecb93657d1cd9e9eaf4f8be720b56dd1d39f190c4e1c6b7ec66f077bb11"),
|
||||||
|
("receiver_nonce",
|
||||||
|
"f37ec61d84cea03dcc5e8385db93248584e8af4b4d1c832d8c7453c0089687a7"),
|
||||||
|
("aes_secret",
|
||||||
|
"c0458fa97a5230830e05f4f20b7c755c1d4e54b1ce5cf43260bb191eef4e418d"),
|
||||||
|
("mac_secret",
|
||||||
|
"48c938884d5067a1598272fcddaa4b833cd5e7d92e8228c0ecdfabbe68aef7f1"),
|
||||||
|
("token",
|
||||||
|
"3f9ec2592d1554852b1f54d228f042ed0a9310ea86d038dc2b401ba8cd7fdac4"),
|
||||||
|
("initial_egress_MAC",
|
||||||
|
"09771e93b1a6109e97074cbe2d2b0cf3d3878efafe68f53c41bb60c0ec49097e"),
|
||||||
|
("initial_ingress_MAC",
|
||||||
|
"75823d96e23136c89666ee025fb21a432be906512b3dd4a3049e898adb433847"),
|
||||||
|
("initiator_hello_packet",
|
||||||
|
"""6ef23fcf1cec7312df623f9ae701e63b550cdb8517fefd8dd398fc2acd1d935e
|
||||||
|
6e0434a2b96769078477637347b7b01924fff9ff1c06df2f804df3b0402bbb9f
|
||||||
|
87365b3c6856b45e1e2b6470986813c3816a71bff9d69dd297a5dbd935ab578f
|
||||||
|
6e5d7e93e4506a44f307c332d95e8a4b102585fd8ef9fc9e3e055537a5cec2e9"""),
|
||||||
|
("receiver_hello_packet",
|
||||||
|
"""6ef23fcf1cec7312df623f9ae701e63be36a1cdd1b19179146019984f3625d4a
|
||||||
|
6e0434a2b96769050577657247b7b02bc6c314470eca7e3ef650b98c83e9d7dd
|
||||||
|
4830b3f718ff562349aead2530a8d28a8484604f92e5fced2c6183f304344ab0
|
||||||
|
e7c301a0c05559f4c25db65e36820b4b909a226171a60ac6cb7beea09376d6d8""")
|
||||||
|
]
|
||||||
|
|
||||||
|
proc testValue(s: string): string =
|
||||||
|
for item in data:
|
||||||
|
if item[0] == s:
|
||||||
|
result = item[1]
|
||||||
|
break
|
||||||
|
|
||||||
|
suite "Ethereum RLPx encryption/decryption test suite":
|
||||||
|
proc newTestHandshake(flags: set[HandshakeFlag]): Handshake =
|
||||||
|
result = newHandshake(flags)
|
||||||
|
if Initiator in flags:
|
||||||
|
result.host.seckey = initPrivateKey(testValue("initiator_private_key"))
|
||||||
|
result.host.pubkey = result.host.seckey.getPublicKey()
|
||||||
|
let epki = testValue("initiator_ephemeral_private_key")
|
||||||
|
result.ephemeral.seckey = initPrivateKey(epki)
|
||||||
|
result.ephemeral.pubkey = result.ephemeral.seckey.getPublicKey()
|
||||||
|
let nonce = fromHex(stripSpaces(testValue("initiator_nonce")))
|
||||||
|
result.initiatorNonce[0..^1] = nonce[0..^1]
|
||||||
|
elif Responder in flags:
|
||||||
|
result.host.seckey = initPrivateKey(testValue("receiver_private_key"))
|
||||||
|
result.host.pubkey = result.host.seckey.getPublicKey()
|
||||||
|
let epkr = testValue("receiver_ephemeral_private_key")
|
||||||
|
result.ephemeral.seckey = initPrivateKey(epkr)
|
||||||
|
result.ephemeral.pubkey = result.ephemeral.seckey.getPublicKey()
|
||||||
|
let nonce = fromHex(stripSpaces(testValue("receiver_nonce")))
|
||||||
|
result.responderNonce[0..^1] = nonce[0..^1]
|
||||||
|
|
||||||
|
test "Encrypt/Decrypt Hello packet test vectors":
|
||||||
|
var initiator = newTestHandshake({Initiator})
|
||||||
|
var responder = newTestHandshake({Responder})
|
||||||
|
var authm = fromHex(stripSpaces(testValue("auth_ciphertext")))
|
||||||
|
var ackm = fromHex(stripSpaces(testValue("authresp_ciphertext")))
|
||||||
|
var csecInitiator: ConnectionSecret
|
||||||
|
var csecResponder: ConnectionSecret
|
||||||
|
var stateInitiator0, stateInitiator1: SecretState
|
||||||
|
var stateResponder0, stateResponder1: SecretState
|
||||||
|
check:
|
||||||
|
responder.decodeAuthMessage(authm) == AuthStatus.Success
|
||||||
|
initiator.decodeAckMessage(ackm) == AuthStatus.Success
|
||||||
|
initiator.getSecrets(authm, ackm, csecInitiator) == AuthStatus.Success
|
||||||
|
responder.getSecrets(authm, ackm, csecResponder) == AuthStatus.Success
|
||||||
|
initSecretState(csecInitiator, stateInitiator0)
|
||||||
|
initSecretState(csecResponder, stateResponder0)
|
||||||
|
initSecretState(csecInitiator, stateInitiator1)
|
||||||
|
initSecretState(csecResponder, stateResponder1)
|
||||||
|
var packet0 = testValue("initiator_hello_packet")
|
||||||
|
var initiatorHello = fromHex(stripSpaces(packet0))
|
||||||
|
var packet1 = testValue("receiver_hello_packet")
|
||||||
|
var responderHello = fromHex(stripSpaces(packet1))
|
||||||
|
var header: array[RlpHeaderLength, byte]
|
||||||
|
|
||||||
|
block:
|
||||||
|
check stateResponder0.decryptHeader(toOpenArray(initiatorHello, 0, 31),
|
||||||
|
header) == RlpxStatus.Success
|
||||||
|
let bodysize = getBodySize(header)
|
||||||
|
check bodysize == 79
|
||||||
|
# we need body size to be rounded to 16 bytes boundary to properly
|
||||||
|
# encrypt/decrypt it.
|
||||||
|
var body = newSeq[byte](decryptedLength(bodysize))
|
||||||
|
var decrsize = 0
|
||||||
|
check:
|
||||||
|
stateResponder0.decryptBody(
|
||||||
|
toOpenArray(initiatorHello, 32, len(initiatorHello) - 1),
|
||||||
|
getBodySize(header), body, decrsize) == RlpxStatus.Success
|
||||||
|
decrsize == 79
|
||||||
|
body.setLen(decrsize)
|
||||||
|
var hello = newSeq[byte](encryptedLength(bodysize))
|
||||||
|
check:
|
||||||
|
stateInitiator1.encrypt(header, body, hello) == RlpxStatus.Success
|
||||||
|
hello == initiatorHello
|
||||||
|
block:
|
||||||
|
check stateInitiator0.decryptHeader(toOpenArray(responderHello, 0, 31),
|
||||||
|
header) == RlpxStatus.Success
|
||||||
|
let bodysize = getBodySize(header)
|
||||||
|
check bodysize == 79
|
||||||
|
# we need body size to be rounded to 16 bytes boundary to properly
|
||||||
|
# encrypt/decrypt it.
|
||||||
|
var body = newSeq[byte](decryptedLength(bodysize))
|
||||||
|
var decrsize = 0
|
||||||
|
check:
|
||||||
|
stateInitiator0.decryptBody(
|
||||||
|
toOpenArray(responderHello, 32, len(initiatorHello) - 1),
|
||||||
|
getBodySize(header), body, decrsize) == RlpxStatus.Success
|
||||||
|
decrsize == 79
|
||||||
|
body.setLen(decrsize)
|
||||||
|
var hello = newSeq[byte](encryptedLength(bodysize))
|
||||||
|
check:
|
||||||
|
stateResponder1.encrypt(header, body, hello) == RlpxStatus.Success
|
||||||
|
hello == responderHello
|
||||||
|
|
||||||
|
test "Continuous stream of different lengths (1000 times)":
|
||||||
|
var initiator = newTestHandshake({Initiator})
|
||||||
|
var responder = newTestHandshake({Responder})
|
||||||
|
var m0 = newSeq[byte](initiator.authSize())
|
||||||
|
var csecInitiator: ConnectionSecret
|
||||||
|
var csecResponder: ConnectionSecret
|
||||||
|
var k0 = 0
|
||||||
|
var k1 = 0
|
||||||
|
check initiator.authMessage(responder.host.pubkey,
|
||||||
|
m0, k0) == AuthStatus.Success
|
||||||
|
m0.setLen(k0)
|
||||||
|
check responder.decodeAuthMessage(m0) == AuthStatus.Success
|
||||||
|
var m1 = newSeq[byte](responder.ackSize())
|
||||||
|
check responder.ackMessage(m1, k1) == AuthStatus.Success
|
||||||
|
m1.setLen(k1)
|
||||||
|
check initiator.decodeAckMessage(m1) == AuthStatus.Success
|
||||||
|
|
||||||
|
check:
|
||||||
|
initiator.getSecrets(m0, m1, csecInitiator) == AuthStatus.Success
|
||||||
|
responder.getSecrets(m0, m1, csecResponder) == AuthStatus.Success
|
||||||
|
var stateInitiator: SecretState
|
||||||
|
var stateResponder: SecretState
|
||||||
|
var iheader, rheader: array[16, byte]
|
||||||
|
initSecretState(csecInitiator, stateInitiator)
|
||||||
|
initSecretState(csecResponder, stateResponder)
|
||||||
|
burnMem(iheader)
|
||||||
|
burnMem(rheader)
|
||||||
|
for i in 1..1000:
|
||||||
|
# initiator -> responder
|
||||||
|
block:
|
||||||
|
var ibody = newSeq[byte](i)
|
||||||
|
var encrypted = newSeq[byte](encryptedLength(len(ibody)))
|
||||||
|
iheader[0] = byte((len(ibody) shr 16) and 0xFF)
|
||||||
|
iheader[1] = byte((len(ibody) shr 8) and 0xFF)
|
||||||
|
iheader[2] = byte(len(ibody) and 0xFF)
|
||||||
|
check:
|
||||||
|
randomBytes(ibody) == len(ibody)
|
||||||
|
stateInitiator.encrypt(iheader, ibody,
|
||||||
|
encrypted) == RlpxStatus.Success
|
||||||
|
stateResponder.decryptHeader(toOpenArray(encrypted, 0, 31),
|
||||||
|
rheader) == RlpxStatus.Success
|
||||||
|
var length = getBodySize(rheader)
|
||||||
|
check length == len(ibody)
|
||||||
|
var rbody = newSeq[byte](decryptedLength(length))
|
||||||
|
var decrsize = 0
|
||||||
|
check:
|
||||||
|
stateResponder.decryptBody(
|
||||||
|
toOpenArray(encrypted, 32, len(encrypted) - 1),
|
||||||
|
length, rbody, decrsize) == RlpxStatus.Success
|
||||||
|
decrsize == length
|
||||||
|
rbody.setLen(decrsize)
|
||||||
|
check:
|
||||||
|
iheader == rheader
|
||||||
|
ibody == rbody
|
||||||
|
burnMem(iheader)
|
||||||
|
burnMem(rheader)
|
||||||
|
# responder -> initiator
|
||||||
|
block:
|
||||||
|
var ibody = newSeq[byte](i * 3)
|
||||||
|
var encrypted = newSeq[byte](encryptedLength(len(ibody)))
|
||||||
|
iheader[0] = byte((len(ibody) shr 16) and 0xFF)
|
||||||
|
iheader[1] = byte((len(ibody) shr 8) and 0xFF)
|
||||||
|
iheader[2] = byte(len(ibody) and 0xFF)
|
||||||
|
check:
|
||||||
|
randomBytes(ibody) == len(ibody)
|
||||||
|
stateResponder.encrypt(iheader, ibody,
|
||||||
|
encrypted) == RlpxStatus.Success
|
||||||
|
stateInitiator.decryptHeader(toOpenArray(encrypted, 0, 31),
|
||||||
|
rheader) == RlpxStatus.Success
|
||||||
|
var length = getBodySize(rheader)
|
||||||
|
check length == len(ibody)
|
||||||
|
var rbody = newSeq[byte](decryptedLength(length))
|
||||||
|
var decrsize = 0
|
||||||
|
check:
|
||||||
|
stateInitiator.decryptBody(
|
||||||
|
toOpenArray(encrypted, 32, len(encrypted) - 1),
|
||||||
|
length, rbody, decrsize) == RlpxStatus.Success
|
||||||
|
decrsize == length
|
||||||
|
rbody.setLen(length)
|
||||||
|
check:
|
||||||
|
iheader == rheader
|
||||||
|
ibody == rbody
|
||||||
|
burnMem(iheader)
|
||||||
|
burnMem(rheader)
|
|
@ -0,0 +1,57 @@
|
||||||
|
#
|
||||||
|
# Ethereum P2P
|
||||||
|
# (c) Copyright 2018
|
||||||
|
# Status Research & Development GmbH
|
||||||
|
#
|
||||||
|
# See the file "LICENSE", included in this
|
||||||
|
# distribution, for details about the copyright.
|
||||||
|
#
|
||||||
|
|
||||||
|
import sequtils, logging
|
||||||
|
import eth/keys, asyncdispatch2, byteutils
|
||||||
|
import eth/p2p/[discovery, kademlia, enode]
|
||||||
|
|
||||||
|
const clientId = "nim-eth-p2p/0.0.1"
|
||||||
|
|
||||||
|
addHandler(newConsoleLogger())
|
||||||
|
|
||||||
|
proc startDiscoveryNode(privKey: PrivateKey, address: Address, bootnodes: seq[ENode]): Future[DiscoveryProtocol] {.async.} =
|
||||||
|
result = newDiscoveryProtocol(privKey, address, bootnodes)
|
||||||
|
result.open()
|
||||||
|
await result.bootstrap()
|
||||||
|
|
||||||
|
proc localAddress(port: int): Address =
|
||||||
|
let port = Port(port)
|
||||||
|
result = Address(udpPort: port, tcpPort: port, ip: parseIpAddress("127.0.0.1"))
|
||||||
|
|
||||||
|
let
|
||||||
|
bootNodeKey = initPrivateKey("a2b50376a79b1a8c8a3296485572bdfbf54708bb46d3c25d73d2723aaaf6a617")
|
||||||
|
bootNodeAddr = localAddress(20301)
|
||||||
|
bootENode = initENode(bootNodeKey.getPublicKey, bootNodeAddr)
|
||||||
|
|
||||||
|
nodeKeys = [
|
||||||
|
initPrivateKey("a2b50376a79b1a8c8a3296485572bdfbf54708bb46d3c25d73d2723aaaf6a618"),
|
||||||
|
initPrivateKey("a2b50376a79b1a8c8a3296485572bdfbf54708bb46d3c25d73d2723aaaf6a619"),
|
||||||
|
initPrivateKey("a2b50376a79b1a8c8a3296485572bdfbf54708bb46d3c25d73d2723aaaf6a620")
|
||||||
|
]
|
||||||
|
proc nodeIdInNodes(id: NodeId, nodes: openarray[Node]): bool =
|
||||||
|
for n in nodes:
|
||||||
|
if id == n.id: return true
|
||||||
|
|
||||||
|
proc test() {.async.} =
|
||||||
|
let bootNode = await startDiscoveryNode(bootNodeKey, bootNodeAddr, @[])
|
||||||
|
|
||||||
|
var nodeAddrs = newSeqOfCap[Address](nodeKeys.len)
|
||||||
|
for i in 0 ..< nodeKeys.len: nodeAddrs.add(localAddress(20302 + i))
|
||||||
|
|
||||||
|
var nodes = await all(zip(nodeKeys, nodeAddrs).mapIt(
|
||||||
|
startDiscoveryNode(it.a, it.b, @[bootENode]))
|
||||||
|
)
|
||||||
|
nodes.add(bootNode)
|
||||||
|
|
||||||
|
for i in nodes:
|
||||||
|
for j in nodes:
|
||||||
|
if j != i:
|
||||||
|
doAssert(nodeIdInNodes(i.thisNode.id, j.randomNodes(nodes.len - 1)))
|
||||||
|
|
||||||
|
waitFor test()
|
|
@ -0,0 +1,171 @@
|
||||||
|
#
|
||||||
|
# Ethereum P2P
|
||||||
|
# (c) Copyright 2018
|
||||||
|
# Status Research & Development GmbH
|
||||||
|
#
|
||||||
|
# See the file "LICENSE", included in this
|
||||||
|
# distribution, for details about the copyright.
|
||||||
|
#
|
||||||
|
|
||||||
|
import unittest
|
||||||
|
import eth/keys, nimcrypto/[utils, sha2, hmac, rijndael]
|
||||||
|
import eth/p2p/ecies
|
||||||
|
|
||||||
|
proc compare[A, B](x: openarray[A], y: openarray[B], s: int = 0): bool =
|
||||||
|
result = true
|
||||||
|
assert(s >= 0)
|
||||||
|
var size = if s == 0: min(len(x), len(y)) else: min(s, min(len(x), len(y)))
|
||||||
|
for i in 0..(size - 1):
|
||||||
|
if x[i] != cast[A](y[i]):
|
||||||
|
result = false
|
||||||
|
break
|
||||||
|
|
||||||
|
template offsetOf(a, b): int =
|
||||||
|
cast[int](cast[uint](unsafeAddr b) - cast[uint](unsafeAddr a))
|
||||||
|
|
||||||
|
suite "ECIES test suite":
|
||||||
|
test "ECIES structures alignment":
|
||||||
|
var header: EciesHeader
|
||||||
|
check:
|
||||||
|
offsetOf(header, header.version) == 0
|
||||||
|
offsetOf(header, header.pubkey) == 1
|
||||||
|
offsetOf(header, header.iv) == 1 + 64
|
||||||
|
offsetOf(header, header.data) == 1 + 64 + aes128.sizeBlock
|
||||||
|
sizeof(header) == 1 + 64 + aes128.sizeBlock + 1
|
||||||
|
|
||||||
|
test "KDF test vectors":
|
||||||
|
# KDF test
|
||||||
|
# Copied from https://github.com/ethereum/pydevp2p/blob/develop/devp2p/tests/test_ecies.py#L53
|
||||||
|
let m0 = "961c065873443014e0371f1ed656c586c6730bf927415757f389d92acf8268df"
|
||||||
|
let c0 = "4050c52e6d9c08755e5a818ac66fabe478b825b1836fd5efc4d44e40d04dabcc"
|
||||||
|
var m = fromHex(stripSpaces(m0))
|
||||||
|
var c = fromHex(stripSpaces(c0))
|
||||||
|
var k = kdf(m)
|
||||||
|
check compare(k, c) == true
|
||||||
|
|
||||||
|
test "HMAC-SHA256 test vectors":
|
||||||
|
# HMAC-SHA256 test
|
||||||
|
# https://github.com/ethereum/py-evm/blob/master/tests/p2p/test_ecies.py#L64-L76
|
||||||
|
const keys = [
|
||||||
|
"07a4b6dfa06369a570f2dcba2f11a18f",
|
||||||
|
"af6623e52208c596e17c72cea6f1cb09"
|
||||||
|
]
|
||||||
|
const datas = ["4dcb92ed4fc67fe86832", "3461282bcedace970df2"]
|
||||||
|
const expects = [
|
||||||
|
"c90b62b1a673b47df8e395e671a68bfa68070d6e2ef039598bb829398b89b9a9",
|
||||||
|
"b3ce623bce08d5793677ba9441b22bb34d3e8a7de964206d26589df3e8eb5183"
|
||||||
|
]
|
||||||
|
for i in 0..1:
|
||||||
|
var k = fromHex(stripSpaces(keys[i]))
|
||||||
|
var m = fromHex(stripSpaces(datas[i]))
|
||||||
|
var digest = sha256.hmac(k, m).data
|
||||||
|
var expect = fromHex(stripSpaces(expects[i]))
|
||||||
|
check compare(digest, expect) == true
|
||||||
|
|
||||||
|
test "ECIES \"Hello World!\" encryption/decryption test":
|
||||||
|
# ECIES encryption
|
||||||
|
var m = "Hello World!"
|
||||||
|
var plain = cast[seq[byte]](m)
|
||||||
|
var encr = newSeq[byte](eciesEncryptedLength(len(m)))
|
||||||
|
var decr = newSeq[byte](len(m))
|
||||||
|
var shmac = [0x13'u8, 0x13'u8]
|
||||||
|
var s = newPrivateKey()
|
||||||
|
var p = s.getPublicKey()
|
||||||
|
check:
|
||||||
|
# Without additional mac data
|
||||||
|
eciesEncrypt(plain, encr, p) == EciesStatus.Success
|
||||||
|
eciesDecrypt(encr, decr, s) == EciesStatus.Success
|
||||||
|
equalMem(addr m[0], addr decr[0], len(m))
|
||||||
|
# With additional mac data
|
||||||
|
eciesEncrypt(plain, encr, p, shmac) == EciesStatus.Success
|
||||||
|
eciesDecrypt(encr, decr, s, shmac) == EciesStatus.Success
|
||||||
|
equalMem(addr m[0], addr decr[0], len(m))
|
||||||
|
|
||||||
|
test "ECIES/py-evm/cpp-ethereum test_ecies.py#L43/rlpx.cpp#L187":
|
||||||
|
# ECIES
|
||||||
|
# https://github.com/ethereum/py-evm/blob/master/tests/p2p/test_ecies.py#L43
|
||||||
|
# https://github.com/ethereum/cpp-ethereum/blob/develop/test/unittests/libp2p/rlpx.cpp#L187
|
||||||
|
const secretKeys = [
|
||||||
|
"c45f950382d542169ea207959ee0220ec1491755abe405cd7498d6b16adb6df8",
|
||||||
|
"5e173f6ac3c669587538e7727cf19b782a4f2fda07c1eaa662c593e5e85e3051"
|
||||||
|
]
|
||||||
|
const cipherText = [
|
||||||
|
"""04a0274c5951e32132e7f088c9bdfdc76c9d91f0dc6078e848f8e3361193dbdc
|
||||||
|
43b94351ea3d89e4ff33ddcefbc80070498824857f499656c4f79bbd97b6c51a
|
||||||
|
514251d69fd1785ef8764bd1d262a883f780964cce6a14ff206daf1206aa073a
|
||||||
|
2d35ce2697ebf3514225bef186631b2fd2316a4b7bcdefec8d75a1025ba2c540
|
||||||
|
4a34e7795e1dd4bc01c6113ece07b0df13b69d3ba654a36e35e69ff9d482d88d
|
||||||
|
2f0228e7d96fe11dccbb465a1831c7d4ad3a026924b182fc2bdfe016a6944312
|
||||||
|
021da5cc459713b13b86a686cf34d6fe6615020e4acf26bf0d5b7579ba813e77
|
||||||
|
23eb95b3cef9942f01a58bd61baee7c9bdd438956b426a4ffe238e61746a8c93
|
||||||
|
d5e10680617c82e48d706ac4953f5e1c4c4f7d013c87d34a06626f498f34576d
|
||||||
|
c017fdd3d581e83cfd26cf125b6d2bda1f1d56""",
|
||||||
|
"""049934a7b2d7f9af8fd9db941d9da281ac9381b5740e1f64f7092f3588d4f87f
|
||||||
|
5ce55191a6653e5e80c1c5dd538169aa123e70dc6ffc5af1827e546c0e958e42
|
||||||
|
dad355bcc1fcb9cdf2cf47ff524d2ad98cbf275e661bf4cf00960e74b5956b79
|
||||||
|
9771334f426df007350b46049adb21a6e78ab1408d5e6ccde6fb5e69f0f4c92b
|
||||||
|
b9c725c02f99fa72b9cdc8dd53cff089e0e73317f61cc5abf6152513cb7d833f
|
||||||
|
09d2851603919bf0fbe44d79a09245c6e8338eb502083dc84b846f2fee1cc310
|
||||||
|
d2cc8b1b9334728f97220bb799376233e113"""
|
||||||
|
]
|
||||||
|
const expectText = [
|
||||||
|
"""884c36f7ae6b406637c1f61b2f57e1d2cab813d24c6559aaf843c3f48962f32f
|
||||||
|
46662c066d39669b7b2e3ba14781477417600e7728399278b1b5d801a519aa57
|
||||||
|
0034fdb5419558137e0d44cd13d319afe5629eeccb47fd9dfe55cc6089426e46
|
||||||
|
cc762dd8a0636e07a54b31169eba0c7a20a1ac1ef68596f1f283b5c676bae406
|
||||||
|
4abfcce24799d09f67e392632d3ffdc12e3d6430dcb0ea19c318343ffa7aae74
|
||||||
|
d4cd26fecb93657d1cd9e9eaf4f8be720b56dd1d39f190c4e1c6b7ec66f077bb
|
||||||
|
1100""",
|
||||||
|
"""802b052f8b066640bba94a4fc39d63815c377fced6fcb84d27f791c9921ddf3e
|
||||||
|
9bf0108e298f490812847109cbd778fae393e80323fd643209841a3b7f110397
|
||||||
|
f37ec61d84cea03dcc5e8385db93248584e8af4b4d1c832d8c7453c0089687a7
|
||||||
|
00"""
|
||||||
|
]
|
||||||
|
var data: array[1024, byte]
|
||||||
|
for i in 0..1:
|
||||||
|
var s = initPrivateKey(secretKeys[i])
|
||||||
|
var cipher = fromHex(stripSpaces(cipherText[i]))
|
||||||
|
var expect = fromHex(stripSpaces(expectText[i]))
|
||||||
|
check:
|
||||||
|
eciesDecrypt(cipher, data, s) == EciesStatus.Success
|
||||||
|
compare(data, expect) == true
|
||||||
|
|
||||||
|
test "ECIES/cpp-ethereum rlpx.cpp#L432-L459":
|
||||||
|
# ECIES
|
||||||
|
# https://github.com/ethereum/cpp-ethereum/blob/develop/test/unittests/libp2p/rlpx.cpp#L432-L459
|
||||||
|
const secretKeys = [
|
||||||
|
"57baf2c62005ddec64c357d96183ebc90bf9100583280e848aa31d683cad73cb",
|
||||||
|
"472413e97f1fd58d84e28a559479e6b6902d2e8a0cee672ef38a3a35d263886b",
|
||||||
|
"472413e97f1fd58d84e28a559479e6b6902d2e8a0cee672ef38a3a35d263886b",
|
||||||
|
"472413e97f1fd58d84e28a559479e6b6902d2e8a0cee672ef38a3a35d263886b"
|
||||||
|
]
|
||||||
|
const cipherData = [
|
||||||
|
"""04ff2c874d0a47917c84eea0b2a4141ca95233720b5c70f81a8415bae1dc7b74
|
||||||
|
6b61df7558811c1d6054333907333ef9bb0cc2fbf8b34abb9730d14e0140f455
|
||||||
|
3f4b15d705120af46cf653a1dc5b95b312cf8444714f95a4f7a0425b67fc064d
|
||||||
|
18f4d0a528761565ca02d97faffdac23de10""",
|
||||||
|
"""046f647e1bd8a5cd1446d31513bac233e18bdc28ec0e59d46de453137a725995
|
||||||
|
33f1e97c98154343420d5f16e171e5107999a7c7f1a6e26f57bcb0d2280655d0
|
||||||
|
8fb148d36f1d4b28642d3bb4a136f0e33e3dd2e3cffe4b45a03fb7c5b5ea5e65
|
||||||
|
617250fdc89e1a315563c20504b9d3a72555""",
|
||||||
|
"""0443c24d6ccef3ad095140760bb143078b3880557a06392f17c5e368502d7953
|
||||||
|
2bc18903d59ced4bbe858e870610ab0d5f8b7963dd5c9c4cf81128d10efd7c7a
|
||||||
|
a80091563c273e996578403694673581829e25a865191bdc9954db14285b56eb
|
||||||
|
0043b6288172e0d003c10f42fe413222e273d1d4340c38a2d8344d7aadcbc846
|
||||||
|
ee""",
|
||||||
|
"""04c4e40c86bb5324e017e598c6d48c19362ae527af8ab21b077284a4656c8735
|
||||||
|
e62d73fb3d740acefbec30ca4c024739a1fcdff69ecaf03301eebf156eb5f17c
|
||||||
|
ca6f9d7a7e214a1f3f6e34d1ee0ec00ce0ef7d2b242fbfec0f276e17941f9f1b
|
||||||
|
fbe26de10a15a6fac3cda039904ddd1d7e06e7b96b4878f61860e47f0b84c8ce
|
||||||
|
b64f6a900ff23844f4359ae49b44154980a626d3c73226c19e"""
|
||||||
|
]
|
||||||
|
const expectData = [
|
||||||
|
"a", "a", "aaaaaaaaaaaaaaaa", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
|
||||||
|
]
|
||||||
|
var data: array[1024, byte]
|
||||||
|
for i in 0..3:
|
||||||
|
var s = initPrivateKey(secretKeys[i])
|
||||||
|
var cipher = fromHex(stripSpaces(cipherData[i]))
|
||||||
|
check:
|
||||||
|
eciesDecrypt(cipher, data, s) == EciesStatus.Success
|
||||||
|
compare(data, expectData[i]) == true
|
|
@ -0,0 +1,100 @@
|
||||||
|
#
|
||||||
|
# Ethereum P2P
|
||||||
|
# (c) Copyright 2018
|
||||||
|
# Status Research & Development GmbH
|
||||||
|
#
|
||||||
|
# Licensed under either of
|
||||||
|
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||||
|
# MIT license (LICENSE-MIT)
|
||||||
|
|
||||||
|
import unittest, net
|
||||||
|
import eth/p2p/enode
|
||||||
|
|
||||||
|
suite "ENode":
  test "Go-Ethereum tests":
    # URI parsing vectors ported from go-ethereum. The 128-hex-char string is
    # a well-formed 64-byte node id; it is shared across vectors via a
    # compile-time const so the vectors stay readable.
    const nodeId =
      "1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace243" &
      "51232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"
    const enodes = [
      "http://foobar",
      "enode://01010101@123.124.125.126:3",
      "enode://" & nodeId & "@hostname:3",
      "enode://" & nodeId & "@127.0.0.1:foo",
      "enode://" & nodeId & "@127.0.0.1:3?discport=foo",
      "01010101",
      "enode://01010101",
      "://foo",
      "enode://" & nodeId & "@127.0.0.1:52150",
      "enode://" & nodeId & "@[::]:52150",
      "enode://" & nodeId & "@[2001:db8:3c4d:15::abcd:ef12]:52150",
      "enode://" & nodeId & "@127.0.0.1:52150?discport=22334"
    ]
    const results = [
      ENodeStatus.IncorrectScheme,
      ENodeStatus.IncorrectNodeId,
      ENodeStatus.IncorrectIP,
      ENodeStatus.IncorrectPort,
      ENodeStatus.IncorrectDiscPort,
      ENodeStatus.IncorrectScheme,
      ENodeStatus.IncorrectNodeId,
      ENodeStatus.IncorrectScheme,
      ENodeStatus.Success,
      ENodeStatus.Success,
      ENodeStatus.Success,
      ENodeStatus.Success
    ]

    for i, uri in enodes:
      var parsed: ENode
      let status = initENode(uri, parsed)
      check status == results[i]
      if status == ENodeStatus.Success:
        # A successful parse must serialize back to the exact input URI.
        check $parsed == uri

  test "Custom validation tests":
    const nodeId =
      "1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace243" &
      "51232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"
    # Same key with the final character corrupted ('9' -> 'Z'): invalid hex.
    const badNodeId =
      "1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace243" &
      "51232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a43Z"
    const enodes = [
      "enode://" & nodeId & "@256.0.0.1:52150",
      "enode://" & nodeId & "@1.256.0.1:52150",
      "enode://" & nodeId & "@1.1.256.1:52150",
      "enode://" & nodeId & "@1.1.1.256:52150",
      "enode://" & nodeId & ":@1.1.1.255:52150",
      "enode://" & nodeId & ":bar@1.1.1.255:52150",
      "enode://" & nodeId & "@1.1.1.255:-1",
      "enode://" & nodeId & "@1.1.1.255:65536",
      "enode://" & nodeId & "@1.1.1.255:1024?discport=-1",
      "enode://" & nodeId & "@1.1.1.255:1024?discport=65536",
      "enode://" & nodeId & "@1.1.1.255:1024?discport=65535#bar",
      "enode://" & nodeId & "@",
      "enode://" & badNodeId & "@1.1.1.1:25?discport=22"
    ]
    const results = [
      ENodeStatus.IncorrectIP,
      ENodeStatus.IncorrectIP,
      ENodeStatus.IncorrectIP,
      ENodeStatus.IncorrectIP,
      ENodeStatus.Success,
      ENodeStatus.IncorrectUri,
      ENodeStatus.IncorrectPort,
      ENodeStatus.IncorrectPort,
      ENodeStatus.IncorrectDiscPort,
      ENodeStatus.IncorrectDiscPort,
      ENodeStatus.IncorrectUri,
      ENodeStatus.IncorrectIP,
      ENodeStatus.IncorrectNodeId
    ]

    for i, uri in enodes:
      var parsed: ENode
      check initENode(uri, parsed) == results[i]

  test "isCorrect() tests":
    # A zero-initialised ENode is not correct; each required field is filled
    # in one at a time and correctness is only reached at the very end.
    var node: ENode
    check not node.isCorrect()
    node.address.ip.family = IpAddressFamily.IPv4
    check not node.isCorrect()
    node.address.tcpPort = Port(25)
    check not node.isCorrect()
    node.address.udpPort = Port(25)
    check not node.isCorrect()
    # One non-zero pubkey byte makes the key non-empty.
    node.pubkey.data[0] = 1'u8
    check node.isCorrect()
|
|
@ -0,0 +1,370 @@
|
||||||
|
#
|
||||||
|
# Ethereum P2P
|
||||||
|
# (c) Copyright 2018
|
||||||
|
# Status Research & Development GmbH
|
||||||
|
#
|
||||||
|
# Licensed under either of
|
||||||
|
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||||
|
# MIT license (LICENSE-MIT)
|
||||||
|
|
||||||
|
import
|
||||||
|
sequtils, options, unittest, times, tables,
|
||||||
|
nimcrypto/hash,
|
||||||
|
eth/[keys, rlp],
|
||||||
|
eth/p2p/rlpx_protocols/whisper_protocol as whisper
|
||||||
|
|
||||||
|
suite "Whisper payload":
  # Round-trip tests for whisper.encode()/decode() over the Payload type,
  # covering plain, symmetric, asymmetric and signed variants.
  test "should roundtrip without keys":
    let payload = Payload(payload: @[byte 0, 1, 2])
    let encoded = whisper.encode(payload)

    let decoded = whisper.decode(encoded.get())
    check:
      decoded.isSome()
      payload.payload == decoded.get().payload
      # Padding fills the encoded data to 256 bytes; the comment breakdown is
      # 256 minus flags byte, padding-length byte and the 3 payload bytes —
      # TODO confirm exact layout against whisper_protocol's encode().
      decoded.get().padding.get().len == 251 # 256 -1 -1 -3

  test "should roundtrip with symmetric encryption":
    # All-zero SymKey is fine for a self round-trip.
    var symKey: SymKey
    let payload = Payload(symKey: some(symKey), payload: @[byte 0, 1, 2])
    let encoded = whisper.encode(payload)

    let decoded = whisper.decode(encoded.get(), symKey = some(symKey))
    check:
      decoded.isSome()
      payload.payload == decoded.get().payload
      decoded.get().padding.get().len == 251 # 256 -1 -1 -3

  test "should roundtrip with signature":
    let privKey = keys.newPrivateKey()

    let payload = Payload(src: some(privKey), payload: @[byte 0, 1, 2])
    let encoded = whisper.encode(payload)

    let decoded = whisper.decode(encoded.get())
    check:
      decoded.isSome()
      payload.payload == decoded.get().payload
      # The signer's public key must be recovered from the signature.
      privKey.getPublicKey() == decoded.get().src.get()
      # 65 extra bytes are taken by the signature.
      decoded.get().padding.get().len == 186 # 256 -1 -1 -3 -65

  test "should roundtrip with asymmetric encryption":
    let privKey = keys.newPrivateKey()

    let payload = Payload(dst: some(privKey.getPublicKey()),
                          payload: @[byte 0, 1, 2])
    let encoded = whisper.encode(payload)

    let decoded = whisper.decode(encoded.get(), dst = some(privKey))
    check:
      decoded.isSome()
      payload.payload == decoded.get().payload
      decoded.get().padding.get().len == 251 # 256 -1 -1 -3

  test "should return specified bloom":
    # Geth test: https://github.com/ethersphere/go-ethereum/blob/d3441ebb563439bac0837d70591f92e2c6080303/whisper/whisperv6/whisper_test.go#L834
    let top0 = [byte 0, 0, 255, 6]
    var x: Bloom
    x[0] = byte 1
    x[32] = byte 1
    x[^1] = byte 128
    # Compare as seqs since Bloom is a fixed-size array.
    check @(top0.topicBloom) == @x
|
||||||
|
|
||||||
|
suite "Whisper payload padding":
  # Automatic padding pads the encoded data to the next 256-byte boundary;
  # explicit padding (some(...)) must round-trip unchanged.
  test "should do max padding":
    # 254 payload bytes + 2 header bytes land exactly on 256, so a full
    # additional 256-byte padding block is appended.
    let payload = Payload(payload: repeat(byte 1, 254))
    let encoded = whisper.encode(payload)

    let decoded = whisper.decode(encoded.get())
    check:
      decoded.isSome()
      payload.payload == decoded.get().payload
      decoded.get().padding.isSome()
      decoded.get().padding.get().len == 256 # as dataLen == 256

  test "should do max padding with signature":
    let privKey = keys.newPrivateKey()

    # 189 payload bytes + 65-byte signature + 2 header bytes == 256.
    let payload = Payload(src: some(privKey), payload: repeat(byte 1, 189))
    let encoded = whisper.encode(payload)

    let decoded = whisper.decode(encoded.get())
    check:
      decoded.isSome()
      payload.payload == decoded.get().payload
      privKey.getPublicKey() == decoded.get().src.get()
      decoded.get().padding.isSome()
      decoded.get().padding.get().len == 256 # as dataLen == 256

  test "should do min padding":
    # One byte short of the boundary -> a single padding byte is added.
    let payload = Payload(payload: repeat(byte 1, 253))
    let encoded = whisper.encode(payload)

    let decoded = whisper.decode(encoded.get())
    check:
      decoded.isSome()
      payload.payload == decoded.get().payload
      decoded.get().padding.isSome()
      decoded.get().padding.get().len == 1 # as dataLen == 255

  test "should do min padding with signature":
    let privKey = keys.newPrivateKey()

    let payload = Payload(src: some(privKey), payload: repeat(byte 1, 188))
    let encoded = whisper.encode(payload)

    let decoded = whisper.decode(encoded.get())
    check:
      decoded.isSome()
      payload.payload == decoded.get().payload
      privKey.getPublicKey() == decoded.get().src.get()
      decoded.get().padding.isSome()
      decoded.get().padding.get().len == 1 # as dataLen == 255

  test "should roundtrip custom padding":
    # Caller-supplied padding disables the automatic scheme and must come
    # back byte-for-byte.
    let payload = Payload(payload: repeat(byte 1, 10),
                          padding: some(repeat(byte 2, 100)))
    let encoded = whisper.encode(payload)

    let decoded = whisper.decode(encoded.get())
    check:
      decoded.isSome()
      payload.payload == decoded.get().payload
      decoded.get().padding.isSome()
      payload.padding.get() == decoded.get().padding.get()

  test "should roundtrip custom 0 padding":
    # Explicitly empty padding must decode as "no padding" (isNone).
    let padding: seq[byte] = @[]
    let payload = Payload(payload: repeat(byte 1, 10),
                          padding: some(padding))
    let encoded = whisper.encode(payload)

    let decoded = whisper.decode(encoded.get())
    check:
      decoded.isSome()
      payload.payload == decoded.get().payload
      decoded.get().padding.isNone()

  test "should roundtrip custom padding with signature":
    let privKey = keys.newPrivateKey()
    let payload = Payload(src: some(privKey), payload: repeat(byte 1, 10),
                          padding: some(repeat(byte 2, 100)))
    let encoded = whisper.encode(payload)

    let decoded = whisper.decode(encoded.get())
    check:
      decoded.isSome()
      payload.payload == decoded.get().payload
      privKey.getPublicKey() == decoded.get().src.get()
      decoded.get().padding.isSome()
      payload.padding.get() == decoded.get().padding.get()

  test "should roundtrip custom 0 padding with signature":
    let padding: seq[byte] = @[]
    let privKey = keys.newPrivateKey()
    let payload = Payload(src: some(privKey), payload: repeat(byte 1, 10),
                          padding: some(padding))
    let encoded = whisper.encode(payload)

    let decoded = whisper.decode(encoded.get())
    check:
      decoded.isSome()
      payload.payload == decoded.get().payload
      privKey.getPublicKey() == decoded.get().src.get()
      decoded.get().padding.isNone()
|
||||||
|
|
||||||
|
# example from https://github.com/paritytech/parity-ethereum/blob/93e1040d07e385d1219d00af71c46c720b0a1acf/whisper/src/message.rs#L439
let
  # Two envelopes identical except for the nonce; shared fixtures for the
  # pow-hash and queue suites below.
  env0 = Envelope(
    expiry:100000, ttl: 30, topic: [byte 0, 0, 0, 0],
    data: repeat(byte 9, 256), nonce: 1010101)
  env1 = Envelope(
    expiry:100000, ttl: 30, topic: [byte 0, 0, 0, 0],
    data: repeat(byte 9, 256), nonce: 1010102)
|
||||||
|
|
||||||
|
suite "Whisper envelope":
  # Covers calcPowHash field selection, valid() time/ttl rules and
  # allowed() bloom-filter matching. Fix: the three "invalidate" tests
  # previously built a WhisperConfig local that was never used (only
  # env.valid() is exercised there); the dead locals are removed.
  test "should use correct fields for pow hash":
    # XXX checked with parity, should check with geth too - found a potential bug
    # in parity while playing with it:
    # https://github.com/paritytech/parity-ethereum/issues/9625
    check $calcPowHash(env0) ==
      "A13B48480AEB3123CD2358516E2E8EE9FCB0F4CB37E68CD09FDF7F9A7E14767C"

  test "should validate and allow envelope according to config":
    let ttl = 1'u32
    let topic = [byte 1, 2, 3, 4]
    # Bloom built from the envelope's own topic, so allowed() must accept.
    let config = WhisperConfig(powRequirement: 0, bloom: topic.topicBloom(),
                               isLightNode: false, maxMsgSize: defaultMaxMsgSize)

    let env = Envelope(expiry:epochTime().uint32 + ttl, ttl: ttl, topic: topic,
                       data: repeat(byte 9, 256), nonce: 0)
    check env.valid()

    let msg = initMessage(env)
    check msg.allowed(config)

  test "should invalidate envelope due to ttl 0":
    # A zero TTL is never valid.
    let ttl = 0'u32
    let topic = [byte 1, 2, 3, 4]

    let env = Envelope(expiry:epochTime().uint32 + ttl, ttl: ttl, topic: topic,
                       data: repeat(byte 9, 256), nonce: 0)
    check env.valid() == false

  test "should invalidate envelope due to expired":
    let ttl = 1'u32
    let topic = [byte 1, 2, 3, 4]

    # expiry == now means the send time (expiry - ttl) is already in the past
    # beyond the allowed window.
    let env = Envelope(expiry:epochTime().uint32, ttl: ttl, topic: topic,
                       data: repeat(byte 9, 256), nonce: 0)
    check env.valid() == false

  test "should invalidate envelope due to in the future":
    let ttl = 1'u32
    let topic = [byte 1, 2, 3, 4]

    # there is currently a 2 second tolerance, hence the + 3
    let env = Envelope(expiry:epochTime().uint32 + ttl + 3, ttl: ttl, topic: topic,
                       data: repeat(byte 9, 256), nonce: 0)
    check env.valid() == false

  test "should not allow envelope due to bloom filter":
    let topic = [byte 1, 2, 3, 4]
    let wrongTopic = [byte 9, 8, 7, 6]
    # Bloom built from a different topic: allowed() must reject the message.
    let config = WhisperConfig(powRequirement: 0, bloom: wrongTopic.topicBloom(),
                               isLightNode: false, maxMsgSize: defaultMaxMsgSize)

    let env = Envelope(expiry:100000 , ttl: 30, topic: topic,
                       data: repeat(byte 9, 256), nonce: 0)

    let msg = initMessage(env)
    check msg.allowed(config) == false
|
||||||
|
|
||||||
|
|
||||||
|
suite "Whisper queue":
  test "should throw out lower proof-of-work item when full":
    # Capacity of one: adding a second message must evict whichever of the
    # two has the lower PoW, keeping the higher-PoW one.
    var q = initQueue(1)

    let first = initMessage(env0)
    let second = initMessage(env1)

    discard q.add(first)
    discard q.add(second)

    let survivorNonce =
      if first.pow > second.pow: first.env.nonce
      else: second.env.nonce
    check:
      q.items.len() == 1
      q.items[0].env.nonce == survivorNonce

  test "should not throw out messages as long as there is capacity":
    var q = initQueue(2)

    check:
      q.add(initMessage(env0)) == true
      q.add(initMessage(env1)) == true

      q.items.len() == 2

  test "check field order against expected rlp order":
    # Envelope must RLP-encode its fields in exactly this order.
    let expected = rlp.encodeList(env0.expiry, env0.ttl, env0.topic,
                                  env0.data, env0.nonce)
    check rlp.encode(env0) == expected
|
||||||
|
|
||||||
|
# To test filters we do not care if the msg is valid or allowed
proc prepFilterTestMsg(pubKey = none[PublicKey](), symKey = none[SymKey](),
                       src = none[PrivateKey](), topic: Topic,
                       padding = none[seq[byte]]()): Message =
  ## Builds a `Message` with the fixed payload @[0, 1, 2] on `topic`,
  ## optionally encrypted (asym via `pubKey`, sym via `symKey`) and/or
  ## signed (`src`), with optional explicit `padding`.
  let encoded = whisper.encode(Payload(dst: pubKey, symKey: symKey, src: src,
                                       payload: @[byte 0, 1, 2],
                                       padding: padding))
  # Expiry/ttl/nonce are dummies: filter matching ignores envelope validity.
  initMessage(Envelope(expiry: 1, ttl: 1, topic: topic, data: encoded.get(),
                       nonce: 0))
|
||||||
|
|
||||||
|
suite "Whisper filter":
  # Each test builds a message with prepFilterTestMsg, subscribes one or more
  # filters, pushes the message through notify() and checks which filters
  # collected it via getFilterMessages().
  test "should notify filter on message with symmetric encryption":
    var symKey: SymKey
    let topic = [byte 0, 0, 0, 0]
    let msg = prepFilterTestMsg(symKey = some(symKey), topic = topic)

    var filters = initTable[string, Filter]()
    let filter = newFilter(symKey = some(symKey), topics = @[topic])
    let filterId = filters.subscribeFilter(filter)

    notify(filters, msg)

    let messages = filters.getFilterMessages(filterId)
    check messages.len == 1

  test "should notify filter on message with asymmetric encryption":
    let privKey = keys.newPrivateKey()
    let topic = [byte 0, 0, 0, 0]
    # Encrypt to the public key; the filter holds the matching private key.
    let msg = prepFilterTestMsg(pubKey = some(privKey.getPublicKey()),
                                topic = topic)

    var filters = initTable[string, Filter]()
    let filter = newFilter(privateKey = some(privKey), topics = @[topic])
    let filterId = filters.subscribeFilter(filter)

    notify(filters, msg)

    let messages = filters.getFilterMessages(filterId)
    check messages.len == 1

  test "should notify filter on message with signature":
    let privKey = keys.newPrivateKey()
    let topic = [byte 0, 0, 0, 0]
    # Sign with the private key; the filter selects on the public key.
    let msg = prepFilterTestMsg(src = some(privKey), topic = topic)

    var filters = initTable[string, Filter]()
    let filter = newFilter(src = some(privKey.getPublicKey()),
                           topics = @[topic])
    let filterId = filters.subscribeFilter(filter)

    notify(filters, msg)

    let messages = filters.getFilterMessages(filterId)
    check messages.len == 1

  test "test notify of filter against PoW requirement":
    let topic = [byte 0, 0, 0, 0]
    let padding = some(repeat(byte 0, 251))
    # this message has a PoW of 0.02962962962962963, number should be updated
    # in case PoW algorithm changes or contents of padding, payload, topic, etc.
    let msg = prepFilterTestMsg(topic = topic, padding = padding)

    var filters = initTable[string, Filter]()
    let
      # powReq exactly at the message's PoW -> accepted.
      filterId1 = filters.subscribeFilter(
        newFilter(topics = @[topic], powReq = 0.02962962962962963))
      # powReq one ulp above the message's PoW -> rejected.
      filterId2 = filters.subscribeFilter(
        newFilter(topics = @[topic], powReq = 0.02962962962962964))

    notify(filters, msg)

    check:
      filters.getFilterMessages(filterId1).len == 1
      filters.getFilterMessages(filterId2).len == 0

  test "test notify of filter on message with certain topic":
    let
      topic1 = [byte 0xAB, 0x12, 0xCD, 0x34]
      topic2 = [byte 0, 0, 0, 0]

    let msg = prepFilterTestMsg(topic = topic1)

    var filters = initTable[string, Filter]()
    let
      filterId1 = filters.subscribeFilter(newFilter(topics = @[topic1]))
      filterId2 = filters.subscribeFilter(newFilter(topics = @[topic2]))

    notify(filters, msg)

    # Only the filter subscribed to the message's topic sees it.
    check:
      filters.getFilterMessages(filterId1).len == 1
      filters.getFilterMessages(filterId2).len == 0
|
|
@ -0,0 +1,385 @@
|
||||||
|
#
|
||||||
|
# Ethereum P2P
|
||||||
|
# (c) Copyright 2018
|
||||||
|
# Status Research & Development GmbH
|
||||||
|
#
|
||||||
|
# Licensed under either of
|
||||||
|
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||||
|
# MIT license (LICENSE-MIT)
|
||||||
|
|
||||||
|
import
|
||||||
|
sequtils, options, unittest, tables, asyncdispatch2, eth/[rlp, keys, p2p],
|
||||||
|
eth/p2p/rlpx_protocols/[whisper_protocol], eth/p2p/[discovery, enode]
|
||||||
|
|
||||||
|
const
|
||||||
|
useCompression = defined(useSnappy)
|
||||||
|
|
||||||
|
var nextPort = 30303
|
||||||
|
|
||||||
|
proc localAddress(port: int): Address =
  ## Loopback (127.0.0.1) address with `port` used for both TCP and UDP.
  Address(udpPort: Port(port), tcpPort: Port(port),
          ip: parseIpAddress("127.0.0.1"))
|
||||||
|
|
||||||
|
proc startDiscoveryNode(privKey: PrivateKey, address: Address,
                        bootnodes: seq[ENode]): Future[DiscoveryProtocol] {.async.} =
  ## Creates a discovery protocol instance listening on `address`, opens its
  ## transport and waits for bootstrap against `bootnodes` to finish.
  result = newDiscoveryProtocol(privKey, address, bootnodes)
  result.open()
  await result.bootstrap()
|
||||||
|
|
||||||
|
proc setupBootNode(): Future[ENode] {.async.} =
  ## Starts a discovery-only boot node on fixed local port 30301 and returns
  ## the ENode that other test nodes can bootstrap against.
  let
    bootNodeKey = newPrivateKey()
    bootNodeAddr = localAddress(30301)
    # NOTE(review): bootNode is never referenced again; presumably the open
    # UDP transport keeps the protocol alive — confirm it is not collected.
    bootNode = await startDiscoveryNode(bootNodeKey, bootNodeAddr, @[])
  result = initENode(bootNodeKey.getPublicKey, bootNodeAddr)
|
||||||
|
|
||||||
|
template asyncTest(name, body: untyped) =
  ## Declares a unittest `test` whose body runs inside an async proc, driven
  ## to completion with waitFor so `await` can be used directly in tests.
  test name:
    proc scenario {.async.} = body
    waitFor scenario()
|
||||||
|
|
||||||
|
proc resetMessageQueues(nodes: varargs[EthereumNode]) =
  ## Empties the Whisper message queue of every given node, isolating
  ## consecutive test cases from each other.
  for n in nodes:
    resetMessageQueue(n)
|
||||||
|
|
||||||
|
proc prepTestNode(): EthereumNode =
  ## Creates a Whisper-only EthereumNode listening on the next free local
  ## port. Mutates the global `nextPort` counter so each node is unique.
  let keys1 = newKeyPair()
  result = newEthereumNode(keys1, localAddress(nextPort), 1, nil,
                           addAllCapabilities = false,
                           useCompression = useCompression)
  nextPort.inc
  result.addCapability Whisper
|
||||||
|
|
||||||
|
# Shared fixture: one boot node plus two Whisper nodes that discover each
# other through it before any suite runs.
let bootENode = waitFor setupBootNode()

var node1 = prepTestNode()
var node2 = prepTestNode()
# node2 listening and node1 not, to avoid many incoming vs outgoing
var node1Connected = node1.connectToNetwork(@[bootENode], false, true)
var node2Connected = node2.connectToNetwork(@[bootENode], true, true)
# Connect both concurrently, then wait for each to finish.
waitFor node1Connected
waitFor node2Connected
|
||||||
|
|
||||||
|
suite "Whisper connections":
|
||||||
|
asyncTest "Two peers connected":
|
||||||
|
check:
|
||||||
|
node1.peerPool.connectedNodes.len() == 1
|
||||||
|
node2.peerPool.connectedNodes.len() == 1
|
||||||
|
|
||||||
|
asyncTest "Filters with encryption and signing":
|
||||||
|
let encryptKeyPair = newKeyPair()
|
||||||
|
let signKeyPair = newKeyPair()
|
||||||
|
var symKey: SymKey
|
||||||
|
let topic = [byte 0x12, 0, 0, 0]
|
||||||
|
var filters: seq[string] = @[]
|
||||||
|
var payloads = [repeat(byte 1, 10), repeat(byte 2, 10),
|
||||||
|
repeat(byte 3, 10), repeat(byte 4, 10)]
|
||||||
|
var futures = [newFuture[int](), newFuture[int](),
|
||||||
|
newFuture[int](), newFuture[int]()]
|
||||||
|
|
||||||
|
proc handler1(msg: ReceivedMessage) =
|
||||||
|
var count {.global.}: int
|
||||||
|
check msg.decoded.payload == payloads[0] or msg.decoded.payload == payloads[1]
|
||||||
|
count += 1
|
||||||
|
if count == 2: futures[0].complete(1)
|
||||||
|
proc handler2(msg: ReceivedMessage) =
|
||||||
|
check msg.decoded.payload == payloads[1]
|
||||||
|
futures[1].complete(1)
|
||||||
|
proc handler3(msg: ReceivedMessage) =
|
||||||
|
var count {.global.}: int
|
||||||
|
check msg.decoded.payload == payloads[2] or msg.decoded.payload == payloads[3]
|
||||||
|
count += 1
|
||||||
|
if count == 2: futures[2].complete(1)
|
||||||
|
proc handler4(msg: ReceivedMessage) =
|
||||||
|
check msg.decoded.payload == payloads[3]
|
||||||
|
futures[3].complete(1)
|
||||||
|
|
||||||
|
# Filters
|
||||||
|
# filter for encrypted asym
|
||||||
|
filters.add(node1.subscribeFilter(newFilter(privateKey = some(encryptKeyPair.seckey),
|
||||||
|
topics = @[topic]), handler1))
|
||||||
|
# filter for encrypted asym + signed
|
||||||
|
filters.add(node1.subscribeFilter(newFilter(some(signKeyPair.pubkey),
|
||||||
|
privateKey = some(encryptKeyPair.seckey),
|
||||||
|
topics = @[topic]), handler2))
|
||||||
|
# filter for encrypted sym
|
||||||
|
filters.add(node1.subscribeFilter(newFilter(symKey = some(symKey),
|
||||||
|
topics = @[topic]), handler3))
|
||||||
|
# filter for encrypted sym + signed
|
||||||
|
filters.add(node1.subscribeFilter(newFilter(some(signKeyPair.pubkey),
|
||||||
|
symKey = some(symKey),
|
||||||
|
topics = @[topic]), handler4))
|
||||||
|
var safeTTL = 5'u32
|
||||||
|
# Messages
|
||||||
|
check:
|
||||||
|
# encrypted asym
|
||||||
|
node2.postMessage(some(encryptKeyPair.pubkey), ttl = safeTTL,
|
||||||
|
topic = topic, payload = payloads[0]) == true
|
||||||
|
# encrypted asym + signed
|
||||||
|
node2.postMessage(some(encryptKeyPair.pubkey),
|
||||||
|
src = some(signKeyPair.seckey), ttl = safeTTL,
|
||||||
|
topic = topic, payload = payloads[1]) == true
|
||||||
|
# encrypted sym
|
||||||
|
node2.postMessage(symKey = some(symKey), ttl = safeTTL, topic = topic,
|
||||||
|
payload = payloads[2]) == true
|
||||||
|
# encrypted sym + signed
|
||||||
|
node2.postMessage(symKey = some(symKey),
|
||||||
|
src = some(signKeyPair.seckey),
|
||||||
|
ttl = safeTTL, topic = topic,
|
||||||
|
payload = payloads[3]) == true
|
||||||
|
|
||||||
|
node2.protocolState(Whisper).queue.items.len == 4
|
||||||
|
|
||||||
|
var f = all(futures)
|
||||||
|
await f or sleepAsync(messageInterval)
|
||||||
|
check:
|
||||||
|
f.finished == true
|
||||||
|
node1.protocolState(Whisper).queue.items.len == 4
|
||||||
|
|
||||||
|
for filter in filters:
|
||||||
|
check node1.unsubscribeFilter(filter) == true
|
||||||
|
|
||||||
|
resetMessageQueues(node1, node2)
|
||||||
|
|
||||||
|
asyncTest "Filters with topics":
|
||||||
|
let topic1 = [byte 0x12, 0, 0, 0]
|
||||||
|
let topic2 = [byte 0x34, 0, 0, 0]
|
||||||
|
var payloads = [repeat(byte 0, 10), repeat(byte 1, 10)]
|
||||||
|
var futures = [newFuture[int](), newFuture[int]()]
|
||||||
|
proc handler1(msg: ReceivedMessage) =
|
||||||
|
check msg.decoded.payload == payloads[0]
|
||||||
|
futures[0].complete(1)
|
||||||
|
proc handler2(msg: ReceivedMessage) =
|
||||||
|
check msg.decoded.payload == payloads[1]
|
||||||
|
futures[1].complete(1)
|
||||||
|
|
||||||
|
var filter1 = node1.subscribeFilter(newFilter(topics = @[topic1]), handler1)
|
||||||
|
var filter2 = node1.subscribeFilter(newFilter(topics = @[topic2]), handler2)
|
||||||
|
|
||||||
|
var safeTTL = 3'u32
|
||||||
|
check:
|
||||||
|
node2.postMessage(ttl = safeTTL + 1, topic = topic1,
|
||||||
|
payload = payloads[0]) == true
|
||||||
|
node2.postMessage(ttl = safeTTL, topic = topic2,
|
||||||
|
payload = payloads[1]) == true
|
||||||
|
node2.protocolState(Whisper).queue.items.len == 2
|
||||||
|
|
||||||
|
var f = all(futures)
|
||||||
|
await f or sleepAsync(messageInterval)
|
||||||
|
check:
|
||||||
|
f.finished == true
|
||||||
|
node1.protocolState(Whisper).queue.items.len == 2
|
||||||
|
|
||||||
|
node1.unsubscribeFilter(filter1) == true
|
||||||
|
node1.unsubscribeFilter(filter2) == true
|
||||||
|
|
||||||
|
resetMessageQueues(node1, node2)
|
||||||
|
|
||||||
|
asyncTest "Filters with PoW":
|
||||||
|
let topic = [byte 0x12, 0, 0, 0]
|
||||||
|
var payload = repeat(byte 0, 10)
|
||||||
|
var futures = [newFuture[int](), newFuture[int]()]
|
||||||
|
proc handler1(msg: ReceivedMessage) =
|
||||||
|
check msg.decoded.payload == payload
|
||||||
|
futures[0].complete(1)
|
||||||
|
proc handler2(msg: ReceivedMessage) =
|
||||||
|
check msg.decoded.payload == payload
|
||||||
|
futures[1].complete(1)
|
||||||
|
|
||||||
|
var filter1 = node1.subscribeFilter(newFilter(topics = @[topic], powReq = 0),
|
||||||
|
handler1)
|
||||||
|
var filter2 = node1.subscribeFilter(newFilter(topics = @[topic],
|
||||||
|
powReq = 1_000_000), handler2)
|
||||||
|
|
||||||
|
let safeTTL = 2'u32
|
||||||
|
check:
|
||||||
|
node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true
|
||||||
|
|
||||||
|
await futures[0] or sleepAsync(messageInterval)
|
||||||
|
await futures[1] or sleepAsync(messageInterval)
|
||||||
|
check:
|
||||||
|
futures[0].finished == true
|
||||||
|
futures[1].finished == false
|
||||||
|
node1.protocolState(Whisper).queue.items.len == 1
|
||||||
|
|
||||||
|
node1.unsubscribeFilter(filter1) == true
|
||||||
|
node1.unsubscribeFilter(filter2) == true
|
||||||
|
|
||||||
|
resetMessageQueues(node1, node2)
|
||||||
|
|
||||||
|
asyncTest "Filters with queues":
|
||||||
|
let topic = [byte 0, 0, 0, 0]
|
||||||
|
let payload = repeat(byte 0, 10)
|
||||||
|
|
||||||
|
var filter = node1.subscribeFilter(newFilter(topics = @[topic]))
|
||||||
|
for i in countdown(10, 1):
|
||||||
|
check node2.postMessage(ttl = i.uint32, topic = topic,
|
||||||
|
payload = payload) == true
|
||||||
|
|
||||||
|
await sleepAsync(messageInterval)
|
||||||
|
check:
|
||||||
|
node1.getFilterMessages(filter).len() == 10
|
||||||
|
node1.getFilterMessages(filter).len() == 0
|
||||||
|
node1.unsubscribeFilter(filter) == true
|
||||||
|
|
||||||
|
resetMessageQueues(node1, node2)
|
||||||
|
|
||||||
|
asyncTest "Local filter notify":
|
||||||
|
let topic = [byte 0, 0, 0, 0]
|
||||||
|
|
||||||
|
var filter = node1.subscribeFilter(newFilter(topics = @[topic]))
|
||||||
|
let safeTTL = 2'u32
|
||||||
|
check:
|
||||||
|
node1.postMessage(ttl = safeTTL, topic = topic,
|
||||||
|
payload = repeat(byte 4, 10)) == true
|
||||||
|
node1.getFilterMessages(filter).len() == 1
|
||||||
|
node1.unsubscribeFilter(filter) == true
|
||||||
|
|
||||||
|
await sleepAsync(messageInterval)
|
||||||
|
resetMessageQueues(node1, node2)
|
||||||
|
|
||||||
|
asyncTest "Bloomfilter blocking":
|
||||||
|
let sendTopic1 = [byte 0x12, 0, 0, 0]
|
||||||
|
let sendTopic2 = [byte 0x34, 0, 0, 0]
|
||||||
|
let filterTopics = @[[byte 0x34, 0, 0, 0],[byte 0x56, 0, 0, 0]]
|
||||||
|
let payload = repeat(byte 0, 10)
|
||||||
|
var f: Future[int] = newFuture[int]()
|
||||||
|
proc handler(msg: ReceivedMessage) =
|
||||||
|
check msg.decoded.payload == payload
|
||||||
|
f.complete(1)
|
||||||
|
var filter = node1.subscribeFilter(newFilter(topics = filterTopics), handler)
|
||||||
|
await node1.setBloomFilter(node1.filtersToBloom())
|
||||||
|
|
||||||
|
let safeTTL = 2'u32
|
||||||
|
check:
|
||||||
|
node2.postMessage(ttl = safeTTL, topic = sendTopic1,
|
||||||
|
payload = payload) == true
|
||||||
|
node2.protocolState(Whisper).queue.items.len == 1
|
||||||
|
|
||||||
|
await f or sleepAsync(messageInterval)
|
||||||
|
check:
|
||||||
|
f.finished == false
|
||||||
|
node1.protocolState(Whisper).queue.items.len == 0
|
||||||
|
|
||||||
|
resetMessageQueues(node1, node2)
|
||||||
|
|
||||||
|
f = newFuture[int]()
|
||||||
|
|
||||||
|
check:
|
||||||
|
node2.postMessage(ttl = safeTTL, topic = sendTopic2,
|
||||||
|
payload = payload) == true
|
||||||
|
node2.protocolState(Whisper).queue.items.len == 1
|
||||||
|
|
||||||
|
await f or sleepAsync(messageInterval)
|
||||||
|
check:
|
||||||
|
f.finished == true
|
||||||
|
f.read() == 1
|
||||||
|
node1.protocolState(Whisper).queue.items.len == 1
|
||||||
|
|
||||||
|
node1.unsubscribeFilter(filter) == true
|
||||||
|
|
||||||
|
await node1.setBloomFilter(fullBloom())
|
||||||
|
|
||||||
|
resetMessageQueues(node1, node2)
|
||||||
|
|
||||||
|
asyncTest "PoW blocking":
|
||||||
|
let topic = [byte 0, 0, 0, 0]
|
||||||
|
let payload = repeat(byte 0, 10)
|
||||||
|
let safeTTL = 2'u32
|
||||||
|
|
||||||
|
await node1.setPowRequirement(1_000_000)
|
||||||
|
check:
|
||||||
|
node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true
|
||||||
|
node2.protocolState(Whisper).queue.items.len == 1
|
||||||
|
await sleepAsync(messageInterval)
|
||||||
|
check:
|
||||||
|
node1.protocolState(Whisper).queue.items.len == 0
|
||||||
|
|
||||||
|
resetMessageQueues(node1, node2)
|
||||||
|
|
||||||
|
await node1.setPowRequirement(0.0)
|
||||||
|
check:
|
||||||
|
node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true
|
||||||
|
node2.protocolState(Whisper).queue.items.len == 1
|
||||||
|
await sleepAsync(messageInterval)
|
||||||
|
check:
|
||||||
|
node1.protocolState(Whisper).queue.items.len == 1
|
||||||
|
|
||||||
|
resetMessageQueues(node1, node2)
|
||||||
|
|
||||||
|
asyncTest "Queue pruning":
|
||||||
|
let topic = [byte 0, 0, 0, 0]
|
||||||
|
let payload = repeat(byte 0, 10)
|
||||||
|
# We need a minimum TTL of 2 as when set to 1 there is a small chance that
|
||||||
|
# it is already expired after messageInterval due to rounding down of float
|
||||||
|
# to uint32 in postMessage()
|
||||||
|
let minTTL = 2'u32
|
||||||
|
for i in countdown(minTTL + 9, minTTL):
|
||||||
|
check node2.postMessage(ttl = i, topic = topic, payload = payload) == true
|
||||||
|
check node2.protocolState(Whisper).queue.items.len == 10
|
||||||
|
|
||||||
|
await sleepAsync(messageInterval)
|
||||||
|
check node1.protocolState(Whisper).queue.items.len == 10
|
||||||
|
|
||||||
|
await sleepAsync(int(minTTL*1000))
|
||||||
|
check node1.protocolState(Whisper).queue.items.len == 0
|
||||||
|
check node2.protocolState(Whisper).queue.items.len == 0
|
||||||
|
|
||||||
|
resetMessageQueues(node1, node2)
|
||||||
|
|
||||||
|
asyncTest "P2P post":
|
||||||
|
let topic = [byte 0, 0, 0, 0]
|
||||||
|
var f: Future[int] = newFuture[int]()
|
||||||
|
proc handler(msg: ReceivedMessage) =
|
||||||
|
check msg.decoded.payload == repeat(byte 4, 10)
|
||||||
|
f.complete(1)
|
||||||
|
|
||||||
|
var filter = node1.subscribeFilter(newFilter(topics = @[topic],
|
||||||
|
allowP2P = true), handler)
|
||||||
|
check:
|
||||||
|
node1.setPeerTrusted(toNodeId(node2.keys.pubkey)) == true
|
||||||
|
node2.postMessage(ttl = 10, topic = topic,
|
||||||
|
payload = repeat(byte 4, 10),
|
||||||
|
targetPeer = some(toNodeId(node1.keys.pubkey))) == true
|
||||||
|
|
||||||
|
await f or sleepAsync(messageInterval)
|
||||||
|
check:
|
||||||
|
f.finished == true
|
||||||
|
f.read() == 1
|
||||||
|
node1.protocolState(Whisper).queue.items.len == 0
|
||||||
|
node2.protocolState(Whisper).queue.items.len == 0
|
||||||
|
|
||||||
|
node1.unsubscribeFilter(filter) == true
|
||||||
|
|
||||||
|
test "Light node posting":
|
||||||
|
var ln1 = prepTestNode()
|
||||||
|
ln1.setLightNode(true)
|
||||||
|
|
||||||
|
# not listening, so will only connect to others that are listening (node2)
|
||||||
|
waitFor ln1.connectToNetwork(@[bootENode], false, true)
|
||||||
|
|
||||||
|
let topic = [byte 0, 0, 0, 0]
|
||||||
|
|
||||||
|
let safeTTL = 2'u32
|
||||||
|
check:
|
||||||
|
# normal post
|
||||||
|
ln1.postMessage(ttl = safeTTL, topic = topic,
|
||||||
|
payload = repeat(byte 0, 10)) == false
|
||||||
|
ln1.protocolState(Whisper).queue.items.len == 0
|
||||||
|
# P2P post
|
||||||
|
ln1.postMessage(ttl = safeTTL, topic = topic,
|
||||||
|
payload = repeat(byte 0, 10),
|
||||||
|
targetPeer = some(toNodeId(node2.keys.pubkey))) == true
|
||||||
|
ln1.protocolState(Whisper).queue.items.len == 0
|
||||||
|
|
||||||
|
test "Connect two light nodes":
|
||||||
|
var ln1 = prepTestNode()
|
||||||
|
var ln2 = prepTestNode()
|
||||||
|
|
||||||
|
ln1.setLightNode(true)
|
||||||
|
ln2.setLightNode(true)
|
||||||
|
|
||||||
|
ln2.startListening()
|
||||||
|
let peer = waitFor ln1.rlpxConnect(newNode(initENode(ln2.keys.pubKey,
|
||||||
|
ln2.address)))
|
||||||
|
check peer.isNil == true
|
|
@ -0,0 +1,49 @@
|
||||||
|
#
|
||||||
|
# Ethereum P2P
|
||||||
|
# (c) Copyright 2018
|
||||||
|
# Status Research & Development GmbH
|
||||||
|
#
|
||||||
|
# Licensed under either of
|
||||||
|
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||||
|
# MIT license (LICENSE-MIT)
|
||||||
|
|
||||||
|
import
|
||||||
|
options, unittest, asyncdispatch2, eth/[rlp, keys, p2p],
|
||||||
|
eth/p2p/mock_peers, eth/p2p/rlpx_protocols/[whisper_protocol]
|
||||||
|
|
||||||
|
proc localAddress(port: int): Address =
|
||||||
|
let port = Port(port)
|
||||||
|
result = Address(udpPort: port, tcpPort: port, ip: parseIpAddress("127.0.0.1"))
|
||||||
|
|
||||||
|
template asyncTest(name, body: untyped) =
|
||||||
|
test name:
|
||||||
|
proc scenario {.async.} = body
|
||||||
|
waitFor scenario()
|
||||||
|
|
||||||
|
asyncTest "network with 3 peers using the Whisper protocol":
|
||||||
|
const useCompression = defined(useSnappy)
|
||||||
|
let localKeys = newKeyPair()
|
||||||
|
let localAddress = localAddress(30303)
|
||||||
|
var localNode = newEthereumNode(localKeys, localAddress, 1, nil,
|
||||||
|
addAllCapabilities = false,
|
||||||
|
useCompression = useCompression)
|
||||||
|
localNode.addCapability Whisper
|
||||||
|
localNode.startListening()
|
||||||
|
|
||||||
|
var mock1 = newMockPeer do (m: MockConf):
|
||||||
|
m.addHandshake Whisper.status(protocolVersion: whisperVersion, powConverted: 0,
|
||||||
|
bloom: @[], isLightNode: false)
|
||||||
|
m.expect Whisper.messages
|
||||||
|
|
||||||
|
var mock2 = newMockPeer do (m: MockConf):
|
||||||
|
m.addHandshake Whisper.status(protocolVersion: whisperVersion,
|
||||||
|
powConverted: cast[uint](0.1),
|
||||||
|
bloom: @[], isLightNode: false)
|
||||||
|
m.expect Whisper.messages
|
||||||
|
|
||||||
|
var mock1Peer = await localNode.rlpxConnect(mock1)
|
||||||
|
var mock2Peer = await localNode.rlpxConnect(mock2)
|
||||||
|
|
||||||
|
check:
|
||||||
|
mock1Peer.state(Whisper).powRequirement == 0
|
||||||
|
mock2Peer.state(Whisper).powRequirement == 0.1
|
|
@ -0,0 +1,135 @@
|
||||||
|
#
|
||||||
|
# Ethereum P2P
|
||||||
|
# (c) Copyright 2018
|
||||||
|
# Status Research & Development GmbH
|
||||||
|
#
|
||||||
|
# Licensed under either of
|
||||||
|
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||||
|
# MIT license (LICENSE-MIT)
|
||||||
|
|
||||||
|
import
|
||||||
|
sequtils, strformat, options, unittest,
|
||||||
|
chronicles, asyncdispatch2, eth/[rlp, keys, p2p],
|
||||||
|
eth/p2p/mock_peers
|
||||||
|
|
||||||
|
const
|
||||||
|
clientId = "nim-eth-p2p/0.0.1"
|
||||||
|
|
||||||
|
type
|
||||||
|
AbcPeer = ref object
|
||||||
|
peerName: string
|
||||||
|
lastResponse: string
|
||||||
|
|
||||||
|
XyzPeer = ref object
|
||||||
|
messages: int
|
||||||
|
|
||||||
|
AbcNetwork = ref object
|
||||||
|
peers: seq[string]
|
||||||
|
|
||||||
|
p2pProtocol abc(version = 1,
|
||||||
|
peerState = AbcPeer,
|
||||||
|
networkState = AbcNetwork,
|
||||||
|
timeout = 100):
|
||||||
|
|
||||||
|
onPeerConnected do (peer: Peer):
|
||||||
|
await peer.hi "Bob"
|
||||||
|
let response = await peer.nextMsg(abc.hi)
|
||||||
|
peer.networkState.peers.add response.name
|
||||||
|
|
||||||
|
onPeerDisconnected do (peer: Peer, reason: DisconnectionReason):
|
||||||
|
echo "peer disconnected", peer
|
||||||
|
|
||||||
|
requestResponse:
|
||||||
|
proc abcReq(p: Peer, n: int) =
|
||||||
|
echo "got req ", n
|
||||||
|
await p.abcRes(reqId, &"response to #{n}")
|
||||||
|
|
||||||
|
proc abcRes(p: Peer, data: string) =
|
||||||
|
echo "got response ", data
|
||||||
|
|
||||||
|
proc hi(p: Peer, name: string) =
|
||||||
|
echo "got hi from ", name
|
||||||
|
p.state.peerName = name
|
||||||
|
let query = 123
|
||||||
|
echo "sending req #", query
|
||||||
|
var r = await p.abcReq(query)
|
||||||
|
if r.isSome:
|
||||||
|
p.state.lastResponse = r.get.data
|
||||||
|
else:
|
||||||
|
p.state.lastResponse = "timeout"
|
||||||
|
|
||||||
|
p2pProtocol xyz(version = 1,
|
||||||
|
peerState = XyzPeer,
|
||||||
|
useRequestIds = false,
|
||||||
|
timeout = 100):
|
||||||
|
|
||||||
|
proc foo(p: Peer, s: string, a, z: int) =
|
||||||
|
p.state.messages += 1
|
||||||
|
if p.supports(abc):
|
||||||
|
echo p.state(abc).peerName
|
||||||
|
|
||||||
|
proc bar(p: Peer, i: int, s: string)
|
||||||
|
|
||||||
|
requestResponse:
|
||||||
|
proc xyzReq(p: Peer, n: int, timeout = 3000) =
|
||||||
|
echo "got req ", n
|
||||||
|
|
||||||
|
proc xyzRes(p: Peer, data: string) =
|
||||||
|
echo "got response ", data
|
||||||
|
|
||||||
|
proc defaultTestingHandshake(_: type abc): abc.hi =
|
||||||
|
result.name = "John Doe"
|
||||||
|
|
||||||
|
proc localAddress(port: int): Address =
|
||||||
|
let port = Port(port)
|
||||||
|
result = Address(udpPort: port, tcpPort: port, ip: parseIpAddress("127.0.0.1"))
|
||||||
|
|
||||||
|
template asyncTest(name, body: untyped) =
|
||||||
|
test name:
|
||||||
|
proc scenario {.async.} = body
|
||||||
|
waitFor scenario()
|
||||||
|
|
||||||
|
asyncTest "network with 3 peers using custom protocols":
|
||||||
|
const useCompression = defined(useSnappy)
|
||||||
|
let localKeys = newKeyPair()
|
||||||
|
let localAddress = localAddress(30303)
|
||||||
|
var localNode = newEthereumNode(localKeys, localAddress, 1, nil, useCompression = useCompression)
|
||||||
|
localNode.startListening()
|
||||||
|
|
||||||
|
var mock1 = newMockPeer do (m: MockConf):
|
||||||
|
m.addHandshake abc.hi(name: "Alice")
|
||||||
|
|
||||||
|
m.expect(abc.abcReq) do (peer: Peer, data: Rlp):
|
||||||
|
let reqId = data.readReqId()
|
||||||
|
await peer.abcRes(reqId, "mock response")
|
||||||
|
await sleepAsync(100)
|
||||||
|
let r = await peer.abcReq(1)
|
||||||
|
assert r.get.data == "response to #1"
|
||||||
|
|
||||||
|
m.expect(abc.abcRes)
|
||||||
|
|
||||||
|
var mock2 = newMockPeer do (m: MockConf):
|
||||||
|
m.addCapability xyz
|
||||||
|
m.addCapability abc
|
||||||
|
|
||||||
|
m.expect(abc.abcReq) # we'll let this one time out
|
||||||
|
|
||||||
|
m.expect(xyz.xyzReq) do (peer: Peer):
|
||||||
|
echo "got xyz req"
|
||||||
|
await peer.xyzRes("mock peer data")
|
||||||
|
|
||||||
|
when useCompression:
|
||||||
|
m.useCompression = useCompression
|
||||||
|
|
||||||
|
discard await mock1.rlpxConnect(localNode)
|
||||||
|
let mock2Connection = await localNode.rlpxConnect(mock2)
|
||||||
|
|
||||||
|
let r = await mock2Connection.xyzReq(10)
|
||||||
|
check r.get.data == "mock peer data"
|
||||||
|
|
||||||
|
let abcNetState = localNode.protocolState(abc)
|
||||||
|
|
||||||
|
check:
|
||||||
|
abcNetState.peers.len == 2
|
||||||
|
"Alice" in abcNetState.peers
|
||||||
|
"John Doe" in abcNetState.peers
|
Loading…
Reference in New Issue