import
  algorithm,
  stew/varints, stew/shims/[macros, tables], chronos, chronicles,
  libp2p/daemon/daemonapi, faststreams/output_stream, serialization,
  json_serialization/std/options, eth/p2p/p2p_protocol_dsl,
  libp2p_json_serialization, ssz

export
  daemonapi, p2pProtocol, libp2p_json_serialization, ssz

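# Core types of the libp2p daemon networking backend: `Eth2Node` wraps the
# connection to the libp2p daemon and tracks all known peers, while `Peer`
# holds the per-peer state used by the generated protocol code.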
type
  Eth2Node* = ref object of RootObj
    daemon*: DaemonAPI
    peers*: Table[PeerID, Peer]
    protocolStates*: seq[RootRef]

  EthereumNode = Eth2Node # needed for the definitions in p2p_backends_helpers

  Peer* = ref object
    network*: Eth2Node
    id*: PeerID
    wasDialed*: bool
    connectionState*: ConnectionState
    protocolStates*: seq[RootRef]
    maxInactivityAllowed*: Duration

  ConnectionState* = enum
    None,
    Connecting,
    Connected,
    Disconnecting,
    Disconnected

  DisconnectionReason* = enum
    ClientShutDown
    IrrelevantNetwork
    FaultOrError

  UntypedResponder = object
    peer*: Peer
    stream*: P2PStream

  Responder*[MsgType] = distinct UntypedResponder

  MessageInfo* = object
    name*: string

    # Private fields:
    thunk*: ThunkProc
    libp2pProtocol: string
    printer*: MessageContentPrinter
    nextMsgResolver*: NextMsgResolver

  ProtocolInfoObj* = object
    name*: string
    messages*: seq[MessageInfo]
    index*: int # the position of the protocol in the
                # ordered list of supported protocols

    # Private fields:
    peerStateInitializer*: PeerStateInitializer
    networkStateInitializer*: NetworkStateInitializer
    handshake*: HandshakeStep
    disconnectHandler*: DisconnectionHandler

  ProtocolInfo* = ptr ProtocolInfoObj

  CompressedMsgId = tuple
    protocolIdx, methodId: int

  ResponseCode* = enum
    Success
    InvalidRequest
    ServerError

  PeerStateInitializer* = proc(peer: Peer): RootRef {.gcsafe.}
  NetworkStateInitializer* = proc(network: EthereumNode): RootRef {.gcsafe.}
  HandshakeStep* = proc(peer: Peer, stream: P2PStream): Future[void] {.gcsafe.}
  DisconnectionHandler* = proc(peer: Peer): Future[void] {.gcsafe.}
  ThunkProc* = proc(daemon: DaemonAPI, stream: P2PStream): Future[void] {.gcsafe.}
  MessageContentPrinter* = proc(msg: pointer): string {.gcsafe.}
  NextMsgResolver* = proc(msgData: SszReader, future: FutureBase) {.gcsafe.}

  Bytes = seq[byte]

  PeerDisconnected* = object of CatchableError
    reason*: DisconnectionReason

  TransmissionError* = object of CatchableError

  ResponseSizeLimitReached* = object of CatchableError

const
  defaultIncomingReqTimeout = 5000
  HandshakeTimeout = FaultOrError

  # Spec constants
  # https://github.com/ethereum/eth2.0-specs/blob/dev/specs/networking/p2p-interface.md#eth-20-network-interaction-domains
  REQ_RESP_MAX_SIZE* = 1 * 1024 * 1024 # bytes
  GOSSIP_MAX_SIZE* = 1 * 1024 * 1024 # bytes
  TTFB_TIMEOUT* = 5.seconds
  RESP_TIMEOUT* = 10.seconds

  readTimeoutErrorMsg = "Exceeded read timeout for a request"

logScope:
  topic = "libp2p"

template `$`*(peer: Peer): string = $peer.id
chronicles.formatIt(Peer): $it

template libp2pProtocol*(name: string, version: int) {.pragma.}

include eth/p2p/p2p_backends_helpers
include eth/p2p/p2p_tracing

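# `getPeer` returns the tracked `Peer` object for `peerId`, creating and
# registering a fresh one on first contact (`Peer.init` is forward-declared
# here because its implementation appears further down).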
proc init*(T: type Peer, network: Eth2Node, id: PeerID): Peer {.gcsafe.}

proc getPeer*(node: Eth2Node, peerId: PeerID): Peer {.gcsafe.} =
  result = node.peers.getOrDefault(peerId)
  if result == nil:
    result = Peer.init(node, peerId)
    node.peers[peerId] = result

proc peerFromStream(daemon: DaemonAPI, stream: P2PStream): Peer {.gcsafe.} =
  Eth2Node(daemon.userData).getPeer(stream.peer)

proc safeClose(stream: P2PStream) {.async.} =
  if P2PStreamFlags.Closed notin stream.flags:
    await close(stream)

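# Tears down the connection to `peer` and removes it from the peer table.
# The `notifyOtherPeer` flag is currently unused (see the TODO below).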
proc disconnect*(peer: Peer, reason: DisconnectionReason, notifyOtherPeer = false) {.async.} =
  # TODO: How should we notify the other peer?
  if peer.connectionState notin {Disconnecting, Disconnected}:
    peer.connectionState = Disconnecting
    await peer.network.daemon.disconnect(peer.id)
    peer.connectionState = Disconnected
    peer.network.peers.del(peer.id)

template raisePeerDisconnected(msg: string, r: DisconnectionReason) =
  var e = newException(PeerDisconnected, msg)
  e.reason = r
  raise e

proc disconnectAndRaise(peer: Peer,
                        reason: DisconnectionReason,
                        msg: string) {.async.} =
  let r = reason
  await peer.disconnect(r)
  raisePeerDisconnected(msg, r)

template reraiseAsPeerDisconnected(peer: Peer, errMsgExpr: static string,
                                   reason = FaultOrError): auto =
  const errMsg = errMsgExpr
  debug errMsg, err = getCurrentExceptionMsg()
  disconnectAndRaise(peer, reason, errMsg)

proc registerProtocol(protocol: ProtocolInfo) =
  # TODO: This can be done at compile-time in the future
  let pos = lowerBound(gProtocols, protocol)
  gProtocols.insert(protocol, pos)
  for i in 0 ..< gProtocols.len:
    gProtocols[i].index = i

proc setEventHandlers(p: ProtocolInfo,
                      handshake: HandshakeStep,
                      disconnectHandler: DisconnectionHandler) =
  p.handshake = handshake
  p.disconnectHandler = disconnectHandler

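# Creates an `Eth2Node` on top of an already running daemon: network-wide
# protocol states are initialized and a daemon stream handler is registered
# for every message that declares a libp2p protocol ID.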
proc init*(T: type Eth2Node, daemon: DaemonAPI): Future[T] {.async.} =
  new result
  result.daemon = daemon
  result.daemon.userData = result
  result.peers = initTable[PeerID, Peer]()

  newSeq result.protocolStates, allProtocols.len
  for proto in allProtocols:
    if proto.networkStateInitializer != nil:
      result.protocolStates[proto.index] = proto.networkStateInitializer(result)

    for msg in proto.messages:
      if msg.libp2pProtocol.len > 0:
        await daemon.addHandler(@[msg.libp2pProtocol], msg.thunk)

proc readChunk(stream: P2PStream,
               MsgType: type,
               withResponseCode: bool,
               deadline: Future[void]): Future[Option[MsgType]] {.gcsafe.}

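# Incoming requests and responses are length-prefixed with a protobuf-style
# varint. `readSizePrefix` returns the decoded length, or -1 when the prefix
# is malformed, exceeds REQ_RESP_MAX_SIZE or doesn't arrive before `deadline`.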
proc readSizePrefix(transp: StreamTransport,
                    deadline: Future[void]): Future[int] {.async.} =
  var parser: VarintParser[uint64, ProtoBuf]
  while true:
    var nextByte: byte
    var readNextByte = transp.readExactly(addr nextByte, 1)
    await readNextByte or deadline
    if not readNextByte.finished:
      return -1
    case parser.feedByte(nextByte)
    of Done:
      let res = parser.getResult
      if res > uint64(REQ_RESP_MAX_SIZE):
        return -1
      else:
        return int(res)
    of Overflow:
      return -1
    of Incomplete:
      continue

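# Reads one framed message from `stream`: an optional response-code byte
# (responses only), then the varint size prefix, then the payload itself.
# Returns an empty result on timeout, on an invalid size prefix or when the
# response carries an error code.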
proc readMsgBytes(stream: P2PStream,
                  withResponseCode: bool,
                  deadline: Future[void]): Future[Bytes] {.async.} =
  try:
    if withResponseCode:
      var responseCode: byte
      var readResponseCode = stream.transp.readExactly(addr responseCode, 1)
      await readResponseCode or deadline
      if not readResponseCode.finished:
        return
      if responseCode > ResponseCode.high.byte: return

      logScope: responseCode = ResponseCode(responseCode)
      case ResponseCode(responseCode)
      of InvalidRequest, ServerError:
        let responseErrMsg = await readChunk(stream, string, false, deadline)
        debug "P2P request resulted in error", responseErrMsg
        return
      of Success:
        # The response is OK, the execution continues below
        discard

    var sizePrefix = await readSizePrefix(stream.transp, deadline)
    if sizePrefix == -1:
      debug "Failed to read an incoming message size prefix", peer = stream.peer
      return

    if sizePrefix == 0:
      debug "Received SSZ with zero size", peer = stream.peer
      return

    var msgBytes = newSeq[byte](sizePrefix)
    var readBody = stream.transp.readExactly(addr msgBytes[0], sizePrefix)
    await readBody or deadline
    if not readBody.finished: return

    return msgBytes
  except TransportIncompleteError:
    return @[]

proc readChunk(stream: P2PStream,
               MsgType: type,
               withResponseCode: bool,
               deadline: Future[void]): Future[Option[MsgType]] {.gcsafe, async.} =
  var msgBytes = await stream.readMsgBytes(withResponseCode, deadline)
  try:
    if msgBytes.len > 0:
      return some SSZ.decode(msgBytes, MsgType)
  except SerializationError as err:
    debug "Failed to decode a network message",
          msgBytes, errMsg = err.formatMsg("<msg>")
    return

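# Responses to requests with a `seq` return type may arrive as a series of
# chunks; they are accumulated here until no further chunk can be read.
# Single-value responses are read as exactly one chunk.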
proc readResponse(
       stream: P2PStream,
       MsgType: type,
       deadline: Future[void]): Future[Option[MsgType]] {.gcsafe, async.} =

  when MsgType is seq:
    type E = ElemType(MsgType)
    var results: MsgType
    while true:
      let nextRes = await readChunk(stream, E, true, deadline)
      if nextRes.isNone: break
      results.add nextRes.get
    if results.len > 0:
      return some(results)
  else:
    return await readChunk(stream, MsgType, true, deadline)

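# An error response consists of the response code followed by the varint
# length prefix and the SSZ-encoded error message.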
proc encodeErrorMsg(responseCode: ResponseCode, errMsg: string): Bytes =
  var s = init OutputStream
  s.append byte(responseCode)
  s.appendVarint errMsg.len
  s.appendValue SSZ, errMsg
  s.getOutput

proc sendErrorResponse(peer: Peer,
                       stream: P2PStream,
                       err: ref SerializationError,
                       msgName: string,
                       msgBytes: Bytes) {.async.} =
  debug "Received an invalid request",
        peer, msgName, msgBytes, errMsg = err.formatMsg("<msg>")

  let responseBytes = encodeErrorMsg(InvalidRequest, err.formatMsg("msg"))
  discard await stream.transp.write(responseBytes)
  await stream.close()

proc sendErrorResponse(peer: Peer,
                       stream: P2PStream,
                       responseCode: ResponseCode,
                       errMsg: string) {.async.} =
  debug "Error processing request", peer, responseCode, errMsg

  let responseBytes = encodeErrorMsg(ServerError, errMsg)
  discard await stream.transp.write(responseBytes)
  await stream.close()

proc writeSizePrefix(transp: StreamTransport, size: uint64) {.async.} =
  var
    varintBuf: array[10, byte]
    varintSize = vsizeof(size)
    cursor = createWriteCursor(varintBuf)
  cursor.appendVarint size
  var sent = await transp.write(varintBuf[0 ..< varintSize])
  if sent != varintSize:
    raise newException(TransmissionError, "Failed to deliver size prefix")

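# Notification (fire-and-forget) messages open a fresh stream, write the
# size-prefixed request bytes and expect no response.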
proc sendNotificationMsg(peer: Peer, protocolId: string, requestBytes: Bytes) {.async.} =
  var deadline = sleepAsync RESP_TIMEOUT
  var streamFut = peer.network.daemon.openStream(peer.id, @[protocolId])
  await streamFut or deadline
  if not streamFut.finished:
    # TODO: we are giving up here because the deadline passed, but the
    # stream can still be opened eventually a bit later. Who is going
    # to close it then?
    raise newException(TransmissionError, "Failed to open LibP2P stream")

  let stream = streamFut.read
  defer:
    await safeClose(stream)

  var s = init OutputStream
  s.appendVarint requestBytes.len.uint64
  s.append requestBytes
  let bytes = s.getOutput
  let sent = await stream.transp.write(bytes)
  if sent != bytes.len:
    raise newException(TransmissionError, "Failed to deliver msg bytes")

template raiseMaxRespSizeError =
  raise newException(ResponseSizeLimitReached, "Response size limit reached")

# TODO There is too much duplication in the responder functions, but
# I hope to reduce this when I increase the reliance on output streams.
proc sendResponseChunkBytes(responder: UntypedResponder, payload: Bytes) {.async.} =
  var s = init OutputStream
  s.append byte(Success)
  s.appendVarint payload.len.uint64
  s.append payload
  let bytes = s.getOutput

  let sent = await responder.stream.transp.write(bytes)
  if sent != bytes.len:
    raise newException(TransmissionError, "Failed to deliver all bytes")

proc sendResponseChunkObj(responder: UntypedResponder, val: auto) {.async.} =
  var s = init OutputStream
  s.append byte(Success)
  s.appendValue SSZ, sizePrefixed(val)
  let bytes = s.getOutput

  let sent = await responder.stream.transp.write(bytes)
  if sent != bytes.len:
    raise newException(TransmissionError, "Failed to deliver all bytes")

proc sendResponseChunks[T](responder: UntypedResponder, chunks: seq[T]) {.async.} =
  var s = init OutputStream
  for chunk in chunks:
    s.append byte(Success)
    s.appendValue SSZ, sizePrefixed(chunk)

  let bytes = s.getOutput
  let sent = await responder.stream.transp.write(bytes)
  if sent != bytes.len:
    raise newException(TransmissionError, "Failed to deliver all bytes")

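# Performs a complete request/response round-trip: open a stream for the
# given protocol ID, write the size-prefixed request and wait for the
# (possibly chunked) response until `timeout` expires.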
proc makeEth2Request(peer: Peer, protocolId: string, requestBytes: Bytes,
                     ResponseMsg: type,
                     timeout: Duration): Future[Option[ResponseMsg]] {.gcsafe, async.} =
  var deadline = sleepAsync timeout

  # Open a new LibP2P stream
  var streamFut = peer.network.daemon.openStream(peer.id, @[protocolId])
  await streamFut or deadline
  if not streamFut.finished:
    # TODO: we are returning here because the deadline passed, but
    # the stream can still be opened eventually a bit later. Who is
    # going to close it then?
    return none(ResponseMsg)

  let stream = streamFut.read
  defer:
    await safeClose(stream)

  # Send the request
  var s = init OutputStream
  s.appendVarint requestBytes.len.uint64
  s.append requestBytes
  let bytes = s.getOutput
  let sent = await stream.transp.write(bytes)
  if sent != bytes.len:
    await disconnectAndRaise(peer, FaultOrError, "Incomplete send")

  # Read the response
  return await stream.readResponse(ResponseMsg, deadline)

proc p2pStreamName(MsgType: type): string =
  mixin msgProtocol, protocolInfo, msgId
  MsgType.msgProtocol.protocolInfo.messages[MsgType.msgId].libp2pProtocol

proc init*(T: type Peer, network: Eth2Node, id: PeerID): Peer =
  new result
  result.id = id
  result.network = network
  result.connectionState = Connected
  result.maxInactivityAllowed = 15.minutes # TODO: Read this from the config
  newSeq result.protocolStates, allProtocols.len
  for i in 0 ..< allProtocols.len:
    let proto = allProtocols[i]
    if proto.peerStateInitializer != nil:
      result.protocolStates[i] = proto.peerStateInitializer(result)

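# Runs the handshake step of every registered protocol and waits for all of
# them to complete.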
proc performProtocolHandshakes*(peer: Peer) {.async.} =
  var subProtocolsHandshakes = newSeqOfCap[Future[void]](allProtocols.len)
  for protocol in allProtocols:
    if protocol.handshake != nil:
      subProtocolsHandshakes.add((protocol.handshake)(peer, nil))

  await all(subProtocolsHandshakes)

template initializeConnection*(peer: Peer): auto =
  performProtocolHandshakes(peer)

proc initProtocol(name: string,
                  peerInit: PeerStateInitializer,
                  networkInit: NetworkStateInitializer): ProtocolInfoObj =
  result.name = name
  result.messages = @[]
  result.peerStateInitializer = peerInit
  result.networkStateInitializer = networkInit

proc registerMsg(protocol: ProtocolInfo,
                 name: string,
                 thunk: ThunkProc,
                 libp2pProtocol: string,
                 printer: MessageContentPrinter) =
  protocol.messages.add MessageInfo(name: name,
                                    thunk: thunk,
                                    libp2pProtocol: libp2pProtocol,
                                    printer: printer)

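# Derives the libp2p protocol ID of a request proc from its
# `libp2pProtocol(name, version)` pragma, producing IDs of the form
# /eth2/beacon_chain/req/<name>/<version>/ssz.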
proc getRequestProtoName(fn: NimNode): NimNode =
  # `getCustomPragmaVal` doesn't work yet on regular nnkProcDef nodes
  # (TODO: file as an issue)

  let pragmas = fn.pragma
  if pragmas.kind == nnkPragma and pragmas.len > 0:
    for pragma in pragmas:
      if pragma.len > 0 and $pragma[0] == "libp2pProtocol":
        let protoName = $(pragma[1])
        let protoVer = $(pragma[2].intVal)
        return newLit("/eth2/beacon_chain/req/" & protoName & "/" & protoVer & "/ssz")

  return newLit("")

proc init*[MsgType](T: type Responder[MsgType],
                    peer: Peer, stream: P2PStream): T =
  T(UntypedResponder(peer: peer, stream: stream))

import
  typetraits

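# `write` dispatches a response value to the appropriate sender: a single
# element of a multi-chunk response goes through `sendResponseChunkObj`, a
# whole sequence through `sendResponseChunks`, and everything else falls
# back to the generated `send` proc.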
template write*[M](r: var Responder[M], val: auto): auto =
  mixin send
  type Msg = M
  type MsgRec = RecType(Msg)
  when MsgRec is seq|openarray:
    type E = ElemType(MsgRec)
    when val is E:
      sendResponseChunkObj(UntypedResponder(r), val)
    elif val is MsgRec:
      sendResponseChunks(UntypedResponder(r), val)
    else:
      static: echo "BAD TYPE ", name(E), " vs ", name(type(val))
      {.fatal: "bad".}
  else:
    send(r, val)

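# Generates the body of the send procs produced by the DSL: requests go
# through `makeEth2Request`, notifications through `sendNotificationMsg`,
# and responses are written back through the responder's stream.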
proc implementSendProcBody(sendProc: SendProc) =
  let
    msg = sendProc.msg
    UntypedResponder = bindSym "UntypedResponder"
    await = ident "await"

  proc sendCallGenerator(peer, bytes: NimNode): NimNode =
    if msg.kind != msgResponse:
      let msgProto = getRequestProtoName(msg.procDef)
      case msg.kind
      of msgRequest:
        let
          timeout = msg.timeoutParam[0]
          ResponseRecord = msg.response.recName
        quote:
          makeEth2Request(`peer`, `msgProto`, `bytes`,
                          `ResponseRecord`, `timeout`)
      else:
        quote: sendNotificationMsg(`peer`, `msgProto`, `bytes`)
    else:
      quote: sendResponseChunkBytes(`UntypedResponder`(`peer`), `bytes`)

  sendProc.useStandardBody(nil, nil, sendCallGenerator)

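# Backend entry point used by the `p2pProtocol` macro from
# eth/p2p/p2p_protocol_dsl: it selects SSZ as the serialization format and
# the `Peer`/`Eth2Node` types defined above, and generates a thunk and a
# send proc for every declared message.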
proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend =
  var
    Format = ident "SSZ"
    Responder = bindSym "Responder"
    DaemonAPI = bindSym "DaemonAPI"
    P2PStream = ident "P2PStream"
    OutputStream = bindSym "OutputStream"
    Peer = bindSym "Peer"
    Eth2Node = bindSym "Eth2Node"
    messagePrinter = bindSym "messagePrinter"
    milliseconds = bindSym "milliseconds"
    registerMsg = bindSym "registerMsg"
    initProtocol = bindSym "initProtocol"
    bindSymOp = bindSym "bindSym"
    errVar = ident "err"
    msgVar = ident "msg"
    msgBytesVar = ident "msgBytes"
    daemonVar = ident "daemon"
    await = ident "await"

  p.useRequestIds = false

  new result

  result.PeerType = Peer
  result.NetworkType = Eth2Node
  result.registerProtocol = bindSym "registerProtocol"
  result.setEventHandlers = bindSym "setEventHandlers"
  result.SerializationFormat = Format
  result.ResponderType = Responder

  result.afterProtocolInit = proc (p: P2PProtocol) =
    p.onPeerConnected.params.add newIdentDefs(streamVar, P2PStream)

  result.implementMsg = proc (msg: Message) =
    let
      protocol = msg.protocol
      msgName = $msg.ident
      msgNameLit = newLit msgName
      msgRecName = msg.recName

    if msg.procDef.body.kind != nnkEmpty and msg.kind == msgRequest:
      # Request procs need an extra param - the stream where the response
      # should be written:
      msg.userHandler.params.insert(2, newIdentDefs(streamVar, P2PStream))
      msg.initResponderCall.add streamVar

    ##
    ## Implement Thunk
    ##
    var thunkName = ident(msgName & "_thunk")
    let awaitUserHandler = msg.genAwaitUserHandler(msgVar, [peerVar, streamVar])

    let tracing = when tracingEnabled:
      quote: logReceivedMsg(`streamVar`.peer, `msgVar`.get)
    else:
      newStmtList()

    msg.defineThunk quote do:
      proc `thunkName`(`daemonVar`: `DaemonAPI`,
                       `streamVar`: `P2PStream`) {.async, gcsafe.} =
        defer:
          `await` safeClose(`streamVar`)

        let
          `deadlineVar` = sleepAsync RESP_TIMEOUT
          `msgBytesVar` = `await` readMsgBytes(`streamVar`, false, `deadlineVar`)
          `peerVar` = peerFromStream(`daemonVar`, `streamVar`)

        if `msgBytesVar`.len == 0:
          `await` sendErrorResponse(`peerVar`, `streamVar`,
                                    ServerError, readTimeoutErrorMsg)
          return

        var `msgVar`: `msgRecName`
        try:
          `msgVar` = decode(`Format`, `msgBytesVar`, `msgRecName`)
        except SerializationError as `errVar`:
          `await` sendErrorResponse(`peerVar`, `streamVar`, `errVar`,
                                    `msgNameLit`, `msgBytesVar`)
          return
        except Exception as err:
          # TODO. This is temporary code that should be removed after interop.
          # It can be enabled only in certain diagnostic builds where it should
          # re-raise the exception.
          debug "Crash during serialization", inputBytes = toHex(`msgBytesVar`),
                                              msgName = `msgNameLit`,
                                              deserializedType = astToStr(`msgRecName`)
          `await` sendErrorResponse(`peerVar`, `streamVar`, ServerError, err.msg)

        try:
          `tracing`
          `awaitUserHandler`
        except ResponseSizeLimitReached:
          # The response size limit is currently handled with an exception in
          # order to make it easier to switch to an alternative policy where it
          # is signalled with an error response code (and to avoid making the
          # `response` API in the high-level protocols more complicated for now).
          chronicles.debug "response size limit reached", peer, reqName = `msgNameLit`
        except CatchableError as `errVar`:
          `await` sendErrorResponse(`peerVar`, `streamVar`, ServerError, `errVar`.msg)

    ##
    ## Implement Senders and Handshake
    ##
    if msg.kind == msgHandshake:
      macros.error "Handshake messages are not supported in LibP2P protocols"
    else:
      var sendProc = msg.createSendProc()
      implementSendProcBody sendProc

    protocol.outProcRegistrations.add(
      newCall(registerMsg,
              protocol.protocolInfoVar,
              msgNameLit,
              thunkName,
              getRequestProtoName(msg.procDef),
              newTree(nnkBracketExpr, messagePrinter, msgRecName)))

  result.implementProtocolInit = proc (p: P2PProtocol): NimNode =
    return newCall(initProtocol, newLit(p.name), p.peerInit, p.netInit)

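# For illustration only (not part of this module): a protocol built on this
# backend is declared through the `p2pProtocol` macro re-exported above. A
# hypothetical, minimal definition might look roughly like this (the names
# are made up; the exact DSL surface is defined in eth/p2p/p2p_protocol_dsl):
#
#   p2pProtocol ExamplePing(version = 1):
#     proc ping(peer: Peer, nonce: uint64) {.libp2pProtocol("ping", 1).} =
#       debug "got ping", peer, nonce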