This commit only moves files around (no need for review)
In the next commit, I'm going to squash all the changes in the LES. This commit just sets the directory tree to its final state, so the diffs in the follow-up commits are easier to review. Please note that eth.nim was renamed to eth_protocol.nim, because it's not advisable for a module to share its name with a protocol identifier (this creates hard-to-diagnose ambiguity errors).
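For context, a rough sketch of the ambiguity the rename avoids (illustrative only, not part of this commit; the identifiers mirror the diff below): once a module named eth is imported while a protocol is also declared as eth, a qualified name such as eth.status can resolve either to the module's symbol or to the protocol's message.

# Hypothetical pre-rename layout -- not actual repository contents.
import eth                        # module named `eth` (now eth_protocol.nim)

rlpxProtocol eth, 63:             # protocol identifier is also `eth`
  proc status(peer: Peer) = discard

  onPeerConnected do (peer: Peer):
    # `eth.status` is ambiguous here: the imported module's `status`,
    # or the `status` message of the `eth` protocol?
    let m = await peer.waitSingleMsg(eth.status)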
This commit is contained in:
parent e5ff8aea2a
commit b38804f873

1531  eth_p2p.nim
File diff suppressed because it is too large

@@ -1,158 +1,3 @@
#
#                 Ethereum P2P
#              (c) Copyright 2018
#       Status Research & Development GmbH
#
#            Licensed under either of
#  Apache License, version 2.0, (LICENSE-APACHEv2)
#            MIT license (LICENSE-MIT)
#

## This module implements the Ethereum Wire Protocol:
## https://github.com/ethereum/wiki/wiki/Ethereum-Wire-Protocol

import
  random, algorithm, hashes,
  asyncdispatch2, rlp, stint, eth_common, chronicles,
  ../../eth_p2p

type
  NewBlockHashesAnnounce* = object
    hash: KeccakHash
    number: uint

  NewBlockAnnounce* = object
    header: BlockHeader
    body {.rlpInline.}: BlockBody

  NetworkState = object
    syncing: bool

  PeerState = object
    initialized: bool
    bestBlockHash: KeccakHash
    bestDifficulty: DifficultyInt

const
  maxStateFetch = 384
  maxBodiesFetch = 128
  maxReceiptsFetch = 256
  maxHeadersFetch = 192
  protocolVersion = 63
  minPeersToStartSync = 2 # Wait for consensus of at least this number of peers before syncing

rlpxProtocol eth, protocolVersion:
  useRequestIds = false

  type State = PeerState

  onPeerConnected do (peer: Peer):
    let
      network = peer.network
      chain = network.chain
      bestBlock = chain.getBestBlockHeader

    await peer.status(protocolVersion,
                      network.networkId,
                      bestBlock.difficulty,
                      bestBlock.blockHash,
                      chain.genesisHash)

    let m = await peer.waitSingleMsg(eth.status)
    if m.networkId == network.networkId and m.genesisHash == chain.genesisHash:
      debug "Suitable peer", peer
    else:
      raise newException(UselessPeerError, "Eth handshake params mismatch")
    peer.state.initialized = true
    peer.state.bestDifficulty = m.totalDifficulty
    peer.state.bestBlockHash = m.bestHash

  proc status(peer: Peer,
              protocolVersion: uint,
              networkId: uint,
              totalDifficulty: DifficultyInt,
              bestHash: KeccakHash,
              genesisHash: KeccakHash) =
    # verify that the peer is on the same chain:
    if peer.network.networkId != networkId or
       peer.network.chain.genesisHash != genesisHash:
      # TODO: Is there a more specific reason here?
      await peer.disconnect(SubprotocolReason)
      return

    peer.state.bestBlockHash = bestHash
    peer.state.bestDifficulty = totalDifficulty

  proc newBlockHashes(peer: Peer, hashes: openarray[NewBlockHashesAnnounce]) =
    discard

  proc transactions(peer: Peer, transactions: openarray[Transaction]) =
    discard

  requestResponse:
    proc getBlockHeaders(peer: Peer, request: BlocksRequest) =
      if request.maxResults > uint64(maxHeadersFetch):
        await peer.disconnect(BreachOfProtocol)
        return

      var headers = newSeqOfCap[BlockHeader](request.maxResults)
      let chain = peer.network.chain
      var foundBlock: BlockHeader

      if chain.getBlockHeader(request.startBlock, foundBlock):
        headers.add foundBlock

        while uint64(headers.len) < request.maxResults:
          if not chain.getSuccessorHeader(foundBlock, foundBlock):
            break
          headers.add foundBlock

      await peer.blockHeaders(headers)

    proc blockHeaders(p: Peer, headers: openarray[BlockHeader])

  requestResponse:
    proc getBlockBodies(peer: Peer, hashes: openarray[KeccakHash]) =
      if hashes.len > maxBodiesFetch:
        await peer.disconnect(BreachOfProtocol)
        return

      var chain = peer.network.chain

      var blockBodies = newSeqOfCap[BlockBody](hashes.len)
      for hash in hashes:
        let blockBody = chain.getBlockBody(hash)
        if not blockBody.isNil:
          # TODO: should there be an else clause here.
          # Is the peer responsible for figuring out that
          # some blocks were not found?
          blockBodies.add deref(blockBody)

      await peer.blockBodies(blockBodies)

    proc blockBodies(peer: Peer, blocks: openarray[BlockBody])

  proc newBlock(peer: Peer, bh: NewBlockAnnounce, totalDifficulty: DifficultyInt) =
    discard

  nextID 13

  requestResponse:
    proc getNodeData(peer: Peer, hashes: openarray[KeccakHash]) =
      await peer.nodeData([])

    proc nodeData(peer: Peer, data: openarray[Blob]) =
      discard

  requestResponse:
    proc getReceipts(peer: Peer, hashes: openarray[KeccakHash]) =
      await peer.receipts([])

    proc receipts(peer: Peer, receipts: openarray[Receipt]) =
      discard

proc hash*(p: Peer): Hash {.inline.} = hash(cast[pointer](p))

type
  SyncStatus* = enum
    syncSuccess
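A hedged caller-side sketch (not part of the commit): the requestResponse pairs above, together with the handlers' own await peer.blockHeaders(...) / await peer.blockBodies(...) calls, suggest that the macro exposes awaitable request procs on Peer. The generated return shape and the BlocksRequest fields (startBlock, maxResults, as accessed in the handler) are assumptions here.

# Illustrative only -- the generated signature is an assumption, not confirmed by this diff.
proc requestHeadersExample(peer: Peer, request: BlocksRequest) {.async.} =
  let response = await peer.getBlockHeaders(request)  # request proc assumed to be macro-generated
  # `response` is assumed to carry the peer's blockHeaders answer
  debug "headers response received", peer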
@@ -0,0 +1,227 @@

# PeerPool attempts to keep connections to at least min_peers
# on the given network.

const
  lookupInterval = 5
  connectLoopSleepMs = 2000

proc newPeerPool*(network: EthereumNode,
                  chainDb: AbstractChainDB, networkId: uint, keyPair: KeyPair,
                  discovery: DiscoveryProtocol, clientId: string,
                  listenPort = Port(30303), minPeers = 10): PeerPool =
  new result
  result.network = network
  result.keyPair = keyPair
  result.minPeers = minPeers
  result.networkId = networkId
  result.discovery = discovery
  result.connectedNodes = initTable[Node, Peer]()
  result.connectingNodes = initSet[Node]()
  result.observers = initTable[int, PeerObserver]()
  result.listenPort = listenPort

template ensureFuture(f: untyped) = asyncCheck f

proc nodesToConnect(p: PeerPool): seq[Node] {.inline.} =
  p.discovery.randomNodes(p.minPeers)

proc addObserver(p: PeerPool, observerId: int, observer: PeerObserver) =
  assert(observerId notin p.observers)
  p.observers[observerId] = observer
  if not observer.onPeerConnected.isNil:
    for peer in p.connectedNodes.values:
      observer.onPeerConnected(peer)

proc delObserver(p: PeerPool, observerId: int) =
  p.observers.del(observerId)

proc addObserver*(p: PeerPool, observerId: ref, observer: PeerObserver) {.inline.} =
  p.addObserver(cast[int](observerId), observer)

proc delObserver*(p: PeerPool, observerId: ref) {.inline.} =
  p.delObserver(cast[int](observerId))

proc stopAllPeers(p: PeerPool) {.async.} =
  info "Stopping all peers ..."
  # TODO: ...
  # await asyncio.gather(
  #   *[peer.stop() for peer in self.connected_nodes.values()])

# async def stop(self) -> None:
#   self.cancel_token.trigger()
#   await self.stop_all_peers()

proc connect(p: PeerPool, remote: Node): Future[Peer] {.async.} =
  ## Connect to the given remote and return a Peer instance when successful.
  ## Returns nil if the remote is unreachable, times out or is useless.
  if remote in p.connectedNodes:
    debug "skipping_connection_to_already_connected_peer", remote
    return nil

  if remote in p.connectingNodes:
    # debug "skipping connection"
    return nil

  debug "Connecting to node", remote
  p.connectingNodes.incl(remote)
  result = await p.network.rlpxConnect(remote)
  p.connectingNodes.excl(remote)

  # expected_exceptions = (
  #   UnreachablePeer, TimeoutError, PeerConnectionLost, HandshakeFailure)
  # try:
  #   self.logger.debug("Connecting to %s...", remote)
  #   peer = await wait_with_token(
  #     handshake(remote, self.privkey, self.peer_class, self.chaindb, self.network_id),
  #     token=self.cancel_token,
  #     timeout=HANDSHAKE_TIMEOUT)
  #   return peer
  # except OperationCancelled:
  #   # Pass it on to instruct our main loop to stop.
  #   raise
  # except expected_exceptions as e:
  #   self.logger.debug("Could not complete handshake with %s: %s", remote, repr(e))
  # except Exception:
  #   self.logger.exception("Unexpected error during auth/p2p handshake with %s", remote)
  #   return None

proc lookupRandomNode(p: PeerPool) {.async.} =
  # This method runs in the background, so we must catch OperationCancelled
  # here, otherwise asyncio will warn that its exception was never retrieved.
  try:
    discard await p.discovery.lookupRandom()
  except: # OperationCancelled
    discard
  p.lastLookupTime = epochTime()

proc getRandomBootnode(p: PeerPool): seq[Node] =
  @[p.discovery.bootstrapNodes.rand()]

proc peerFinished(p: PeerPool, peer: Peer) =
  ## Remove the given peer from our list of connected nodes.
  ## This is passed as a callback to be called when a peer finishes.
  p.connectedNodes.del(peer.remote)

  for o in p.observers.values:
    if not o.onPeerDisconnected.isNil:
      o.onPeerDisconnected(peer)

proc run(peer: Peer, peerPool: PeerPool) {.async.} =
  # TODO: This is a stub that should be implemented in rlpx.nim

  try:
    while true:
      var (nextMsgId, nextMsgData) = await peer.recvMsg()
      if nextMsgId == 1:
        debug "Run got disconnect msg", reason = nextMsgData.listElem(0).toInt(uint32).DisconnectionReason, peer
        break
      else:
        # debug "Got msg: ", msg = nextMsgId
        await peer.dispatchMsg(nextMsgId, nextMsgData)
  except:
    error "Failed to read from peer",
          err = getCurrentExceptionMsg(),
          stackTrace = getCurrentException().getStackTrace()

  peerPool.peerFinished(peer)

proc connectToNode*(p: PeerPool, n: Node) {.async.} =
  let peer = await p.connect(n)
  if not peer.isNil:
    info "Connection established", peer
    ensureFuture peer.run(p)

    p.connectedNodes[peer.remote] = peer
    for o in p.observers.values:
      if not o.onPeerConnected.isNil:
        o.onPeerConnected(peer)

proc connectToNodes(p: PeerPool, nodes: seq[Node]) {.async.} =
  for node in nodes:
    discard p.connectToNode(node)

    # # TODO: Consider changing connect() to raise an exception instead of
    # # returning None, as discussed in
    # # https://github.com/ethereum/py-evm/pull/139#discussion_r152067425
    # echo "Connecting to node: ", node
    # let peer = await p.connect(node)
    # if not peer.isNil:
    #   info "Successfully connected to ", peer
    #   ensureFuture peer.run(p)

    #   p.connectedNodes[peer.remote] = peer
    #   # for subscriber in self._subscribers:
    #   #   subscriber.register_peer(peer)
    #   if p.connectedNodes.len >= p.minPeers:
    #     return

proc maybeConnectToMorePeers(p: PeerPool) {.async.} =
  ## Connect to more peers if we're not yet connected to at least p.minPeers.
  if p.connectedNodes.len >= p.minPeers:
    # debug "pool already connected to enough peers (sleeping)", count = p.connectedNodes
    return

  if p.lastLookupTime + lookupInterval < epochTime():
    ensureFuture p.lookupRandomNode()

  let debugEnode = getEnv("ETH_DEBUG_ENODE")
  if debugEnode.len != 0:
    await p.connectToNode(newNode(debugEnode))
  else:
    await p.connectToNodes(p.nodesToConnect())

  # In some cases (e.g. ROPSTEN or private testnets), the discovery table might
  # be full of bad peers, so if we can't connect to any peers we try a random
  # bootstrap node as well.
  if p.connectedNodes.len == 0:
    await p.connectToNodes(p.getRandomBootnode())

proc run(p: PeerPool) {.async.} =
  info "Running PeerPool..."
  p.running = true
  while p.running:
    var dropConnections = false
    try:
      await p.maybeConnectToMorePeers()
    except Exception as e:
      # Most unexpected errors should be transient, so we log and restart from
      # scratch.
      error "Unexpected PeerPool error, restarting",
            err = getCurrentExceptionMsg(),
            stackTrace = e.getStackTrace()
      dropConnections = true

    if dropConnections:
      await p.stopAllPeers()

    await sleepAsync(connectLoopSleepMs)

proc start*(p: PeerPool) =
  if not p.running:
    asyncCheck p.run()

proc len*(p: PeerPool): int = p.connectedNodes.len

# @property
# def peers(self) -> List[BasePeer]:
#   peers = list(self.connected_nodes.values())
#   # Shuffle the list of peers so that dumb callsites are less likely to send
#   # all requests to
#   # a single peer even if they always pick the first one from the list.
#   random.shuffle(peers)
#   return peers

# async def get_random_peer(self) -> BasePeer:
#   while not self.peers:
#     self.logger.debug("No connected peers, sleeping a bit")
#     await asyncio.sleep(0.5)
#   return random.choice(self.peers)

iterator peers*(p: PeerPool): Peer =
  for remote, peer in p.connectedNodes:
    yield peer

iterator peers*(p: PeerPool, Protocol: type): Peer =
  for peer in p.peers:
    if peer.supports(Protocol):
      yield peer
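A minimal usage sketch for the pool (not part of the commit), based on the newPeerPool, start and peers signatures above; the node, chain database, key pair and discovery objects are placeholders assumed to be created elsewhere in eth_p2p.

# Illustrative only: `node`, `chainDb`, `keyPair` and `disc` are assumed to exist
# (EthereumNode, AbstractChainDB, KeyPair and DiscoveryProtocol respectively).
let pool = newPeerPool(node, chainDb, networkId = 1'u, keyPair = keyPair,
                       discovery = disc, clientId = "nimbus/0.1.0",
                       listenPort = Port(30303), minPeers = 5)
pool.start()                     # kicks off the async connect loop

for peer in pool.peers:          # iterate the currently connected peers
  debug "connected", peer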
@@ -0,0 +1,141 @@

block:
  type
    EthereumNode* = ref object
      networkId*: uint
      chain*: AbstractChainDB
      clientId*: string
      connectionState*: ConnectionState
      keys*: KeyPair
      address*: Address
      rlpxCapabilities: seq[Capability]
      rlpxProtocols: seq[ProtocolInfo]
      listeningServer: StreamServer
      protocolStates: seq[RootRef]
      discovery: DiscoveryProtocol
      peerPool*: PeerPool

    Peer* = ref object
      transp: StreamTransport
      dispatcher: Dispatcher
      nextReqId: int
      network*: EthereumNode
      secretsState: SecretState
      connectionState: ConnectionState
      remote*: Node
      protocolStates: seq[RootRef]
      outstandingRequests: seq[Deque[OutstandingRequest]]
      awaitedMessages: seq[FutureBase]

    OutstandingRequest = object
      reqId: int
      future: FutureBase
      timeoutAt: uint64

    PeerPool* = ref object
      network: EthereumNode
      keyPair: KeyPair
      networkId: uint
      minPeers: int
      clientId: string
      discovery: DiscoveryProtocol
      lastLookupTime: float
      connectedNodes: Table[Node, Peer]
      connectingNodes: HashSet[Node]
      running: bool
      listenPort*: Port
      observers: Table[int, PeerObserver]

    MessageInfo* = object
      id*: int
      name*: string
      thunk*: MessageHandler
      printer*: MessageContentPrinter
      requestResolver: RequestResolver
      nextMsgResolver: NextMsgResolver

    CapabilityName* = array[3, char]

    Capability* = object
      name*: CapabilityName
      version*: int

    ProtocolInfo* = ref object
      name*: CapabilityName
      version*: int
      messages*: seq[MessageInfo]
      index: int # the position of the protocol in the
                 # ordered list of supported protocols
      peerStateInitializer: PeerStateInitializer
      networkStateInitializer: NetworkStateInitializer
      handshake: HandshakeStep
      disconnectHandler: DisconnectionHandler

    Dispatcher = ref object
      # The dispatcher stores the mapping of negotiated message IDs between
      # two connected peers. The dispatcher objects are shared between
      # connections running with the same set of supported protocols.
      #
      # `protocolOffsets` will hold one slot of each locally supported
      # protocol. If the other peer also supports the protocol, the stored
      # offset indicates the numeric value of the first message of the protocol
      # (for this particular connection). If the other peer doesn't support the
      # particular protocol, the stored offset is -1.
      #
      # `messages` holds a mapping from valid message IDs to their handler procs.
      #
      protocolOffsets: seq[int]
      messages: seq[ptr MessageInfo]

    PeerObserver* = object
      onPeerConnected*: proc(p: Peer)
      onPeerDisconnected*: proc(p: Peer)

    MessageHandler = proc(x: Peer, data: Rlp): Future[void]
    MessageContentPrinter = proc(msg: pointer): string
    RequestResolver = proc(msg: pointer, future: FutureBase)
    NextMsgResolver = proc(msgData: Rlp, future: FutureBase)
    PeerStateInitializer = proc(peer: Peer): RootRef
    NetworkStateInitializer = proc(network: EthereumNode): RootRef
    HandshakeStep = proc(peer: Peer): Future[void]
    DisconnectionHandler = proc(peer: Peer,
                                reason: DisconnectionReason): Future[void]

    RlpxMessageKind* = enum
      rlpxNotification,
      rlpxRequest,
      rlpxResponse

    ConnectionState* = enum
      None,
      Connecting,
      Connected,
      Disconnecting,
      Disconnected

    DisconnectionReason* = enum
      DisconnectRequested,
      TcpError,
      BreachOfProtocol,
      UselessPeer,
      TooManyPeers,
      AlreadyConnected,
      IncompatibleProtocolVersion,
      NullNodeIdentityReceived,
      ClientQuitting,
      UnexpectedIdentity,
      SelfConnection,
      MessageTimeout,
      SubprotocolReason = 0x10

    UnsupportedProtocol* = object of Exception
      # This is raised when you attempt to send a message from a particular
      # protocol to a peer that doesn't support the protocol.

    MalformedMessageError* = object of Exception

    UnexpectedDisconnectError* = object of Exception
      reason*: DisconnectionReason

    UselessPeerError* = object of Exception
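A hedged sketch of the observer hooks (not part of the commit): addObserver above accepts any ref value as the observer key, and PeerObserver carries two optional callbacks. The tracker type, the callbacks and the pool variable are assumptions for illustration.

# Illustrative only: the observer key is just a ref used as an identity token.
type SyncTracker = ref object

proc onConnected(p: Peer) = debug "tracking new peer", p
proc onDisconnected(p: Peer) = debug "peer lost", p

var tracker = SyncTracker()
let obs = PeerObserver(onPeerConnected: onConnected,
                       onPeerDisconnected: onDisconnected)

pool.addObserver(tracker, obs)   # `pool` is assumed to be an existing PeerPool
# ... and later, on shutdown:
pool.delObserver(tracker)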
File diff suppressed because it is too large

@@ -0,0 +1,156 @@
#
#                 Ethereum P2P
#              (c) Copyright 2018
#       Status Research & Development GmbH
#
#            Licensed under either of
#  Apache License, version 2.0, (LICENSE-APACHEv2)
#            MIT license (LICENSE-MIT)
#

## This module implements the Ethereum Wire Protocol:
## https://github.com/ethereum/wiki/wiki/Ethereum-Wire-Protocol

import
  random, algorithm, hashes,
  asyncdispatch2, rlp, stint, eth_common, chronicles,
  ../../eth_p2p

type
  NewBlockHashesAnnounce* = object
    hash: KeccakHash
    number: uint

  NewBlockAnnounce* = object
    header: BlockHeader
    body {.rlpInline.}: BlockBody

  NetworkState = object
    syncing: bool

  PeerState = object
    initialized: bool
    bestBlockHash: KeccakHash
    bestDifficulty: DifficultyInt

const
  maxStateFetch = 384
  maxBodiesFetch = 128
  maxReceiptsFetch = 256
  maxHeadersFetch = 192
  protocolVersion = 63
  minPeersToStartSync = 2 # Wait for consensus of at least this number of peers before syncing

rlpxProtocol eth, protocolVersion:
  useRequestIds = false

  type State = PeerState

  onPeerConnected do (peer: Peer):
    let
      network = peer.network
      chain = network.chain
      bestBlock = chain.getBestBlockHeader

    await peer.status(protocolVersion,
                      network.networkId,
                      bestBlock.difficulty,
                      bestBlock.blockHash,
                      chain.genesisHash)

    let m = await peer.waitSingleMsg(eth.status)
    if m.networkId == network.networkId and m.genesisHash == chain.genesisHash:
      debug "Suitable peer", peer
    else:
      raise newException(UselessPeerError, "Eth handshake params mismatch")
    peer.state.initialized = true
    peer.state.bestDifficulty = m.totalDifficulty
    peer.state.bestBlockHash = m.bestHash

  proc status(peer: Peer,
              protocolVersion: uint,
              networkId: uint,
              totalDifficulty: DifficultyInt,
              bestHash: KeccakHash,
              genesisHash: KeccakHash) =
    # verify that the peer is on the same chain:
    if peer.network.networkId != networkId or
       peer.network.chain.genesisHash != genesisHash:
      # TODO: Is there a more specific reason here?
      await peer.disconnect(SubprotocolReason)
      return

    peer.state.bestBlockHash = bestHash
    peer.state.bestDifficulty = totalDifficulty

  proc newBlockHashes(peer: Peer, hashes: openarray[NewBlockHashesAnnounce]) =
    discard

  proc transactions(peer: Peer, transactions: openarray[Transaction]) =
    discard

  requestResponse:
    proc getBlockHeaders(peer: Peer, request: BlocksRequest) =
      if request.maxResults > uint64(maxHeadersFetch):
        await peer.disconnect(BreachOfProtocol)
        return

      var headers = newSeqOfCap[BlockHeader](request.maxResults)
      let chain = peer.network.chain
      var foundBlock: BlockHeader

      if chain.getBlockHeader(request.startBlock, foundBlock):
        headers.add foundBlock

        while uint64(headers.len) < request.maxResults:
          if not chain.getSuccessorHeader(foundBlock, foundBlock):
            break
          headers.add foundBlock

      await peer.blockHeaders(headers)

    proc blockHeaders(p: Peer, headers: openarray[BlockHeader])

  requestResponse:
    proc getBlockBodies(peer: Peer, hashes: openarray[KeccakHash]) =
      if hashes.len > maxBodiesFetch:
        await peer.disconnect(BreachOfProtocol)
        return

      var chain = peer.network.chain

      var blockBodies = newSeqOfCap[BlockBody](hashes.len)
      for hash in hashes:
        let blockBody = chain.getBlockBody(hash)
        if not blockBody.isNil:
          # TODO: should there be an else clause here.
          # Is the peer responsible for figuring out that
          # some blocks were not found?
          blockBodies.add deref(blockBody)

      await peer.blockBodies(blockBodies)

    proc blockBodies(peer: Peer, blocks: openarray[BlockBody])

  proc newBlock(peer: Peer, bh: NewBlockAnnounce, totalDifficulty: DifficultyInt) =
    discard

  nextID 13

  requestResponse:
    proc getNodeData(peer: Peer, hashes: openarray[KeccakHash]) =
      await peer.nodeData([])

    proc nodeData(peer: Peer, data: openarray[Blob]) =
      discard

  requestResponse:
    proc getReceipts(peer: Peer, hashes: openarray[KeccakHash]) =
      await peer.receipts([])

    proc receipts(peer: Peer, receipts: openarray[Receipt]) =
      discard

proc hash*(p: Peer): Hash {.inline.} = hash(cast[pointer](p))