#
#                 Ethereum P2P
#              (c) Copyright 2018
#       Status Research & Development GmbH
#
#            Licensed under either of
#  Apache License, version 2.0, (LICENSE-APACHEv2)
#            MIT license (LICENSE-MIT)
#

## This module implements the Ethereum Wire Protocol:
## https://github.com/ethereum/wiki/wiki/Ethereum-Wire-Protocol
2018-04-01 05:41:05 +03:00
|
|
|
import
|
2018-07-24 00:39:41 +03:00
|
|
|
random,
|
2018-08-03 14:27:37 +03:00
|
|
|
asyncdispatch2, rlp, stint, eth_common, chronicles,
|
2018-07-23 00:32:45 +03:00
|
|
|
../../eth_p2p
|
2018-04-01 05:41:05 +03:00
|
|
|
|
|
|
|
type
  # One entry of a NewBlockHashes announcement: the hash of a freshly
  # mined block together with its height.
  NewBlockHashesAnnounce* = object
    hash: KeccakHash  # hash of the announced block
    number: uint      # block number (height) of the announced block

  # Payload of a NewBlock message: a complete block.
  NewBlockAnnounce* = object
    header: BlockHeader
    # `rlpInline` serializes the body's fields inline rather than as a
    # nested RLP list, matching the wire encoding of the eth protocol.
    body {.rlpInline.}: BlockBody

  # Network-wide protocol state shared by all peers.
  NetworkState = object
    syncing: bool

  # Per-peer protocol state, filled in from the peer's `status` handshake.
  PeerState = object
    initialized: bool              # true once the status exchange completed
    bestBlockHash: KeccakHash      # peer's advertised best block hash
    bestDifficulty: DifficultyInt  # peer's advertised total difficulty
|
|
const
  # Hard caps on items served per response message, to bound the work a
  # single request can impose on us (and to disconnect abusive peers).
  maxStateFetch = 384     # state trie nodes per GetNodeData reply
  maxBodiesFetch = 128    # block bodies per GetBlockBodies reply
  maxReceiptsFetch = 256  # receipt lists per GetReceipts reply
  maxHeadersFetch = 192   # headers per GetBlockHeaders reply

  # eth/63 -- the wire protocol version implemented by this module.
  protocolVersion = 63
|
|
# Definition of the `eth` sub-protocol (version 63) using the rlpxProtocol
# DSL. Each `proc` below becomes a wire message handler; `requestResponse`
# pairs a request message with its reply and generates awaitable client
# procs (e.g. `peer.getBlockHeaders(...)` returning an Option of the reply).
rlpxProtocol eth, protocolVersion:
  # eth/63 does not tag messages with request ids; replies are matched
  # to requests by message ordering.
  useRequestIds = false

  type State = PeerState

  onPeerConnected do (peer: Peer):
    let
      network = peer.network
      chain = network.chain
      bestBlock = chain.getBestBlockHeader

    # Send our own status first, then wait for the peer's status message.
    await peer.status(protocolVersion,
                      network.networkId,
                      bestBlock.difficulty,
                      bestBlock.blockHash,
                      chain.genesisHash)

    let m = await peer.waitSingleMsg(eth.status)
    peer.state.initialized = true
    peer.state.bestDifficulty = m.totalDifficulty
    peer.state.bestBlockHash = m.bestHash

  # Incoming status handshake from the peer.
  proc status(peer: Peer,
              protocolVersion: uint,
              networkId: uint,
              totalDifficulty: DifficultyInt,
              bestHash: KeccakHash,
              genesisHash: KeccakHash) =
    # verify that the peer is on the same chain:
    if peer.network.networkId != networkId or
       peer.network.chain.genesisHash != genesisHash:
      # TODO: Is there a more specific reason here?
      await peer.disconnect(SubprotocolReason)
      return

    peer.state.bestBlockHash = bestHash
    peer.state.bestDifficulty = totalDifficulty

  # Gossip of newly appeared block hashes; not acted upon yet.
  proc newBlockHashes(peer: Peer, hashes: openarray[NewBlockHashesAnnounce]) =
    discard

  # Gossip of pending transactions; not acted upon yet.
  proc transactions(peer: Peer, transactions: openarray[Transaction]) =
    discard

  requestResponse:
    # Serves up to maxHeadersFetch consecutive headers starting at
    # `request.startBlock` (hash or number). NOTE(review): `skip` and
    # `reverse` from the request are not honored here -- only forward,
    # step-1 walks are served; confirm whether that is intentional.
    proc getBlockHeaders(peer: Peer, request: BlocksRequest) =
      if request.maxResults > uint64(maxHeadersFetch):
        # Asking for more than the cap is a protocol violation.
        await peer.disconnect(BreachOfProtocol)
        return

      var headers = newSeqOfCap[BlockHeader](request.maxResults)
      let chain = peer.network.chain
      var foundBlock: BlockHeader

      if chain.getBlockHeader(request.startBlock, foundBlock):
        headers.add foundBlock

        # Walk the chain forward until the request is satisfied or the
        # chain ends.
        while uint64(headers.len) < request.maxResults:
          if not chain.getSuccessorHeader(foundBlock, foundBlock):
            break
          headers.add foundBlock

      await peer.blockHeaders(headers)

    proc blockHeaders(p: Peer, headers: openarray[BlockHeader])

  requestResponse:
    # Serves the bodies for the requested block hashes, silently skipping
    # hashes we don't know about.
    proc getBlockBodies(peer: Peer, hashes: openarray[KeccakHash]) =
      if hashes.len > maxBodiesFetch:
        await peer.disconnect(BreachOfProtocol)
        return

      var chain = peer.network.chain

      var blockBodies = newSeqOfCap[BlockBody](hashes.len)
      for hash in hashes:
        let blockBody = chain.getBlockBody(hash)
        if not blockBody.isNil:
          # TODO: should there be an else clause here.
          # Is the peer responsible of figuring out that
          # some blocks were not found?
          blockBodies.add deref(blockBody)

      await peer.blockBodies(blockBodies)

    proc blockBodies(peer: Peer, blocks: openarray[BlockBody])

  # Announcement of a newly mined block; not acted upon yet.
  proc newBlock(peer: Peer, bh: NewBlockAnnounce, totalDifficulty: DifficultyInt) =
    discard

  # Message ids 7..12 are unused in eth/63; resume numbering at 13.
  nextID 13

  requestResponse:
    # State trie node retrieval (fast sync); serving not implemented yet.
    proc getNodeData(peer: Peer, hashes: openarray[KeccakHash]) =
      discard

    proc nodeData(peer: Peer, data: openarray[Blob]) =
      discard

  requestResponse:
    # Receipt retrieval; always answers with an empty list for now.
    proc getReceipts(peer: Peer, hashes: openarray[KeccakHash]) =
      await peer.receipts([])

    proc receipts(peer: Peer, receipts: openarray[Receipt]) =
      discard
|
2018-07-24 00:39:41 +03:00
|
|
|
type
  # Outcome of a fastBlockchainSync run.
  SyncStatus* = enum
    syncSuccess
    syncNotEnoughPeers
    syncTimeOut

  # Lifecycle of a work-queue slot.
  WantedBlocks = enum
    Initial,    # created, not yet requested from any peer
    Requested,  # a peer is currently downloading this range
    Received    # download finished; slot may be reused

  # A contiguous range of blocks to download, plus the data received so far.
  WantedBlocks = object
    startIndex: BlockNumber   # first block number in the range
    numBlocks: uint           # how many blocks were asked for
    state: WantedBlocksState
    headers: seq[BlockHeader] # headers received from the peer
    bodies: seq[BlockBody]    # matching bodies received from the peer

  # Shared state of one fast-sync run, mutated by all peer download loops.
  SyncContext = ref object
    workQueue: seq[WantedBlocks]
    endBlockNumber: BlockNumber   # sync target (exclusive upper bound)
    finalizedBlock: BlockNumber   # Block which was downloaded and verified
    chain: AbstractChainDB
|
|
proc endIndex(b: WantedBlocks): BlockNumber =
  ## First block number just past the range covered by `b`
  ## (i.e. `startIndex + numBlocks`).
  b.startIndex + b.numBlocks.u256
|
|
|
|
proc availableWorkItem(ctx: SyncContext): int =
  ## Picks (or creates) a work-queue slot for the next download and returns
  ## its index, or -1 when there is nothing left to request.
  ## Preference order: an `Initial` slot, then a reusable `Received` slot
  ## (its data has already been persisted by `returnWorkItem`), then a
  ## brand-new slot appended to the queue.
  var maxPendingBlock = ctx.finalizedBlock
  result = -1
  for i in 0 .. ctx.workQueue.high:
    case ctx.workQueue[i].state
    of Initial:
      # An unclaimed range already exists -- hand it out as-is.
      return i
    of Received:
      # Remember the last drained slot; it can be recycled below.
      result = i
    else:
      discard

    # Track the highest block number covered by any existing slot so the
    # next request starts after all in-flight/known ranges.
    let eb = ctx.workQueue[i].endIndex
    if eb > maxPendingBlock: maxPendingBlock = eb

  let nextRequestedBlock = maxPendingBlock + 1
  if nextRequestedBlock >= ctx.endBlockNumber:
    # Everything up to the sync target is already covered.
    return -1

  if result == -1:
    # No recyclable slot -- grow the queue by one.
    result = ctx.workQueue.len
    ctx.workQueue.setLen(result + 1)

  # NOTE(review): `.toInt` on a very large UInt256 difference could
  # overflow/raise before the clamp below -- verify endBlockNumber is
  # always near finalizedBlock, or clamp before converting.
  var numBlocks = (ctx.endBlockNumber - nextRequestedBlock).toInt
  if numBlocks > maxHeadersFetch:
    numBlocks = maxHeadersFetch
  ctx.workQueue[result] = WantedBlocks(startIndex: nextRequestedBlock, numBlocks: numBlocks.uint, state: Initial)
|
|
|
|
|
proc returnWorkItem(ctx: SyncContext, workItem: int) =
  ## Hands a work item back to the sync context after a download attempt:
  ## persists whatever headers/bodies were collected and frees the slot's
  ## buffers so `availableWorkItem` can recycle it.
  # `addr` avoids copying the (potentially large) WantedBlocks value; the
  ## pointer is only used within this proc, so it cannot dangle.
  let wi = addr ctx.workQueue[workItem]
  let askedBlocks = wi.numBlocks.int
  let receivedBlocks = wi.headers.len

  if askedBlocks == receivedBlocks:
    debug "Work item complete", startBlock = wi.startIndex,
                                askedBlocks,
                                receivedBlocks
  else:
    # The peer answered with fewer headers than requested. Use a distinct
    # message (the original logged "Work item complete" here too, which
    # made partial downloads indistinguishable from successful ones).
    warn "Work item incomplete", startBlock = wi.startIndex,
                                 askedBlocks,
                                 receivedBlocks

  # Persist whatever we received; any missing tail will be re-requested
  # later by availableWorkItem.
  ctx.chain.persistBlocks(wi.headers, wi.bodies)
  wi.headers.setLen(0)
  wi.bodies.setLen(0)
|
|
|
|
|
proc newSyncContext(startBlock, endBlock: BlockNumber, chain: AbstractChainDB): SyncContext =
  ## Creates the shared state for one fast-sync run that will download
  ## blocks in the range (startBlock, endBlock) from `chain`'s network.
  new result
  # Explicitly initialize the work queue instead of relying on the
  # default value of an uninitialized seq field -- later code calls
  # `high`/`setLen` on it, which must see an empty (not nil) seq.
  result.workQueue = @[]
  result.endBlockNumber = endBlock
  result.finalizedBlock = startBlock
  result.chain = chain
|
|
|
|
|
proc handleLostPeer(ctx: SyncContext) =
  ## Called when a syncing peer is disconnected mid-download.
  ## Currently a no-op: the caller has already returned the work item to
  ## the queue, so another peer's loop can pick the range up again.
  # TODO: ask the PeerPool for new connections and then call
  # `obtainBlocksFromPeer`
  discard
|
|
|
|
|
proc randomOtherPeer(node: EthereumNode, particularPeer: Peer): Peer =
  ## Returns a uniformly random `eth` peer different from `particularPeer`,
  ## or nil when no other eth peer is connected.
  # TODO: we can maintain a per-protocol list of peers in EtheruemNode
  var candidates = 0
  for p in node.peers(eth):
    if p != particularPeer:
      inc candidates

  if candidates == 0:
    return nil

  # Draw a position once, then walk the peer list a second time, counting
  # down until the drawn position is reached.
  let chosen = random(candidates) + 1
  var remaining = candidates
  for p in node.peers(eth):
    if p != particularPeer:
      if remaining == chosen:
        return p
      dec remaining
|
|
2018-08-03 14:27:37 +03:00
|
|
|
proc obtainBlocksFromPeer(peer: Peer, syncCtx: SyncContext) {.async.} =
  ## Per-peer download loop: repeatedly claims a work item from `syncCtx`,
  ## fetches its headers and bodies from `peer`, and returns the item to
  ## the context. On any failure the peer is disconnected and the item is
  ## returned so another peer can retry it.
  while (let workItemIdx = syncCtx.availableWorkItem(); workItemIdx != -1):
    # Template so each use re-indexes the queue: the seq may be reallocated
    # by other loops calling availableWorkItem while we're awaiting.
    template workItem: auto = syncCtx.workQueue[workItemIdx]
    workItem.state = Requested
    debug "Requesting block headers", start = workItem.startIndex, count = workItem.numBlocks
    let request = BlocksRequest(
      startBlock: HashOrNum(isHash: false,
                            number: workItem.startIndex),
      maxResults: workItem.numBlocks,
      skip: 0,
      reverse: false)

    try:
      let results = await peer.getBlockHeaders(request)
      if results.isSome:
        workItem.state = Received
        # shallowCopy avoids duplicating the header seq we just received.
        shallowCopy(workItem.headers, results.get.headers)

        # Fetch the matching bodies in chunks of maxBodiesFetch hashes.
        var bodies = newSeq[BlockBody]()
        var hashes = newSeq[KeccakHash]()
        for i in workItem.headers:
          hashes.add(blockHash(i))
          if hashes.len == maxBodiesFetch:
            let b = await peer.getBlockBodies(hashes)
            hashes.setLen(0)
            # NOTE(review): `b.get` raises if the request timed out
            # (isNone); that lands in the except below -- confirm intended.
            bodies.add(b.get.blocks)

        if hashes.len != 0:
          let b = await peer.getBlockBodies(hashes)
          bodies.add(b.get.blocks)

        shallowCopy(workItem.bodies, bodies)
        syncCtx.returnWorkItem workItemIdx

        continue
    except:
      # the success case uses `continue`, so we can just fall back to the
      # failure path below. If we signal time-outs with exceptions such
      # failures will be easier to handle.
      # NOTE(review): bare `except:` also swallows Defects; consider
      # narrowing to CatchableError.
      discard

    # Failure path: drop the peer, return the (possibly partial) item,
    # and let the lost-peer handler decide about replacements.
    await peer.disconnect(SubprotocolReason)
    syncCtx.returnWorkItem workItemIdx
    syncCtx.handleLostPeer()

  debug "Nothing to sync"
|
|
|
|
|
proc findBestPeer(node: EthereumNode): (Peer, DifficultyInt) =
  ## Scans the connected `eth` peers and returns the one advertising the
  ## highest total difficulty, together with that difficulty.
  ## Returns (nil, 0) when no handshaked peer is available.
  var winner: Peer = nil
  var winningDifficulty: DifficultyInt = 0.stuint(256)

  for candidate in node.peers(eth):
    let ethState = candidate.state(eth)
    # Only consider peers whose status handshake has completed.
    if ethState.initialized and ethState.bestDifficulty > winningDifficulty:
      winningDifficulty = ethState.bestDifficulty
      winner = candidate

  result = (winner, winningDifficulty)
|
|
|
|
|
proc fastBlockchainSync*(node: EthereumNode): Future[SyncStatus] {.async.} =
  ## Code for the fast blockchain sync procedure:
  ## https://github.com/ethereum/wiki/wiki/Parallel-Block-Downloads
  ## https://github.com/ethereum/go-ethereum/pull/1889
  var
    bestBlockNumber: BlockNumber

  debug "start sync"

  # 1. Find the peer advertising the highest total difficulty.
  var (bestPeer, bestBlockDifficulty) = node.findBestPeer()

  if bestPeer == nil:
    return syncNotEnoughPeers

  # 2. Resolve the best peer's advertised head hash to a block number,
  # falling back to random other peers until one answers.
  while true:
    let request = BlocksRequest(
      startBlock: HashOrNum(isHash: true,
                            hash: bestPeer.state(eth).bestBlockHash),
      maxResults: 1,
      skip: 0,
      reverse: true)

    let latestBlock = await bestPeer.getBlockHeaders(request)

    if latestBlock.isSome and latestBlock.get.headers.len > 0:
      bestBlockNumber = latestBlock.get.headers[0].blockNumber
      break

    # TODO: maintain multiple "best peer" candidates and send requests
    # to the second best option
    bestPeer = node.randomOtherPeer(bestPeer)
    if bestPeer == nil:
      return syncNotEnoughPeers

  # does the network agree with our best block?
  var
    localChain = node.chain
    bestLocalHeader = localChain.getBestBlockHeader

  # 3. Cross-check our local head against a sample of peers.
  # NOTE(review): this loop builds `request` but never sends it -- the
  # agreement check below is unfinished (see TODOs).
  for peer in node.randomPeers(5):
    if peer.supports(eth):
      let request = BlocksRequest(
        startBlock: HashOrNum(isHash: false,
                              number: bestLocalHeader.blockNumber),
        maxResults: 1,
        skip: 0,
        reverse: true)

      # TODO: check if the majority of peers agree with the block
      # positioned at our best block number.

      # TODO: In case of disagreement, perform a binary search to locate a
      # block where we agree.

  if bestLocalHeader.blockNumber >= bestBlockNumber:
    # Already at (or past) the network head -- nothing to download.
    return syncSuccess

  # 4. Start making requests in parallel for the block headers that we are
  # missing (by requesting blocks from peers while honoring maxHeadersFetch).
  # Make sure the blocks hashes add up. Don't count on everyone replying, ask
  # a different peer in case of time-out. Handle invalid or incomplete replies
  # properly. The peer may respond with fewer headers than requested (or with
  # different ones if the peer is not behaving properly).
  var syncCtx = newSyncContext(bestLocalHeader.blockNumber, bestBlockNumber, node.chain)

  for peer in node.peers:
    if peer.supports(eth):
      # TODO: we should also monitor the PeerPool for new peers here and
      # we should automatically add them to the loop.
      # NOTE(review): asyncCheck fires and forgets -- the proc returns
      # before the workers finish, so the implicit zero result
      # (syncSuccess) is reported even though syncing is still ongoing.
      asyncCheck obtainBlocksFromPeer(peer, syncCtx)

  # 5. Store the obtained headers in the blockchain DB

  # 6. Once the sync is complete, repeat from 1. until to further progress is
  # possible

  # 7. Start downloading the blockchain state in parallel
  # (maybe this could start earlier).