#
#                 Ethereum P2P
#              (c) Copyright 2018
#       Status Research & Development GmbH
#
#            Licensed under either of
#  Apache License, version 2.0, (LICENSE-APACHEv2)
#            MIT license (LICENSE-MIT)
#
|
2018-04-01 02:41:05 +00:00
|
|
|
import
|
2018-07-22 21:32:45 +00:00
|
|
|
asyncdispatch2, rlp, stint, eth_common,
|
|
|
|
../../eth_p2p
|
2018-04-01 02:41:05 +00:00
|
|
|
|
|
|
|
type
  NewBlockHashesAnnounce* = object
    ## One entry of a `newBlockHashes` announcement:
    ## a block hash paired with its block number.
    hash: KeccakHash
    number: uint

  NewBlockAnnounce* = object
    ## Payload of the `newBlock` message: a full block, with the
    ## body serialized inline (no RLP list wrapper) after the header.
    header: BlockHeader
    body {.rlpInline.}: BlockBody

  NetworkState = object
    ## Protocol-wide state for the eth sub-protocol.
    syncing: bool

  PeerState = object
    ## Per-peer state tracked by the eth sub-protocol.
    reportedTotalDifficulty: DifficultyInt  # taken from the peer's `status` message
    latestBlockHash: KeccakHash
|
|
|
|
const
  # Upper bounds on the number of items exchanged in a single
  # request/reply; a request exceeding the corresponding limit is a
  # breach of protocol. NOTE(review): presumably maxStateFetch bounds
  # `getNodeData` and maxReceiptsFetch bounds `getReceipts` — the
  # handlers below are still stubs, so only maxBodiesFetch and
  # maxHeadersFetch are enforced so far.
  maxStateFetch = 384
  maxBodiesFetch = 128
  maxReceiptsFetch = 256
  maxHeadersFetch = 192
2018-04-13 12:59:08 +00:00
|
|
|
# Definition of the `eth` wire sub-protocol, version 63.
# The `rlpxProtocol` macro turns each proc below into an incoming-message
# handler and generates the matching outgoing-message proc on `Peer`.
rlpxProtocol eth, 63:
  useRequestIds = false   # eth does not tag messages with request ids

  type State = PeerState  # per-peer state type used by `peer.state`

  proc status(peer: Peer,
              protocolVersion, networkId: uint,
              totalDifficulty: DifficultyInt,
              bestHash, genesisHash: KeccakHash) =
    ## Handshake message. Disconnects peers that are on a different
    ## network or chain; otherwise records their reported difficulty.
    # verify that the peer is on the same chain:
    if peer.network.networkId != networkId or
       peer.network.chain.genesisHash != genesisHash:
      # TODO: Is there a more specific reason here?
      await peer.disconnect(SubprotocolReason)
      return

    peer.state.reportedTotalDifficulty = totalDifficulty

  proc newBlockHashes(peer: Peer, hashes: openarray[NewBlockHashesAnnounce]) =
    ## Announcement of blocks the peer has seen. Not handled yet.
    discard

  proc transactions(peer: Peer, transactions: openarray[Transaction]) =
    ## Pending transactions broadcast by the peer. Not handled yet.
    discard

  requestResponse:
    proc getBlockHeaders(peer: Peer, request: BlocksRequest) =
      ## Serves up to `maxHeadersFetch` consecutive headers starting
      ## at `request.startBlock`, replying with `blockHeaders`.
      if request.maxResults > uint64(maxHeadersFetch):
        # Asking for more than the protocol limit is a breach.
        await peer.disconnect(BreachOfProtocol)
        return

      var chain = peer.network.chain

      var foundBlock = chain.getBlockHeader(request.startBlock)
      if not foundBlock.isNil:
        var headers = newSeqOfCap[BlockHeader](request.maxResults)

        # Walk the chain forward until the requested count is reached
        # or no successor exists.
        while uint64(headers.len) < request.maxResults:
          headers.add deref(foundBlock)
          foundBlock = chain.getSuccessorHeader deref(foundBlock)
          if foundBlock.isNil: break

        await peer.blockHeaders(headers)

    proc blockHeaders(p: Peer, headers: openarray[BlockHeader])

  requestResponse:
    proc getBlockBodies(peer: Peer, hashes: openarray[KeccakHash]) =
      ## Serves the bodies of the requested blocks (up to
      ## `maxBodiesFetch`), replying with `blockBodies`.
      if hashes.len > maxBodiesFetch:
        await peer.disconnect(BreachOfProtocol)
        return

      var chain = peer.network.chain

      var blockBodies = newSeqOfCap[BlockBody](hashes.len)
      for hash in hashes:
        let blockBody = chain.getBlockBody(hash)
        if not blockBody.isNil:
          # TODO: should there be an else clause here.
          # Is the peer responsible of figuring out that
          # some blocks were not found?
          blockBodies.add deref(blockBody)

      await peer.blockBodies(blockBodies)

    proc blockBodies(peer: Peer, blocks: openarray[BlockBody])

  proc newBlock(peer: Peer, bh: NewBlockAnnounce, totalDifficulty: DifficultyInt) =
    ## Full-block propagation from the peer. Not handled yet.
    discard

  # Message ids 0x08..0x0c were removed from the spec; the remaining
  # messages continue at id 13 (0x0d).
  nextID 13

  requestResponse:
    proc getNodeData(peer: Peer, hashes: openarray[KeccakHash]) =
      ## State trie node request (fast sync). Not handled yet.
      discard

    proc nodeData(peer: Peer, data: openarray[Blob]) =
      discard

  requestResponse:
    proc getReceipts(peer: Peer, hashes: openarray[KeccakHash]) =
      ## Receipt request. Not handled yet.
      discard

    proc receipts(peer: Peer, receipts: openarray[Receipt]) =
      discard
2018-07-22 21:32:45 +00:00
|
|
|
proc fastBlockchainSync*(node: EthereumNode) {.async.} =
  ## Skeleton of the fast-sync algorithm. Only steps 1 and the
  ## beginning of step 2 are implemented; the rest is outlined in
  ## the numbered comments below.

  # 1. obtain last N block headers from all peers
  var latestBlocksRequest: BlocksRequest
  var requests = newSeqOfCap[Future[Option[eth.blockHeaders]]](32)
  for peer in node.peers:
    if peer.supports(eth):
      requests.add peer.getBlockHeaders(latestBlocksRequest)

  # Wait for all replies (including timeouts/failures) before reading.
  discard await all(requests)

  # 2. find out what is the block with best total difficulty
  var bestBlockDifficulty: DifficultyInt = 0.stuint(256)
  for req in requests:
    if req.read.isNone: continue
    for header in req.read.get.headers:
      if header.difficulty > bestBlockDifficulty:
        # TODO: remember the best header/peer here — still a stub.
        discard

  # 3. establish the highest valid block for each peer
  # keep in mind that some of the peers may report an alternative history, so
  # we must find the last block where each peer agreed with the best peer

  # 4. Start making requests in parallel for the block headers that we are
  # missing (by requesting blocks from peers while honoring maxHeadersFetch).
  # Make sure the blocks hashes add up. Don't count on everyone replying, ask
  # a different peer in case of time-out. Handle invalid or incomplete replies
  # properly. The peer may respond with fewer headers than requested (or with
  # different ones if the peer is not behaving properly).

  # 5. Store the obtained headers in the blockchain DB

  # 6. Once the sync is complete, repeat from 1. until to further progress is
  # possible

  # 7. Start downloading the blockchain state in parallel
  # (maybe this could start earlier).