2018-11-23 23:58:49 +00:00
|
|
|
import
|
2019-06-10 11:13:53 +00:00
|
|
|
options, tables, sequtils, algorithm,
|
2019-03-05 22:54:08 +00:00
|
|
|
chronicles, chronos, ranges/bitranges,
|
2019-05-01 09:19:29 +00:00
|
|
|
spec/[datatypes, crypto, digest, helpers], eth/rlp,
|
2019-03-12 15:03:14 +00:00
|
|
|
beacon_node_types, eth2_network, beacon_chain_db, block_pool, time, ssz
|
|
|
|
|
|
|
|
from beacon_node import onBeaconBlock
|
|
|
|
# Careful handling of beacon_node <-> sync_protocol
|
|
|
|
# to avoid recursive dependencies
|
2018-11-23 23:58:49 +00:00
|
|
|
|
|
|
|
type
  ValidatorChangeLogEntry* = object
    ## One entry of a validator change log: either the activation of a new
    ## validator (carrying its pubkey) or some other delta, which refers to
    ## an already-known validator by registry index.
    case kind*: ValidatorSetDeltaFlags
    of Activation:
      pubkey: ValidatorPubKey
    else:
      index: uint32

  # NOTE(review): not referenced anywhere in the visible part of this file —
  # confirm it is still needed.
  ValidatorSet = seq[Validator]

  BeaconSyncState* = ref object
    ## Per-network shared state for the BeaconSync protocol (installed via
    ## `networkState = BeaconSyncState` in the protocol definition).
    networkId*: uint64   # compared during handshake; mismatching peers are dropped
    node*: BeaconNode    # local node; provides blockPool and block import
    db*: BeaconChainDB   # block storage consulted by the request handlers
|
2019-04-09 07:53:40 +00:00
|
|
|
const
  # Hard caps on request/response sizes; handlers clamp whatever the remote
  # peer asks for to these values.
  MaxRootsToRequest = 512
  # Headers are fetched for roots one-to-one, so keep the two caps equal.
  MaxHeadersToRequest = MaxRootsToRequest
  # Upper bound on blocks returned from a single getAncestorBlocks call.
  MaxAncestorBlocksResponse = 256
|
2019-04-09 07:53:40 +00:00
|
|
|
|
2019-06-10 11:13:53 +00:00
|
|
|
func toHeader(b: BeaconBlock): BeaconBlockHeader =
  ## Derive the lightweight header for block `b`; the header commits to the
  ## body through `block_body_root = hash_tree_root(b.body)`.
  result.slot = b.slot
  result.previous_block_root = b.previous_block_root
  result.state_root = b.state_root
  result.block_body_root = hash_tree_root(b.body)
  result.signature = b.signature
|
2019-02-18 10:34:39 +00:00
|
|
|
|
2019-06-10 11:13:53 +00:00
|
|
|
proc fromHeaderAndBody(b: var BeaconBlock, h: BeaconBlockHeader, body: BeaconBlockBody) =
  ## Reassemble a full block in place from a header and its matching body.
  ## The pair must be consistent: `body` has to hash to the header's
  ## `block_body_root` (callers are expected to have checked this already,
  ## so a mismatch is treated as a programming error).
  doAssert(hash_tree_root(body) == h.block_body_root)
  b.slot = h.slot
  b.previous_block_root = h.previous_block_root
  b.state_root = h.state_root
  b.signature = h.signature
  b.body = body
|
2019-02-18 10:34:39 +00:00
|
|
|
|
2019-03-13 21:23:01 +00:00
|
|
|
proc importBlocks(node: BeaconNode,
                  blocks: openarray[BeaconBlock]) =
  ## Feed a batch of synced blocks into the node one at a time through the
  ## regular `onBeaconBlock` entry point, then log how many were handed over.
  for b in blocks:
    onBeaconBlock(node, b)
  info "Forward sync imported blocks", len = blocks.len
|
|
|
|
|
|
|
|
proc mergeBlockHeadersAndBodies(headers: openarray[BeaconBlockHeader], bodies: openarray[BeaconBlockBody]): Option[seq[BeaconBlock]] =
  ## Zip matching header/body sequences back into full blocks.
  ## Returns `none` when the inputs cannot be paired up — either a length
  ## mismatch or a body that does not hash to its header's
  ## `block_body_root`. Callers treat `none` as a misbehaving peer.
  if bodies.len != headers.len:
    info "Cannot merge bodies and headers. Length mismatch.", bodies = bodies.len, headers = headers.len
    return

  # The final length is known up front — preallocate instead of growing the
  # seq one element at a time.
  var res = newSeqOfCap[BeaconBlock](headers.len)
  for i in 0 ..< headers.len:
    if hash_tree_root(bodies[i]) != headers[i].block_body_root:
      info "Block body is wrong for header"
      return

    res.setLen(res.len + 1)
    res[^1].fromHeaderAndBody(headers[i], bodies[i])
  some(res)
|
|
|
|
|
|
|
|
# Forward declaration: the protocol handlers below call getBeaconBlocks,
# whose implementation lives at the bottom of the file.
# NOTE(review): the pragmas here ({.gcsafe, async.}) differ from the
# implementation ({.async.} only) — confirm this is intentional.
proc getBeaconBlocks*(peer: Peer, blockRoot: Eth2Digest, slot: Slot, maxBlocks, skipSlots: int, backward: uint8): Future[Option[seq[BeaconBlock]]] {.gcsafe, async.}
|
2019-02-18 10:34:39 +00:00
|
|
|
|
|
|
|
|
2018-11-29 01:08:34 +00:00
|
|
|
p2pProtocol BeaconSync(version = 1,
                       shortName = "bcs",
                       networkState = BeaconSyncState):

  onPeerConnected do(peer: Peer):
    ## Handshake with the newly connected peer and, if it is ahead of us,
    ## run a simple forward-sync loop pulling batches of blocks until we
    ## catch up, the peer stalls, or a request fails.
    let
      protocolVersion = 1 # TODO: Spec doesn't specify this yet
      node = peer.networkState.node
      networkId = peer.networkState.networkId
      blockPool = node.blockPool
      finalizedHead = blockPool.finalizedHead
      headBlock = blockPool.head.blck
      bestRoot = headBlock.root
      bestSlot = headBlock.slot
      latestFinalizedEpoch = finalizedHead.slot.slot_to_epoch()

    # Exchange `status` messages; `m` holds the remote side's view.
    let m = await handshake(peer, timeout = 10.seconds,
                            status(networkId, finalizedHead.blck.root,
                                   latestFinalizedEpoch, bestRoot, bestSlot))

    # A peer on a different network is useless to us — drop it.
    if m.networkId != networkId:
      await peer.disconnect(UselessPeer)
      return

    # TODO: onPeerConnected runs unconditionally for every connected peer, but we
    # don't need to sync with everybody. The beacon node should detect a situation
    # where it needs to sync and it should execute the sync algorithm with a certain
    # number of randomly selected peers. The algorithm itself must be extracted in a proc.
    try:
      debug "Peer connected. Initiating sync", peer, bestSlot, remoteBestSlot = m.bestSlot

      # Lexicographic comparison of (finalized epoch, head slot) decides
      # which side is ahead; >= 0 means we are not behind the peer.
      let bestDiff = cmp((latestFinalizedEpoch, bestSlot), (m.latestFinalizedEpoch, m.bestSlot))
      if bestDiff >= 0:
        # Nothing to do?
        trace "Nothing to sync", peer = peer.remote
      else:
        # TODO: Check for WEAK_SUBJECTIVITY_PERIOD difference and terminate the
        # connection if it's too big.

        # Request blocks in capped batches, starting just past our head.
        var s = bestSlot + 1
        while s <= m.bestSlot:
          debug "Waiting for block headers", fromPeer = peer, remoteBestSlot = m.bestSlot, peer
          let headersLeft = int(m.bestSlot - s)
          let blocks = await peer.getBeaconBlocks(bestRoot, s, min(headersLeft, MaxHeadersToRequest), 0, 0)
          if blocks.isSome:
            if blocks.get.len == 0:
              info "Got 0 blocks while syncing", peer
              break
            node.importBlocks(blocks.get)
            let lastSlot = blocks.get[^1].slot
            # Guard against a peer that keeps serving the same slots —
            # without this check the loop could spin forever.
            if lastSlot <= s:
              info "Slot did not advance during sync", peer
              break

            s = lastSlot + 1
          else:
            # Request failed or timed out; give up on this peer for now.
            break

    except CatchableError:
      warn "Failed to sync with peer", peer, err = getCurrentExceptionMsg()
|
2019-02-18 10:34:39 +00:00
|
|
|
|
2019-03-05 22:54:08 +00:00
|
|
|
  # Handshake message ("hello"): exchanged on connect so each side learns
  # the other's network id, latest finalized checkpoint and best head.
  proc status(
          peer: Peer,
          networkId: uint64,
          latestFinalizedRoot: Eth2Digest,
          latestFinalizedEpoch: Epoch,
          bestRoot: Eth2Digest,
          bestSlot: Slot) {.libp2pProtocol("hello", "1.0.0").}
|
2019-02-18 10:34:39 +00:00
|
|
|
|
2019-04-09 07:53:40 +00:00
|
|
|
requestResponse:
|
|
|
|
proc getBeaconBlockRoots(peer: Peer, fromSlot: Slot, maxRoots: int) =
|
2019-04-10 12:47:37 +00:00
|
|
|
let maxRoots = min(MaxRootsToRequest, maxRoots)
|
2019-04-09 07:53:40 +00:00
|
|
|
var s = fromSlot
|
|
|
|
var roots = newSeqOfCap[(Eth2Digest, Slot)](maxRoots)
|
|
|
|
let blockPool = peer.networkState.node.blockPool
|
2019-05-01 09:19:29 +00:00
|
|
|
let maxSlot = blockPool.head.blck.slot
|
2019-04-09 07:53:40 +00:00
|
|
|
while s <= maxSlot:
|
|
|
|
for r in blockPool.blockRootsForSlot(s):
|
|
|
|
roots.add((r, s))
|
|
|
|
if roots.len == maxRoots: break
|
|
|
|
s += 1
|
|
|
|
await response.send(roots)
|
|
|
|
|
|
|
|
proc beaconBlockRoots(peer: Peer, roots: openarray[(Eth2Digest, Slot)])
|
2019-02-18 10:34:39 +00:00
|
|
|
|
|
|
|
requestResponse:
|
2019-03-05 22:54:08 +00:00
|
|
|
proc getBeaconBlockHeaders(
|
|
|
|
peer: Peer,
|
|
|
|
blockRoot: Eth2Digest,
|
2019-03-13 21:23:01 +00:00
|
|
|
slot: Slot,
|
2019-03-05 22:54:08 +00:00
|
|
|
maxHeaders: int,
|
2019-06-10 11:13:53 +00:00
|
|
|
skipSlots: int,
|
|
|
|
backward: uint8) {.libp2pProtocol("rpc/beacon_block_headers", "1.0.0").} =
|
2019-04-10 12:47:37 +00:00
|
|
|
let maxHeaders = min(MaxHeadersToRequest, maxHeaders)
|
2019-06-10 11:13:53 +00:00
|
|
|
var headers: seq[BeaconBlockHeader]
|
2019-02-18 10:34:39 +00:00
|
|
|
let db = peer.networkState.db
|
2019-06-10 11:13:53 +00:00
|
|
|
|
|
|
|
if backward != 0:
|
|
|
|
# TODO: implement skipSlots
|
|
|
|
|
|
|
|
var blockRoot = blockRoot
|
|
|
|
if slot != GENESIS_SLOT:
|
|
|
|
# TODO: Get block from the best chain by slot
|
|
|
|
# blockRoot = ...
|
|
|
|
discard
|
|
|
|
|
|
|
|
let blockPool = peer.networkState.node.blockPool
|
|
|
|
var br = blockPool.getRef(blockRoot)
|
|
|
|
var blockRefs = newSeqOfCap[BlockRef](maxHeaders)
|
|
|
|
|
|
|
|
while not br.isNil:
|
|
|
|
blockRefs.add(br)
|
|
|
|
if blockRefs.len == maxHeaders:
|
|
|
|
break
|
|
|
|
br = br.parent
|
|
|
|
|
|
|
|
headers = newSeqOfCap[BeaconBlockHeader](blockRefs.len)
|
|
|
|
for i in blockRefs.high .. 0:
|
|
|
|
headers.add(blockPool.get(blockRefs[i]).data.toHeader)
|
|
|
|
else:
|
|
|
|
# TODO: This branch has to be revisited and possibly somehow merged with the
|
|
|
|
# branch above once we can traverse the best chain forward
|
|
|
|
# TODO: implement skipSlots
|
|
|
|
headers = newSeqOfCap[BeaconBlockHeader](maxHeaders)
|
|
|
|
var s = slot
|
|
|
|
let blockPool = peer.networkState.node.blockPool
|
|
|
|
let maxSlot = blockPool.head.blck.slot
|
|
|
|
while s <= maxSlot:
|
|
|
|
for r in blockPool.blockRootsForSlot(s):
|
|
|
|
headers.add(db.getBlock(r).get().toHeader)
|
|
|
|
if headers.len == maxHeaders: break
|
|
|
|
s += 1
|
|
|
|
|
2019-03-05 22:54:08 +00:00
|
|
|
await response.send(headers)
|
2019-02-18 10:34:39 +00:00
|
|
|
|
2019-06-10 11:13:53 +00:00
|
|
|
proc beaconBlockHeaders(peer: Peer, blockHeaders: openarray[BeaconBlockHeader])
|
2019-03-28 14:03:19 +00:00
|
|
|
|
|
|
|
  requestResponse:
    proc getAncestorBlocks(
            peer: Peer,
            needed: openarray[FetchRecord]) =
      ## For each requested record, walk the chain backwards from the
      ## record's root (via previous_block_root) for up to `historySlots`
      ## steps, collecting blocks into one reply capped at
      ## MaxAncestorBlocksResponse.
      var resp = newSeqOfCap[BeaconBlock](needed.len)
      let db = peer.networkState.db
      # All requested roots, used to cut a walk short when it reaches a
      # block that is itself the start of another request (avoids sending
      # the same chain segment twice).
      var neededRoots = initSet[Eth2Digest]()
      for rec in needed: neededRoots.incl(rec.root)

      for rec in needed:
        if (var blck = db.getBlock(rec.root); blck.isSome()):
          # TODO validate historySlots
          # Oldest slot we are willing to reach back to for this record.
          let firstSlot = blck.get().slot - rec.historySlots

          for i in 0..<rec.historySlots.int:
            resp.add(blck.get())
            if resp.len >= MaxAncestorBlocksResponse:
              break

            if blck.get().previous_block_root in neededRoots:
              # Don't send duplicate blocks, if neededRoots has roots that are
              # in the same chain
              break

            # Step to the parent; stop on a DB gap or once we have walked
            # past the requested history window.
            if (blck = db.getBlock(blck.get().previous_block_root);
                blck.isNone() or blck.get().slot < firstSlot):
              break

          # Global cap reached — stop processing further records too.
          if resp.len >= MaxAncestorBlocksResponse:
            break

      await response.send(resp)

    proc ancestorBlocks(peer: Peer, blocks: openarray[BeaconBlock])
|
2019-02-18 10:34:39 +00:00
|
|
|
|
|
|
|
  requestResponse:
    proc getBeaconBlockBodies(
            peer: Peer,
            blockRoots: openarray[Eth2Digest]) {.libp2pProtocol("rpc/beacon_block_bodies", "1.0.0").} =
      ## Look up each requested root in the database and reply with the
      ## bodies in request order. Unknown roots yield a default-initialized
      ## (zero) body so reply positions still line up with the request.
      # TODO: Validate blockRoots.len
      var bodies = newSeqOfCap[BeaconBlockBody](blockRoots.len)
      let db = peer.networkState.db
      for r in blockRoots:
        if (let blk = db.getBlock(r); blk.isSome):
          bodies.add(blk.get().body)
        else:
          bodies.setLen(bodies.len + 1) # According to wire spec. Pad with zero body.
      await response.send(bodies)

    proc beaconBlockBodies(
            peer: Peer,
            blockBodies: openarray[BeaconBlockBody])
|
2018-11-23 23:58:49 +00:00
|
|
|
|
2019-06-10 11:13:53 +00:00
|
|
|
proc getBeaconBlocks*(peer: Peer, blockRoot: Eth2Digest, slot: Slot, maxBlocks, skipSlots: int, backward: uint8): Future[Option[seq[BeaconBlock]]] {.async.} =
  ## Retrieve block headers and block bodies from the remote peer, merge them into blocks.
  ## Returns `none` when either request fails or headers and bodies cannot
  ## be matched up; returns `some(@[])` when the peer has no headers for
  ## the requested range.
  # doAssert instead of assert: this guards a protocol-level invariant and
  # must not be compiled out in release/danger builds.
  doAssert(maxBlocks <= MaxHeadersToRequest)
  let headersResp = await peer.getBeaconBlockHeaders(blockRoot, slot, maxBlocks, skipSlots, backward)
  if headersResp.isNone: return

  let headers = headersResp.get.blockHeaders
  if headers.len == 0:
    info "Peer has no headers", peer
    var res: seq[BeaconBlock]
    return some(res)

  # Bodies are looked up by block root on the remote side.
  let bodiesRequest = headers.mapIt(signing_root(it))

  debug "Block headers received. Requesting block bodies", peer
  let bodiesResp = await peer.getBeaconBlockBodies(bodiesRequest)
  if bodiesResp.isNone:
    info "Did not receive bodies", peer
    return

  result = mergeBlockHeadersAndBodies(headers, bodiesResp.get.blockBodies)
  # If result.isNone: disconnect with BreachOfProtocol?
|