Fixed requested block range, more reliable block downloader

Yuriy Glukhov 2018-09-06 19:53:45 +03:00 committed by zah
parent 573ca08643
commit b5c898dc1e
1 changed file with 12 additions and 7 deletions


@@ -172,7 +172,7 @@ type
 proc endIndex(b: WantedBlocks): BlockNumber =
   result = b.startIndex
-  result += b.numBlocks.u256
+  result += (b.numBlocks - 1).u256

 proc availableWorkItem(ctx: SyncContext): int =
   var maxPendingBlock = ctx.finalizedBlock
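
The range fix above is an off-by-one correction: a work item covering `numBlocks` blocks starting at `startIndex` now reports `startIndex + numBlocks - 1` as its end, i.e. the index of the last block it actually requests, instead of pointing one block past the range. A minimal sketch of the corrected arithmetic, assuming `BlockNumber` is stint's `UInt256` as the `u256` conversion in the diff suggests; `WantedBlocks` is reduced to the two fields the diff touches and the `uint` type of `numBlocks` is an assumption:

```nim
import stint  # assumed dependency: provides UInt256 and the `u256` conversion used in the diff

type
  BlockNumber = UInt256       # assumption: alias used by the sync code
  WantedBlocks = object       # reduced sketch; the real object has more fields
    startIndex: BlockNumber
    numBlocks: uint           # assumed type; must be >= 1 for endIndex to make sense

proc endIndex(b: WantedBlocks): BlockNumber =
  ## Inclusive index of the last block covered by this work item.
  result = b.startIndex
  result += (b.numBlocks - 1).u256

when isMainModule:
  # 192 blocks starting at 1000 cover 1000..1191; the old formula
  # (startIndex + numBlocks) yielded 1192, one block past the range.
  let w = WantedBlocks(startIndex: 1000.u256, numBlocks: 192)
  doAssert w.endIndex == 1191.u256
```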
@@ -257,6 +257,7 @@ proc obtainBlocksFromPeer(peer: Peer, syncCtx: SyncContext) {.async.} =
       skip: 0,
       reverse: false)

+    var dataReceived = false
     try:
       let results = await peer.getBlockHeaders(request)
       if results.isSome:
@@ -277,18 +278,22 @@ proc obtainBlocksFromPeer(peer: Peer, syncCtx: SyncContext) {.async.} =
           bodies.add(b.get.blocks)
         shallowCopy(workItem.bodies, bodies)
-        syncCtx.returnWorkItem workItemIdx
-        continue
+        dataReceived = true
     except:
       # the success case uses `continue`, so we can just fall back to the
       # failure path below. If we signal time-outs with exceptions such
       # failures will be easier to handle.
       discard

-    await peer.disconnect(SubprotocolReason)
-    syncCtx.returnWorkItem workItemIdx
-    syncCtx.handleLostPeer()
+    if dataReceived:
+      syncCtx.returnWorkItem workItemIdx
+    else:
+      try:
+        await peer.disconnect(SubprotocolReason)
+      except:
+        discard
+      syncCtx.handleLostPeer()
+      break

   debug "Nothing to sync"