mirror of https://github.com/status-im/op-geth.git

eth/downloader: stream partial skeleton filling to processor

parent b40dc8a1da
commit e86619e75d
@@ -54,7 +54,7 @@ var (
 	blockTTL = 3 * blockTargetRTT // [eth/61] Maximum time allowance before a block request is considered expired

 	headerTargetRTT = time.Second // [eth/62] Target time for completing a header retrieval request (only for measurements for now)
-	headerTTL = 2 * time.Second // [eth/62] Time it takes for a header request to time out
+	headerTTL = 3 * time.Second // [eth/62] Time it takes for a header request to time out
 	bodyTargetRTT = 3 * time.Second / 2 // [eth/62] Target time for completing a block body retrieval request
 	bodyTTL = 3 * bodyTargetRTT // [eth/62] Maximum time allowance before a block body request is considered expired
 	receiptTargetRTT = 3 * time.Second / 2 // [eth/63] Target time for completing a receipt retrieval request
@@ -1064,7 +1064,7 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 			continue
 		}
 		// Otherwise check if we already know the header or not
-		if (d.mode != LightSync && d.hasBlockAndState(headers[i].Hash())) || (d.mode == LightSync && d.hasHeader(headers[i].Hash())) {
+		if (d.mode == FullSync && d.hasBlockAndState(headers[i].Hash())) || (d.mode != FullSync && d.hasHeader(headers[i].Hash())) {
 			number, hash = headers[i].Number.Uint64(), headers[i].Hash()
 			break
 		}
@@ -1226,21 +1226,24 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
 
 			// If we received a skeleton batch, resolve internals concurrently
 			if skeleton {
-				filled, err := d.fillHeaderSkeleton(from, headers)
+				filled, proced, err := d.fillHeaderSkeleton(from, headers)
 				if err != nil {
 					glog.V(logger.Debug).Infof("%v: skeleton chain invalid: %v", p, err)
 					return errInvalidChain
 				}
-				headers = filled
+				headers = filled[proced:]
+				from += uint64(proced)
 			}
 			// Insert all the new headers and fetch the next batch
-			glog.V(logger.Detail).Infof("%v: schedule %d headers from #%d", p, len(headers), from)
-			select {
-			case d.headerProcCh <- headers:
-			case <-d.cancelCh:
-				return errCancelHeaderFetch
-			}
-			from += uint64(len(headers))
+			if len(headers) > 0 {
+				glog.V(logger.Detail).Infof("%v: schedule %d headers from #%d", p, len(headers), from)
+				select {
+				case d.headerProcCh <- headers:
+				case <-d.cancelCh:
+					return errCancelHeaderFetch
+				}
+				from += uint64(len(headers))
+			}
 			getHeaders(from)
 
 		case <-timeout.C:
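The two new lines above keep the scheduling arithmetic consistent with the streamed prefix: whatever the skeleton fill has already pushed to the processor is dropped from the local batch and `from` is advanced past it, so no header is handed over twice. Below is a minimal, standalone Go sketch of that bookkeeping, not part of this diff; the names maxHeaderFetch, filled and proced are illustrative stand-ins only.

package main

import "fmt"

func main() {
	const maxHeaderFetch = 192 // one skeleton gap, mirroring MaxHeaderFetch

	// Pretend the fill returned two complete gaps starting at header #1226
	// and had already streamed the first gap to the processor (proced).
	filled := make([]int, 2*maxHeaderFetch)
	for i := range filled {
		filled[i] = 1226 + i
	}
	from := uint64(1226)
	proced := maxHeaderFetch

	headers := filled[proced:] // only the unstreamed tail is scheduled here
	from += uint64(proced)     // the tail starts this much further along

	fmt.Println(len(headers), from) // 192 1418: nothing is delivered twice
}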
@@ -1272,14 +1275,21 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
 
 // fillHeaderSkeleton concurrently retrieves headers from all our available peers
 // and maps them to the provided skeleton header chain.
-func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, error) {
+//
+// Any partial results from the beginning of the skeleton is (if possible) forwarded
+// immediately to the header processor to keep the rest of the pipeline full even
+// in the case of header stalls.
+//
+// The method returs the entire filled skeleton and also the number of headers
+// already forwarded for processing.
+func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) {
 	glog.V(logger.Debug).Infof("Filling up skeleton from #%d", from)
 	d.queue.ScheduleSkeleton(from, skeleton)
 
 	var (
 		deliver = func(packet dataPack) (int, error) {
 			pack := packet.(*headerPack)
-			return d.queue.DeliverHeaders(pack.peerId, pack.headers)
+			return d.queue.DeliverHeaders(pack.peerId, pack.headers, d.headerProcCh)
 		}
 		expire   = func() map[string]int { return d.queue.ExpireHeaders(headerTTL) }
 		throttle = func() bool { return false }
@@ -1295,7 +1305,9 @@ func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) (
 		nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "Header")
 
 	glog.V(logger.Debug).Infof("Skeleton fill terminated: %v", err)
-	return d.queue.RetrieveHeaders(), err
+
+	filled, proced := d.queue.RetrieveHeaders()
+	return filled, proced, err
 }
 
 // fetchBodies iteratively downloads the scheduled block bodies, taking any
@@ -1258,6 +1258,7 @@ func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
 	// rolled back, and also the pivot point being reverted to a non-block status.
 	tester.newPeer("block-attack", protocol, hashes, headers, blocks, receipts)
 	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
+	delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing]) // Make sure the fast-attacker doesn't fill in
 	delete(tester.peerHeaders["block-attack"], hashes[len(hashes)-missing])
 
 	if err := tester.sync("block-attack", nil, mode); err == nil {
@@ -87,6 +87,7 @@ type queue struct {
 	headerPendPool map[string]*fetchRequest // [eth/62] Currently pending header retrieval operations
 	headerDonePool map[uint64]struct{} // [eth/62] Set of the completed header fetches
 	headerResults []*types.Header // [eth/62] Result cache accumulating the completed headers
+	headerProced int // [eth/62] Number of headers already processed from the results
 	headerOffset uint64 // [eth/62] Number of the first header in the result cache
 	headerContCh chan bool // [eth/62] Channel to notify when header download finishes
 
@@ -365,6 +366,7 @@ func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
 	q.headerTaskQueue = prque.New()
 	q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains
 	q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch)
+	q.headerProced = 0
 	q.headerOffset = from
 	q.headerContCh = make(chan bool, 1)
 
@@ -378,14 +380,14 @@ func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
 
 // RetrieveHeaders retrieves the header chain assemble based on the scheduled
 // skeleton.
-func (q *queue) RetrieveHeaders() []*types.Header {
+func (q *queue) RetrieveHeaders() ([]*types.Header, int) {
 	q.lock.Lock()
 	defer q.lock.Unlock()
 
-	headers := q.headerResults
-	q.headerResults = nil
+	headers, proced := q.headerResults, q.headerProced
+	q.headerResults, q.headerProced = nil, 0
 
-	return headers
+	return headers, proced
 }
 
 // Schedule adds a set of headers for the download queue for scheduling, returning
@@ -976,7 +978,11 @@ func (q *queue) DeliverBlocks(id string, blocks []*types.Block) (int, error) {
 // DeliverHeaders injects a header retrieval response into the header results
 // cache. This method either accepts all headers it received, or none of them
 // if they do not map correctly to the skeleton.
-func (q *queue) DeliverHeaders(id string, headers []*types.Header) (int, error) {
+//
+// If the headers are accepted, the method makes an attempt to deliver the set
+// of ready headers to the processor to keep the pipeline full. However it will
+// not block to prevent stalling other pending deliveries.
+func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh chan []*types.Header) (int, error) {
 	q.lock.Lock()
 	defer q.lock.Unlock()
 
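The new doc comment describes a best-effort, non-blocking hand-off to the processor. Below is a small standalone Go sketch of that pattern, not part of this diff; procCh and forward are assumed names, not the queue's API. The send succeeds only if the processor has room, otherwise the delivery returns immediately and the headers simply stay in the result cache for a later attempt.

package main

import "fmt"

func main() {
	procCh := make(chan []int, 1) // stand-in for headerProcCh

	// forward tries to push a batch to the processor without ever blocking
	// the peer that delivered it; a full channel just means "retry later".
	forward := func(batch []int) bool {
		select {
		case procCh <- batch:
			return true
		default:
			return false
		}
	}

	fmt.Println(forward([]int{1, 2, 3})) // true: the buffer had room
	fmt.Println(forward([]int{4, 5, 6})) // false: previous batch not drained yet
	fmt.Println(<-procCh)                // [1 2 3]
}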
@@ -1030,10 +1036,27 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header) (int, error)
 		q.headerTaskQueue.Push(request.From, -float32(request.From))
 		return 0, errors.New("delivery not accepted")
 	}
-	// Clean up a successful fetch, check for termination and return
+	// Clean up a successful fetch and try to deliver any sub-results
 	copy(q.headerResults[request.From-q.headerOffset:], headers)
 	delete(q.headerTaskPool, request.From)
 
+	ready := 0
+	for q.headerProced+ready < len(q.headerResults) && q.headerResults[q.headerProced+ready] != nil {
+		ready += MaxHeaderFetch
+	}
+	if ready > 0 {
+		// Headers are ready for delivery, gather them and push forward (non blocking)
+		process := make([]*types.Header, ready)
+		copy(process, q.headerResults[q.headerProced:q.headerProced+ready])
+
+		select {
+		case headerProcCh <- process:
+			glog.V(logger.Detail).Infof("%s: pre-scheduled %d headers from #%v", id, len(process), process[0].Number)
+			q.headerProced += len(process)
+		default:
+		}
+	}
+	// Check for termination and return
 	if len(q.headerTaskPool) == 0 {
 		q.headerContCh <- false
 	}
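The ready loop added above walks the result cache in MaxHeaderFetch-sized strides: because each header task fills a whole skeleton gap, probing the first slot of each gap past headerProced is enough to find how long the contiguous, deliverable prefix has grown. A standalone Go sketch of that scan follows; gap, results and proced are assumed stand-ins for MaxHeaderFetch, q.headerResults and q.headerProced.

package main

import "fmt"

func main() {
	const gap = 4 // stand-in for MaxHeaderFetch

	results := make([]*int, 3*gap) // stand-in for q.headerResults
	proced := 0                    // stand-in for q.headerProced

	// fill copies one completed gap into its slot of the result cache.
	fill := func(from int) {
		for i := 0; i < gap; i++ {
			v := from + i
			results[from+i] = &v
		}
	}
	// readyPrefix is the same stride scan as in DeliverHeaders above.
	readyPrefix := func() int {
		ready := 0
		for proced+ready < len(results) && results[proced+ready] != nil {
			ready += gap
		}
		return ready
	}

	fill(gap)                  // the second gap arrives first...
	fmt.Println(readyPrefix()) // 0: nothing contiguous from the front yet
	fill(0)                    // ...then the first gap completes
	fmt.Println(readyPrefix()) // 8: two whole gaps can now be forwarded
}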