Mirror of https://github.com/status-im/op-geth.git

eth: update metrics collection to handle eth/62 algos

Parent: 47a7fe5d22
Commit: 17f65cd1e5
@@ -289,7 +289,7 @@ func updateChart(metric string, data []float64, base *int, chart *termui.LineChart
 		}
 	}
 	unit, scale := 0, 1.0
-	for high >= 1000 {
+	for high >= 1000 && unit+1 < len(dataUnits) {
 		high, unit, scale = high/1000, unit+1, scale*1000
 	}
 	// If the unit changes, re-create the chart (hack to set max height...)
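The guard added above keeps the unit index from running off the end of the unit-label table when a metric spikes by several orders of magnitude. A minimal standalone sketch of the same scaling logic; the dataUnits table and the sample value are illustrative, not taken from the repository:

package main

import "fmt"

// Illustrative unit table; the real monitor defines its own.
var dataUnits = []string{"", "K", "M", "G"}

func scaleValue(high float64) (float64, string) {
	unit := 0
	// Stop when the value fits, or when no larger unit exists, so
	// unit can never index past the end of dataUnits.
	for high >= 1000 && unit+1 < len(dataUnits) {
		high, unit = high/1000, unit+1
	}
	return high, dataUnits[unit]
}

func main() {
	v, u := scaleValue(12345678)
	fmt.Printf("%.2f%s\n", v, u) // 12.35M
}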
@@ -526,6 +526,7 @@ func (d *Downloader) fetchHashes61(p *peer, td *big.Int, from uint64) error {
 	glog.V(logger.Debug).Infof("%v: downloading hashes from #%d", p, from)

 	// Create a timeout timer, and the associated hash fetcher
+	request := time.Now()       // time of the last fetch request
 	timeout := time.NewTimer(0) // timer to dump a non-responsive active peer
 	<-timeout.C                 // timeout channel should be initially empty
 	defer timeout.Stop()
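The time.NewTimer(0) / <-timeout.C pair is a stock Go idiom: the zero-duration timer fires immediately, the channel is drained once, and each subsequent request re-arms it with Reset. A self-contained sketch of the idiom, with hypothetical request plumbing standing in for the peer fetch:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Create an already-fired timer and drain it once, so the timeout
	// channel is guaranteed empty before the first Reset.
	timeout := time.NewTimer(0)
	<-timeout.C
	defer timeout.Stop()

	reply := make(chan string, 1)
	go func() { // stand-in for an in-flight network request
		time.Sleep(50 * time.Millisecond)
		reply <- "hashes"
	}()

	timeout.Reset(100 * time.Millisecond) // re-armed per request
	select {
	case msg := <-reply:
		fmt.Println("received:", msg)
	case <-timeout.C:
		fmt.Println("request timed out")
	}
}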
@@ -534,6 +535,7 @@ func (d *Downloader) fetchHashes61(p *peer, td *big.Int, from uint64) error {
 		glog.V(logger.Detail).Infof("%v: fetching %d hashes from #%d", p, MaxHashFetch, from)

 		go p.getAbsHashes(from, MaxHashFetch)
+		request = time.Now()
 		timeout.Reset(hashTTL)
 	}
 	// Start pulling hashes, until all are exhausted
@@ -557,6 +559,7 @@ func (d *Downloader) fetchHashes61(p *peer, td *big.Int, from uint64) error {
 				glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", hashPack.peerId)
 				break
 			}
+			hashReqTimer.UpdateSince(request)
 			timeout.Stop()

 			// If no more hashes are inbound, notify the block fetcher and return
@@ -609,6 +612,7 @@ func (d *Downloader) fetchHashes61(p *peer, td *big.Int, from uint64) error {

 		case <-timeout.C:
 			glog.V(logger.Debug).Infof("%v: hash request timed out", p)
+			hashTimeoutMeter.Mark(1)
 			return errTimeout
 		}
 	}
@@ -896,6 +900,7 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error {
 	defer glog.V(logger.Debug).Infof("%v: header download terminated", p)

 	// Create a timeout timer, and the associated hash fetcher
+	request := time.Now()       // time of the last fetch request
 	timeout := time.NewTimer(0) // timer to dump a non-responsive active peer
 	<-timeout.C                 // timeout channel should be initially empty
 	defer timeout.Stop()
@@ -904,6 +909,7 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error {
 		glog.V(logger.Detail).Infof("%v: fetching %d headers from #%d", p, MaxHeaderFetch, from)

 		go p.getAbsHeaders(from, MaxHeaderFetch, 0, false)
+		request = time.Now()
 		timeout.Reset(headerTTL)
 	}
 	// Start pulling headers, until all are exhausted
@@ -927,6 +933,7 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error {
 				glog.V(logger.Debug).Infof("Received headers from incorrect peer (%s)", headerPack.peerId)
 				break
 			}
+			headerReqTimer.UpdateSince(request)
 			timeout.Stop()

 			// If no more headers are inbound, notify the body fetcher and return
@@ -980,6 +987,7 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error {
 		case <-timeout.C:
 			// Header retrieval timed out, consider the peer bad and drop
 			glog.V(logger.Debug).Infof("%v: header request timed out", p)
+			headerTimeoutMeter.Mark(1)
 			d.dropPeer(p.id)

 			// Finish the sync gracefully instead of dumping the gathered data though
@@ -1244,7 +1252,14 @@ func (d *Downloader) process() {
 // DeliverHashes61 injects a new batch of hashes received from a remote node into
 // the download schedule. This is usually invoked through the BlockHashesMsg by
 // the protocol handler.
-func (d *Downloader) DeliverHashes61(id string, hashes []common.Hash) error {
+func (d *Downloader) DeliverHashes61(id string, hashes []common.Hash) (err error) {
+	// Update the delivery metrics for both good and failed deliveries
+	hashInMeter.Mark(int64(len(hashes)))
+	defer func() {
+		if err != nil {
+			hashDropMeter.Mark(int64(len(hashes)))
+		}
+	}()
 	// Make sure the downloader is active
 	if atomic.LoadInt32(&d.synchronising) == 0 {
 		return errNoSyncActive
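All four Deliver* methods below share one pattern: the return value is named so a deferred closure can observe the final error and mark the drop meter only for failed deliveries, while the in-meter counts everything up front. A standalone sketch of the pattern; the meter type and the deliver function are simplified stand-ins, not the repository's code:

package main

import "fmt"

// meter is a stand-in for the metrics.Meter used in the diff.
type meter struct{ name string }

func (m *meter) Mark(n int64) { fmt.Printf("%s += %d\n", m.name, n) }

var (
	inMeter   = &meter{"hashes/in"}
	dropMeter = &meter{"hashes/drop"}
)

// deliver mirrors the diff's pattern: the named return value lets the
// deferred closure inspect the final error and count only failures.
func deliver(hashes []string, active bool) (err error) {
	inMeter.Mark(int64(len(hashes)))
	defer func() {
		if err != nil {
			dropMeter.Mark(int64(len(hashes)))
		}
	}()
	if !active {
		return fmt.Errorf("no sync active")
	}
	return nil
}

func main() {
	deliver([]string{"a", "b"}, true) // counted in, not dropped
	deliver([]string{"c"}, false)     // counted in and dropped
}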
@@ -1265,7 +1280,14 @@ func (d *Downloader) DeliverHashes61(id string, hashes []common.Hash) error {

 // DeliverBlocks61 injects a new batch of blocks received from a remote node.
 // This is usually invoked through the BlocksMsg by the protocol handler.
-func (d *Downloader) DeliverBlocks61(id string, blocks []*types.Block) error {
+func (d *Downloader) DeliverBlocks61(id string, blocks []*types.Block) (err error) {
+	// Update the delivery metrics for both good and failed deliveries
+	blockInMeter.Mark(int64(len(blocks)))
+	defer func() {
+		if err != nil {
+			blockDropMeter.Mark(int64(len(blocks)))
+		}
+	}()
 	// Make sure the downloader is active
 	if atomic.LoadInt32(&d.synchronising) == 0 {
 		return errNoSyncActive
@@ -1286,7 +1308,14 @@ func (d *Downloader) DeliverBlocks61(id string, blocks []*types.Block) error {

 // DeliverHeaders injects a new batch of block headers received from a remote
 // node into the download schedule.
-func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) error {
+func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) (err error) {
+	// Update the delivery metrics for both good and failed deliveries
+	headerInMeter.Mark(int64(len(headers)))
+	defer func() {
+		if err != nil {
+			headerDropMeter.Mark(int64(len(headers)))
+		}
+	}()
 	// Make sure the downloader is active
 	if atomic.LoadInt32(&d.synchronising) == 0 {
 		return errNoSyncActive
@@ -1306,7 +1335,14 @@ func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) error {
 }

 // DeliverBodies injects a new batch of block bodies received from a remote node.
-func (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transaction, uncles [][]*types.Header) error {
+func (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transaction, uncles [][]*types.Header) (err error) {
+	// Update the delivery metrics for both good and failed deliveries
+	bodyInMeter.Mark(int64(len(transactions)))
+	defer func() {
+		if err != nil {
+			bodyDropMeter.Mark(int64(len(transactions)))
+		}
+	}()
 	// Make sure the downloader is active
 	if atomic.LoadInt32(&d.synchronising) == 0 {
 		return errNoSyncActive
@@ -0,0 +1,45 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Contains the metrics collected by the downloader.
+
+package downloader
+
+import (
+	"github.com/ethereum/go-ethereum/metrics"
+)
+
+var (
+	hashInMeter      = metrics.NewMeter("eth/downloader/hashes/in")
+	hashReqTimer     = metrics.NewTimer("eth/downloader/hashes/req")
+	hashDropMeter    = metrics.NewMeter("eth/downloader/hashes/drop")
+	hashTimeoutMeter = metrics.NewMeter("eth/downloader/hashes/timeout")
+
+	blockInMeter      = metrics.NewMeter("eth/downloader/blocks/in")
+	blockReqTimer     = metrics.NewTimer("eth/downloader/blocks/req")
+	blockDropMeter    = metrics.NewMeter("eth/downloader/blocks/drop")
+	blockTimeoutMeter = metrics.NewMeter("eth/downloader/blocks/timeout")
+
+	headerInMeter      = metrics.NewMeter("eth/downloader/headers/in")
+	headerReqTimer     = metrics.NewTimer("eth/downloader/headers/req")
+	headerDropMeter    = metrics.NewMeter("eth/downloader/headers/drop")
+	headerTimeoutMeter = metrics.NewMeter("eth/downloader/headers/timeout")
+
+	bodyInMeter      = metrics.NewMeter("eth/downloader/bodies/in")
+	bodyReqTimer     = metrics.NewTimer("eth/downloader/bodies/req")
+	bodyDropMeter    = metrics.NewMeter("eth/downloader/bodies/drop")
+	bodyTimeoutMeter = metrics.NewMeter("eth/downloader/bodies/timeout")
+)
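The meter names follow a uniform eth/downloader/<item>/<event> hierarchy, which makes prefix-based aggregation downstream straightforward. A toy registry sketch illustrating the naming scheme; the Meter type and registry map here are simplified stand-ins for the metrics package, not its actual API:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// Meter and registry are simplified stand-ins for the metrics
// package; only the naming scheme is the point here.
type Meter struct{ count int64 }

func (m *Meter) Mark(n int64) { m.count += n }

var registry = map[string]*Meter{}

func NewMeter(name string) *Meter {
	m := &Meter{}
	registry[name] = m
	return m
}

func main() {
	hashIn := NewMeter("eth/downloader/hashes/in")
	hashDrop := NewMeter("eth/downloader/hashes/drop")
	hashIn.Mark(128)
	hashDrop.Mark(2)

	// Hierarchical names make prefix aggregation trivial.
	names := []string{}
	for name := range registry {
		if strings.HasPrefix(name, "eth/downloader/hashes/") {
			names = append(names, name)
		}
	}
	sort.Strings(names)
	for _, name := range names {
		fmt.Printf("%s = %d\n", name, registry[name].count)
	}
}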
@@ -397,9 +397,19 @@ func (q *queue) Expire(timeout time.Duration) []string {
 	peers := []string{}
 	for id, request := range q.pendPool {
 		if time.Since(request.Time) > timeout {
+			// Update the metrics with the timeout
+			if len(request.Hashes) > 0 {
+				blockTimeoutMeter.Mark(1)
+			} else {
+				bodyTimeoutMeter.Mark(1)
+			}
+			// Return any non satisfied requests to the pool
 			for hash, index := range request.Hashes {
 				q.hashQueue.Push(hash, float32(index))
 			}
+			for _, header := range request.Headers {
+				q.headerQueue.Push(header, -float32(header.Number.Uint64()))
+			}
 			peers = append(peers, id)
 		}
 	}
@@ -420,6 +430,7 @@ func (q *queue) Deliver61(id string, blocks []*types.Block) (err error) {
 	if request == nil {
 		return errNoFetchesPending
 	}
+	blockReqTimer.UpdateSince(request.Time)
 	delete(q.pendPool, id)

 	// If no blocks were retrieved, mark them as unavailable for the origin peer
@@ -468,6 +479,7 @@ func (q *queue) Deliver(id string, txLists [][]*types.Transaction, uncleLists []
 	if request == nil {
 		return errNoFetchesPending
 	}
+	bodyReqTimer.UpdateSince(request.Time)
 	delete(q.pendPool, id)

 	// If no block bodies were retrieved, mark them as unavailable for the origin peer
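Stamping request.Time when a fetch is issued and calling Timer.UpdateSince on delivery folds each round trip into the request timer's latency distribution. A self-contained sketch; the Timer type here is a simplified stand-in for metrics.Timer:

package main

import (
	"fmt"
	"time"
)

// Timer is a simplified stand-in for metrics.Timer: it records raw
// durations, where the real one maintains a latency histogram.
type Timer struct{ samples []time.Duration }

func (t *Timer) UpdateSince(ts time.Time) {
	t.samples = append(t.samples, time.Since(ts))
}

var blockReqTimer = &Timer{}

func main() {
	request := time.Now() // stamped when the fetch goes out

	time.Sleep(25 * time.Millisecond) // the network round trip

	blockReqTimer.UpdateSince(request) // stamped when the reply lands
	for _, d := range blockReqTimer.samples {
		fmt.Println("block request took", d.Round(time.Millisecond))
	}
}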
@@ -347,18 +347,19 @@ func (f *Fetcher) loop() {

 		case notification := <-f.notify:
 			// A block was announced, make sure the peer isn't DOSing us
-			announceMeter.Mark(1)
+			propAnnounceInMeter.Mark(1)

 			count := f.announces[notification.origin] + 1
 			if count > hashLimit {
 				glog.V(logger.Debug).Infof("Peer %s: exceeded outstanding announces (%d)", notification.origin, hashLimit)
+				propAnnounceDOSMeter.Mark(1)
 				break
 			}
 			// If we have a valid block number, check that it's potentially useful
 			if notification.number > 0 {
 				if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
 					glog.V(logger.Debug).Infof("[eth/62] Peer %s: discarded announcement #%d [%x…], distance %d", notification.origin, notification.number, notification.hash[:4], dist)
-					discardMeter.Mark(1)
+					propAnnounceDropMeter.Mark(1)
 					break
 				}
 			}
@@ -377,7 +378,7 @@ func (f *Fetcher) loop() {

 		case op := <-f.inject:
 			// A direct block insertion was requested, try and fill any pending gaps
-			broadcastMeter.Mark(1)
+			propBroadcastInMeter.Mark(1)
 			f.enqueue(op.origin, op.block)

 		case hash := <-f.done:
@@ -425,10 +426,12 @@ func (f *Fetcher) loop() {
 				}
 				if fetchBlocks != nil {
 					// Use old eth/61 protocol to retrieve whole blocks
+					blockFetchMeter.Mark(int64(len(hashes)))
 					fetchBlocks(hashes)
 				} else {
 					// Use new eth/62 protocol to retrieve headers first
 					for _, hash := range hashes {
+						headerFetchMeter.Mark(1)
 						fetchHeader(hash) // Suboptimal, but protocol doesn't allow batch header retrievals
 					}
 				}
@@ -467,6 +470,7 @@ func (f *Fetcher) loop() {
 				if f.completingHook != nil {
 					f.completingHook(hashes)
 				}
+				bodyFetchMeter.Mark(int64(len(hashes)))
 				go f.completing[hashes[0]].fetchBodies(hashes)
 			}
 			// Schedule the next fetch if blocks are still pending
@@ -480,6 +484,7 @@ func (f *Fetcher) loop() {
 			case <-f.quit:
 				return
 			}
+			blockFilterInMeter.Mark(int64(len(blocks)))

 			explicit, download := []*types.Block{}, []*types.Block{}
 			for _, block := range blocks {
@@ -498,6 +503,7 @@ func (f *Fetcher) loop() {
 				}
 			}

+			blockFilterOutMeter.Mark(int64(len(download)))
 			select {
 			case filter <- download:
 			case <-f.quit:
@@ -520,6 +526,8 @@ func (f *Fetcher) loop() {
 			case <-f.quit:
 				return
 			}
+			headerFilterInMeter.Mark(int64(len(task.headers)))
+
 			// Split the batch of headers into unknown ones (to return to the caller),
 			// known incomplete ones (requiring body retrievals) and completed blocks.
 			unknown, incomplete, complete := []*types.Header{}, []*announce{}, []*types.Block{}
@@ -544,7 +552,10 @@ func (f *Fetcher) loop() {
 				if header.TxHash == types.DeriveSha(types.Transactions{}) && header.UncleHash == types.CalcUncleHash([]*types.Header{}) {
 					glog.V(logger.Detail).Infof("[eth/62] Peer %s: block #%d [%x…] empty, skipping body retrieval", announce.origin, header.Number.Uint64(), header.Hash().Bytes()[:4])

-					complete = append(complete, types.NewBlockWithHeader(header))
+					block := types.NewBlockWithHeader(header)
+					block.ReceivedAt = task.time
+
+					complete = append(complete, block)
 					f.completing[hash] = announce
 					continue
 				}
@@ -559,6 +570,7 @@ func (f *Fetcher) loop() {
 					unknown = append(unknown, header)
 				}
 			}
+			headerFilterOutMeter.Mark(int64(len(unknown)))
 			select {
 			case filter <- &headerFilterTask{headers: unknown, time: task.time}:
 			case <-f.quit:
@@ -590,6 +602,7 @@ func (f *Fetcher) loop() {
 			case <-f.quit:
 				return
 			}
+			bodyFilterInMeter.Mark(int64(len(task.transactions)))

 			blocks := []*types.Block{}
 			for i := 0; i < len(task.transactions) && i < len(task.uncles); i++ {
@@ -606,7 +619,10 @@ func (f *Fetcher) loop() {
 						matched = true

 						if f.getBlock(hash) == nil {
-							blocks = append(blocks, types.NewBlockWithHeader(announce.header).WithBody(task.transactions[i], task.uncles[i]))
+							block := types.NewBlockWithHeader(announce.header).WithBody(task.transactions[i], task.uncles[i])
+							block.ReceivedAt = task.time
+
+							blocks = append(blocks, block)
 						} else {
 							f.forgetHash(hash)
 						}
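Both assembly paths above now stamp block.ReceivedAt = task.time, which is what lets propBroadcastOutTimer.UpdateSince(block.ReceivedAt) and propAnnounceOutTimer measure arrival-to-propagation latency later in insert(). A sketch of that flow; the Block type here is a stand-in for types.Block:

package main

import (
	"fmt"
	"time"
)

// Block is a stand-in for types.Block; only the ReceivedAt field
// used by the diff matters here.
type Block struct {
	Number     uint64
	ReceivedAt time.Time
}

func main() {
	taskTime := time.Now() // when the header/body packet arrived

	// Assemble the block and stamp its arrival time, mirroring both
	// the empty-block path and the body-filter path above.
	block := &Block{Number: 1, ReceivedAt: taskTime}

	time.Sleep(10 * time.Millisecond) // validation and import happen here

	// The propagation timers later measure from that stamp.
	fmt.Println("arrival-to-propagation latency:",
		time.Since(block.ReceivedAt).Round(time.Millisecond))
}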
@@ -621,6 +637,7 @@ func (f *Fetcher) loop() {
 				}
 			}

+			bodyFilterOutMeter.Mark(int64(len(task.transactions)))
 			select {
 			case filter <- task:
 			case <-f.quit:
@@ -677,13 +694,14 @@ func (f *Fetcher) enqueue(peer string, block *types.Block) {
 	count := f.queues[peer] + 1
 	if count > blockLimit {
 		glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x…], exceeded allowance (%d)", peer, block.NumberU64(), hash.Bytes()[:4], blockLimit)
+		propBroadcastDOSMeter.Mark(1)
 		f.forgetHash(hash)
 		return
 	}
 	// Discard any past or too distant blocks
 	if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
 		glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x…], distance %d", peer, block.NumberU64(), hash.Bytes()[:4], dist)
-		discardMeter.Mark(1)
+		propBroadcastDropMeter.Mark(1)
 		f.forgetHash(hash)
 		return
 	}
@@ -724,11 +742,10 @@ func (f *Fetcher) insert(peer string, block *types.Block) {
 		switch err := f.validateBlock(block, parent); err {
 		case nil:
 			// All ok, quickly propagate to our peers
-			broadcastTimer.UpdateSince(block.ReceivedAt)
+			propBroadcastOutTimer.UpdateSince(block.ReceivedAt)
 			go f.broadcastBlock(block, true)

 		case core.BlockFutureErr:
-			futureMeter.Mark(1)
 			// Weird future block, don't fail, but neither propagate

 		default:
@@ -743,7 +760,7 @@ func (f *Fetcher) insert(peer string, block *types.Block) {
 		return
 	}
 	// If import succeeded, broadcast the block
-	announceTimer.UpdateSince(block.ReceivedAt)
+	propAnnounceOutTimer.UpdateSince(block.ReceivedAt)
 	go f.broadcastBlock(block, false)

 	// Invoke the testing hook if needed
@@ -23,10 +23,24 @@ import (
 )

 var (
-	announceMeter  = metrics.NewMeter("eth/sync/RemoteAnnounces")
-	announceTimer  = metrics.NewTimer("eth/sync/LocalAnnounces")
-	broadcastMeter = metrics.NewMeter("eth/sync/RemoteBroadcasts")
-	broadcastTimer = metrics.NewTimer("eth/sync/LocalBroadcasts")
-	discardMeter   = metrics.NewMeter("eth/sync/DiscardedBlocks")
-	futureMeter    = metrics.NewMeter("eth/sync/FutureBlocks")
+	propAnnounceInMeter   = metrics.NewMeter("eth/fetcher/prop/announces/in")
+	propAnnounceOutTimer  = metrics.NewTimer("eth/fetcher/prop/announces/out")
+	propAnnounceDropMeter = metrics.NewMeter("eth/fetcher/prop/announces/drop")
+	propAnnounceDOSMeter  = metrics.NewMeter("eth/fetcher/prop/announces/dos")
+
+	propBroadcastInMeter   = metrics.NewMeter("eth/fetcher/prop/broadcasts/in")
+	propBroadcastOutTimer  = metrics.NewTimer("eth/fetcher/prop/broadcasts/out")
+	propBroadcastDropMeter = metrics.NewMeter("eth/fetcher/prop/broadcasts/drop")
+	propBroadcastDOSMeter  = metrics.NewMeter("eth/fetcher/prop/broadcasts/dos")
+
+	blockFetchMeter  = metrics.NewMeter("eth/fetcher/fetch/blocks")
+	headerFetchMeter = metrics.NewMeter("eth/fetcher/fetch/headers")
+	bodyFetchMeter   = metrics.NewMeter("eth/fetcher/fetch/bodies")
+
+	blockFilterInMeter   = metrics.NewMeter("eth/fetcher/filter/blocks/in")
+	blockFilterOutMeter  = metrics.NewMeter("eth/fetcher/filter/blocks/out")
+	headerFilterInMeter  = metrics.NewMeter("eth/fetcher/filter/headers/in")
+	headerFilterOutMeter = metrics.NewMeter("eth/fetcher/filter/headers/out")
+	bodyFilterInMeter    = metrics.NewMeter("eth/fetcher/filter/bodies/in")
+	bodyFilterOutMeter   = metrics.NewMeter("eth/fetcher/filter/bodies/out")
 )
@@ -42,22 +42,22 @@ var (
 	reqBlockInTrafficMeter    = metrics.NewMeter("eth/req/blocks/in/traffic")
 	reqBlockOutPacketsMeter   = metrics.NewMeter("eth/req/blocks/out/packets")
 	reqBlockOutTrafficMeter   = metrics.NewMeter("eth/req/blocks/out/traffic")
-	reqHeaderInPacketsMeter   = metrics.NewMeter("eth/req/header/in/packets")
-	reqHeaderInTrafficMeter   = metrics.NewMeter("eth/req/header/in/traffic")
-	reqHeaderOutPacketsMeter  = metrics.NewMeter("eth/req/header/out/packets")
-	reqHeaderOutTrafficMeter  = metrics.NewMeter("eth/req/header/out/traffic")
-	reqBodyInPacketsMeter     = metrics.NewMeter("eth/req/body/in/packets")
-	reqBodyInTrafficMeter     = metrics.NewMeter("eth/req/body/in/traffic")
-	reqBodyOutPacketsMeter    = metrics.NewMeter("eth/req/body/out/packets")
-	reqBodyOutTrafficMeter    = metrics.NewMeter("eth/req/body/out/traffic")
-	reqStateInPacketsMeter    = metrics.NewMeter("eth/req/state/in/packets")
-	reqStateInTrafficMeter    = metrics.NewMeter("eth/req/state/in/traffic")
-	reqStateOutPacketsMeter   = metrics.NewMeter("eth/req/state/out/packets")
-	reqStateOutTrafficMeter   = metrics.NewMeter("eth/req/state/out/traffic")
-	reqReceiptInPacketsMeter  = metrics.NewMeter("eth/req/receipt/in/packets")
-	reqReceiptInTrafficMeter  = metrics.NewMeter("eth/req/receipt/in/traffic")
-	reqReceiptOutPacketsMeter = metrics.NewMeter("eth/req/receipt/out/packets")
-	reqReceiptOutTrafficMeter = metrics.NewMeter("eth/req/receipt/out/traffic")
+	reqHeaderInPacketsMeter   = metrics.NewMeter("eth/req/headers/in/packets")
+	reqHeaderInTrafficMeter   = metrics.NewMeter("eth/req/headers/in/traffic")
+	reqHeaderOutPacketsMeter  = metrics.NewMeter("eth/req/headers/out/packets")
+	reqHeaderOutTrafficMeter  = metrics.NewMeter("eth/req/headers/out/traffic")
+	reqBodyInPacketsMeter     = metrics.NewMeter("eth/req/bodies/in/packets")
+	reqBodyInTrafficMeter     = metrics.NewMeter("eth/req/bodies/in/traffic")
+	reqBodyOutPacketsMeter    = metrics.NewMeter("eth/req/bodies/out/packets")
+	reqBodyOutTrafficMeter    = metrics.NewMeter("eth/req/bodies/out/traffic")
+	reqStateInPacketsMeter    = metrics.NewMeter("eth/req/states/in/packets")
+	reqStateInTrafficMeter    = metrics.NewMeter("eth/req/states/in/traffic")
+	reqStateOutPacketsMeter   = metrics.NewMeter("eth/req/states/out/packets")
+	reqStateOutTrafficMeter   = metrics.NewMeter("eth/req/states/out/traffic")
+	reqReceiptInPacketsMeter  = metrics.NewMeter("eth/req/receipts/in/packets")
+	reqReceiptInTrafficMeter  = metrics.NewMeter("eth/req/receipts/in/traffic")
+	reqReceiptOutPacketsMeter = metrics.NewMeter("eth/req/receipts/out/packets")
+	reqReceiptOutTrafficMeter = metrics.NewMeter("eth/req/receipts/out/traffic")
 	miscInPacketsMeter        = metrics.NewMeter("eth/misc/in/packets")
 	miscInTrafficMeter        = metrics.NewMeter("eth/misc/in/traffic")
 	miscOutPacketsMeter       = metrics.NewMeter("eth/misc/out/packets")