mirror of https://github.com/status-im/op-geth.git
Merge pull request #835 from obscuren/handler_errors
eth, eth/downloader: error handlers and td checks
Commit: ac85fdc75e
@@ -47,7 +47,7 @@ import _ "net/http/pprof"

 const (
 	ClientIdentifier = "Geth"
-	Version          = "0.9.14"
+	Version          = "0.9.15"
 )

 var (
@@ -49,6 +49,10 @@ func CalcDifficulty(block, parent *types.Header) *big.Int {
 }

 func CalculateTD(block, parent *types.Block) *big.Int {
+	if parent == nil {
+		return block.Difficulty()
+	}
+
 	td := new(big.Int).Add(parent.Td, block.Header().Difficulty)

 	return td
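As an aside, the CalculateTD change above guards against a missing parent (the genesis case) and otherwise accumulates a block's difficulty onto its parent's total difficulty. A standalone sketch of that accumulation rule, not part of the patch; the difficulty slice and function name are illustrative only:

package main

import (
	"fmt"
	"math/big"
)

// cumulativeTD mirrors the rule above: total difficulty is the parent's
// total difficulty plus the block's own difficulty; with no parent
// (genesis) the block's own difficulty is used as the starting value.
func cumulativeTD(difficulties []*big.Int) *big.Int {
	td := new(big.Int)
	for i, d := range difficulties {
		if i == 0 {
			td = new(big.Int).Set(d) // genesis: no parent TD to add to
			continue
		}
		td = new(big.Int).Add(td, d)
	}
	return td
}

func main() {
	diffs := []*big.Int{big.NewInt(131072), big.NewInt(131200), big.NewInt(131328)}
	fmt.Println(cumulativeTD(diffs)) // 393600
}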
@@ -89,6 +93,7 @@ type ChainManager struct {
 	futureBlocks *BlockCache

 	quit chan struct{}
+	wg   sync.WaitGroup
 }

 func NewChainManager(blockDb, stateDb common.Database, mux *event.TypeMux) *ChainManager {
@@ -478,6 +483,10 @@ func (self *ChainManager) CalcTotalDiff(block *types.Block) (*big.Int, error) {

 func (bc *ChainManager) Stop() {
 	close(bc.quit)
+
+	bc.wg.Wait()
+
+	glog.V(logger.Info).Infoln("Chain manager stopped")
 }

 type queueEvent struct {
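The Stop/InsertChain pairing above is the usual WaitGroup-guarded shutdown: every unit of work registers before it starts, and Stop closes the quit channel and then waits for in-flight work to drain. A minimal, self-contained sketch of the same pattern, independent of the patch (all names here are illustrative):

package main

import (
	"fmt"
	"sync"
)

// manager mirrors the Stop pattern above: stop closes the quit channel and
// then blocks on the WaitGroup until in-flight work has finished.
type manager struct {
	quit chan struct{}
	wg   sync.WaitGroup
}

func (m *manager) stop() {
	close(m.quit)
	m.wg.Wait()
	fmt.Println("stopped")
}

func main() {
	m := &manager{quit: make(chan struct{})}
	for i := 0; i < 3; i++ {
		m.wg.Add(1) // register before the goroutine starts, as InsertChain does on entry
		go func(i int) {
			defer m.wg.Done()
			fmt.Println("work", i)
		}(i)
	}
	m.stop()
}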
@@ -500,22 +509,30 @@ func (self *ChainManager) procFutureBlocks() {
 // InsertChain will attempt to insert the given chain in to the canonical chain or, otherwise, create a fork. It an error is returned
 // it will return the index number of the failing block as well an error describing what went wrong (for possible errors see core/errors.go).
 func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
+	self.wg.Add(1)
+	defer self.wg.Done()
+
 	// A queued approach to delivering events. This is generally faster than direct delivery and requires much less mutex acquiring.
 	var (
 		queue      = make([]interface{}, len(chain))
 		queueEvent = queueEvent{queue: queue}
-		stats      struct{ queued, processed int }
+		stats      struct{ queued, processed, ignored int }
 		tstart     = time.Now()
 	)
 	for i, block := range chain {
 		if block == nil {
 			continue
 		}
+		// Setting block.Td regardless of error (known for example) prevents errors down the line
+		// in the protocol handler
+		block.Td = new(big.Int).Set(CalculateTD(block, self.GetBlock(block.ParentHash())))
+
 		// Call in to the block processor and check for errors. It's likely that if one block fails
 		// all others will fail too (unless a known block is returned).
 		logs, err := self.processor.Process(block)
 		if err != nil {
 			if IsKnownBlockErr(err) {
+				stats.ignored++
 				continue
 			}

@@ -545,8 +562,6 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
 			return i, err
 		}

-		block.Td = new(big.Int).Set(CalculateTD(block, self.GetBlock(block.ParentHash())))
-
 		self.mu.Lock()
 		{
 			cblock := self.currentBlock
@@ -589,7 +604,7 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
 				queueEvent.canonicalCount++

 				if glog.V(logger.Debug) {
-					glog.Infof("inserted block #%d (%d TXs %d UNCs) (%x...)\n", block.Number(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4])
+					glog.Infof("[%v] inserted block #%d (%d TXs %d UNCs) (%x...)\n", time.Now().UnixNano(), block.Number(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4])
 				}
 			} else {
 				if glog.V(logger.Detail) {
@@ -607,10 +622,10 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {

 	}

-	if (stats.queued > 0 || stats.processed > 0) && bool(glog.V(logger.Info)) {
+	if (stats.queued > 0 || stats.processed > 0 || stats.ignored > 0) && bool(glog.V(logger.Info)) {
 		tend := time.Since(tstart)
 		start, end := chain[0], chain[len(chain)-1]
-		glog.Infof("imported %d block(s) %d queued in %v. #%v [%x / %x]\n", stats.processed, stats.queued, tend, end.Number(), start.Hash().Bytes()[:4], end.Hash().Bytes()[:4])
+		glog.Infof("imported %d block(s) (%d queued %d ignored) in %v. #%v [%x / %x]\n", stats.processed, stats.queued, stats.ignored, tend, end.Number(), start.Hash().Bytes()[:4], end.Hash().Bytes()[:4])
 	}

 	go self.eventMux.Post(queueEvent)
@@ -654,7 +669,7 @@ func (self *ChainManager) merge(oldBlock, newBlock *types.Block) {

 func (self *ChainManager) update() {
 	events := self.eventMux.Subscribe(queueEvent{})
-	futureTimer := time.NewTicker(5 * time.Second)
+	futureTimer := time.Tick(5 * time.Second)
 out:
 	for {
 		select {
@@ -681,7 +696,7 @@ out:
 				self.eventMux.Post(event)
 			}
 		}
-		case <-futureTimer.C:
+		case <-futureTimer:
 			self.procFutureBlocks()
 		case <-self.quit:
 			break out
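For reference, the two ticker styles the hunk above switches between behave differently: time.Tick returns a bare receive channel and its underlying ticker can never be stopped, which is only appropriate for loops that live as long as the process, while time.NewTicker can be stopped explicitly. A small standalone sketch contrasting them (intervals and prints are illustrative, not from the patch):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Style used after the change: a bare <-chan time.Time.
	tick := time.Tick(10 * time.Millisecond)

	// Style used before the change: an explicit Ticker that can be stopped.
	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()

	for i := 0; i < 2; i++ {
		select {
		case t := <-tick:
			fmt.Println("tick channel fired at", t)
		case t := <-ticker.C:
			fmt.Println("ticker fired at", t)
		}
	}
}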
@@ -219,7 +219,7 @@ func New(config *Config) (*Ethereum, error) {
 	}

 	eth.chainManager = core.NewChainManager(blockDb, stateDb, eth.EventMux())
-	eth.downloader = downloader.New(eth.chainManager.HasBlock, eth.chainManager.InsertChain)
+	eth.downloader = downloader.New(eth.chainManager.HasBlock, eth.chainManager.GetBlock)
 	eth.pow = ethash.New(eth.chainManager)
 	eth.txPool = core.NewTxPool(eth.EventMux(), eth.chainManager.State, eth.chainManager.GasLimit)
 	eth.blockProcessor = core.NewBlockProcessor(stateDb, extraDb, eth.pow, eth.txPool, eth.chainManager, eth.EventMux())
@@ -455,6 +455,7 @@ func (s *Ethereum) Stop() {
 	s.txSub.Unsubscribe() // quits txBroadcastLoop

 	s.protocolManager.Stop()
+	s.chainManager.Stop()
 	s.txPool.Stop()
 	s.eventMux.Stop()
 	if s.whisper != nil {
@@ -3,14 +3,11 @@ package downloader
 import (
 	"errors"
 	"fmt"
-	"math"
-	"math/big"
 	"sync"
 	"sync/atomic"
 	"time"

 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/logger"
 	"github.com/ethereum/go-ethereum/logger/glog"
@@ -27,16 +24,21 @@ var (
 	minDesiredPeerCount = 5 // Amount of peers desired to start syncing
 	blockTtl = 20 * time.Second // The amount of time it takes for a block request to time out

 	errLowTd = errors.New("peer's TD is too low")
 	errBusy = errors.New("busy")
 	errUnknownPeer = errors.New("peer's unknown or unhealthy")
-	errBadPeer = errors.New("action from bad peer ignored")
-	errTimeout = errors.New("timeout")
-	errEmptyHashSet = errors.New("empty hash set by peer")
-	errPeersUnavailable = errors.New("no peers available or all peers tried for block download process")
+	ErrBadPeer = errors.New("action from bad peer ignored")
+	errNoPeers = errors.New("no peers to keep download active")
+	errPendingQueue = errors.New("pending items in queue")
+	errTimeout = errors.New("timeout")
+	errEmptyHashSet = errors.New("empty hash set by peer")
+	errPeersUnavailable = errors.New("no peers available or all peers tried for block download process")
+	errAlreadyInPool = errors.New("hash already in pool")
+	errBlockNumberOverflow = errors.New("received block which overflows")
 )

 type hashCheckFn func(common.Hash) bool
+type getBlockFn func(common.Hash) *types.Block
 type chainInsertFn func(types.Blocks) (int, error)
 type hashIterFn func() (common.Hash, error)

@@ -51,6 +53,11 @@ type syncPack struct {
 	ignoreInitial bool
 }

+type hashPack struct {
+	peerId string
+	hashes []common.Hash
+}
+
 type Downloader struct {
 	mu    sync.RWMutex
 	queue *queue
@@ -58,29 +65,28 @@ type Downloader struct {
 	activePeer string

 	// Callbacks
 	hasBlock hashCheckFn
-	insertChain chainInsertFn
+	getBlock getBlockFn

 	// Status
 	fetchingHashes    int32
 	downloadingBlocks int32
-	processingBlocks  int32

 	// Channels
 	newPeerCh chan *peer
-	hashCh    chan []common.Hash
+	hashCh    chan hashPack
 	blockCh   chan blockPack
 }

-func New(hasBlock hashCheckFn, insertChain chainInsertFn) *Downloader {
+func New(hasBlock hashCheckFn, getBlock getBlockFn) *Downloader {
 	downloader := &Downloader{
 		queue:     newqueue(),
 		peers:     make(peers),
 		hasBlock:  hasBlock,
-		insertChain: insertChain,
+		getBlock:  getBlock,
 		newPeerCh: make(chan *peer, 1),
-		hashCh:    make(chan []common.Hash, 1),
+		hashCh:    make(chan hashPack, 1),
 		blockCh:   make(chan blockPack, 1),
 	}

 	return downloader
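The switch from a bare []common.Hash channel to a hashPack channel tags every asynchronous delivery with the id of the peer that produced it, so the receive loop can drop late responses from a peer that is no longer the active one. A standalone sketch of that pattern, not taken from the patch (types, ids and payloads are illustrative):

package main

import "fmt"

// pack plays the role of hashPack: the payload plus the id of the sender.
type pack struct {
	peerID string
	hashes []string
}

func main() {
	ch := make(chan pack, 2)
	active := "peer-2"

	ch <- pack{peerID: "peer-1", hashes: []string{"aa"}} // stale: peer-1 was replaced
	ch <- pack{peerID: "peer-2", hashes: []string{"bb", "cc"}}
	close(ch)

	for p := range ch {
		if p.peerID != active {
			fmt.Println("ignoring hashes from stale peer", p.peerID)
			continue
		}
		fmt.Println("accepted", len(p.hashes), "hashes from", p.peerID)
	}
}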
@@ -126,6 +132,12 @@ func (d *Downloader) Synchronise(id string, hash common.Hash) error {
 		return errBusy
 	}

+	// When a synchronisation attempt is made while the queue stil
+	// contains items we abort the sync attempt
+	if d.queue.size() > 0 {
+		return errPendingQueue
+	}
+
 	// Fetch the peer using the id or throw an error if the peer couldn't be found
 	p := d.peers[id]
 	if p == nil {
@@ -138,30 +150,87 @@ func (d *Downloader) Synchronise(id string, hash common.Hash) error {
 		return err
 	}

-	return d.process(p)
+	return nil
 }

-func (d *Downloader) getFromPeer(p *peer, hash common.Hash, ignoreInitial bool) error {
+// Done lets the downloader know that whatever previous hashes were taken
+// are processed. If the block count reaches zero and done is called
+// we reset the queue for the next batch of incoming hashes and blocks.
+func (d *Downloader) Done() {
+	d.queue.mu.Lock()
+	defer d.queue.mu.Unlock()
+
+	if len(d.queue.blocks) == 0 {
+		d.queue.resetNoTS()
+	}
+}
+
+// TakeBlocks takes blocks from the queue and yields them to the blockTaker handler
+// it's possible it yields no blocks
+func (d *Downloader) TakeBlocks() types.Blocks {
+	d.queue.mu.Lock()
+	defer d.queue.mu.Unlock()
+
+	var blocks types.Blocks
+	if len(d.queue.blocks) > 0 {
+		// Make sure the parent hash is known
+		if d.queue.blocks[0] != nil && !d.hasBlock(d.queue.blocks[0].ParentHash()) {
+			return nil
+		}
+
+		for _, block := range d.queue.blocks {
+			if block == nil {
+				break
+			}
+
+			blocks = append(blocks, block)
+		}
+		d.queue.blockOffset += len(blocks)
+		// delete the blocks from the slice and let them be garbage collected
+		// without this slice trick the blocks would stay in memory until nil
+		// would be assigned to d.queue.blocks
+		copy(d.queue.blocks, d.queue.blocks[len(blocks):])
+		for k, n := len(d.queue.blocks)-len(blocks), len(d.queue.blocks); k < n; k++ {
+			d.queue.blocks[k] = nil
+		}
+		d.queue.blocks = d.queue.blocks[:len(d.queue.blocks)-len(blocks)]
+
+		//d.queue.blocks = d.queue.blocks[len(blocks):]
+		if len(d.queue.blocks) == 0 {
+			d.queue.blocks = nil
+		}
+
+	}
+
+	return blocks
+}
+
+func (d *Downloader) Has(hash common.Hash) bool {
+	return d.queue.has(hash)
+}
+
+func (d *Downloader) getFromPeer(p *peer, hash common.Hash, ignoreInitial bool) (err error) {
 	d.activePeer = p.id
+	defer func() {
+		// reset on error
+		if err != nil {
+			d.queue.reset()
+		}
+	}()

 	glog.V(logger.Detail).Infoln("Synchronising with the network using:", p.id)
 	// Start the fetcher. This will block the update entirely
 	// interupts need to be send to the appropriate channels
 	// respectively.
-	if err := d.startFetchingHashes(p, hash, ignoreInitial); err != nil {
-		// handle error
-		glog.V(logger.Debug).Infoln("Error fetching hashes:", err)
-		// XXX Reset
+	if err = d.startFetchingHashes(p, hash, ignoreInitial); err != nil {
 		return err
 	}

 	// Start fetching blocks in paralel. The strategy is simple
 	// take any available peers, seserve a chunk for each peer available,
 	// let the peer deliver the chunkn and periodically check if a peer
-	// has timedout. When done downloading, process blocks.
-	if err := d.startFetchingBlocks(p); err != nil {
-		glog.V(logger.Debug).Infoln("Error downloading blocks:", err)
-		// XXX reset
+	// has timedout.
+	if err = d.startFetchingBlocks(p); err != nil {
 		return err
 	}

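TakeBlocks above drains a prefix of the block slice and explicitly nils the trailing pointer slots so the garbage collector can reclaim the blocks that were handed out. A small standalone sketch of that slice trick, not part of the patch (the element type and helper names are illustrative):

package main

import "fmt"

// takeFront drains the first n elements of a slice of pointers: copy the
// tail forward, nil the now-unused trailing slots so the GC can reclaim
// the taken elements, then reslice.
func takeFront(queue []*int, n int) (taken, rest []*int) {
	taken = append(taken, queue[:n]...)
	copy(queue, queue[n:])
	for k := len(queue) - n; k < len(queue); k++ {
		queue[k] = nil // drop the dangling references
	}
	return taken, queue[:len(queue)-n]
}

func main() {
	mk := func(v int) *int { return &v }
	q := []*int{mk(1), mk(2), mk(3), mk(4)}
	taken, rest := takeFront(q, 2)
	fmt.Println(len(taken), len(rest)) // 2 2
	fmt.Println(*taken[0], *rest[0])   // 1 3
}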
@@ -171,11 +240,15 @@ func (d *Downloader) getFromPeer(p *peer, hash common.Hash, ignoreInitial bool)
 }

 // XXX Make synchronous
-func (d *Downloader) startFetchingHashes(p *peer, hash common.Hash, ignoreInitial bool) error {
+func (d *Downloader) startFetchingHashes(p *peer, h common.Hash, ignoreInitial bool) error {
 	atomic.StoreInt32(&d.fetchingHashes, 1)
 	defer atomic.StoreInt32(&d.fetchingHashes, 0)

-	glog.V(logger.Debug).Infof("Downloading hashes (%x) from %s", hash.Bytes()[:4], p.id)
+	if d.queue.has(h) {
+		return errAlreadyInPool
+	}
+
+	glog.V(logger.Debug).Infof("Downloading hashes (%x) from %s", h[:4], p.id)

 	start := time.Now()

@@ -183,23 +256,38 @@ func (d *Downloader) startFetchingHashes(p *peer, hash common.Hash, ignoreInitia
 	// In such circumstances we don't need to download the block so don't add it to the queue.
 	if !ignoreInitial {
 		// Add the hash to the queue first
-		d.queue.hashPool.Add(hash)
+		d.queue.hashPool.Add(h)
 	}
 	// Get the first batch of hashes
-	p.getHashes(hash)
+	p.getHashes(h)

-	failureResponseTimer := time.NewTimer(hashTtl)
+	var (
+		failureResponseTimer = time.NewTimer(hashTtl)
+		attemptedPeers = make(map[string]bool) // attempted peers will help with retries
+		activePeer = p // active peer will help determine the current active peer
+		hash common.Hash // common and last hash
+	)
+	attemptedPeers[p.id] = true

 out:
 	for {
 		select {
-		case hashes := <-d.hashCh:
+		case hashPack := <-d.hashCh:
+			// make sure the active peer is giving us the hashes
+			if hashPack.peerId != activePeer.id {
+				glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)\n", hashPack.peerId)
+				break
+			}
+
 			failureResponseTimer.Reset(hashTtl)

-			var done bool // determines whether we're done fetching hashes (i.e. common hash found)
+			var (
+				hashes = hashPack.hashes
+				done bool // determines whether we're done fetching hashes (i.e. common hash found)
+			)
 			hashSet := set.New()
-			for _, hash := range hashes {
-				if d.hasBlock(hash) {
+			for _, hash = range hashes {
+				if d.hasBlock(hash) || d.queue.blockHashes.Has(hash) {
 					glog.V(logger.Debug).Infof("Found common hash %x\n", hash[:4])

 					done = true
@@ -212,24 +300,50 @@ out:

 			// Add hashes to the chunk set
 			if len(hashes) == 0 { // Make sure the peer actually gave you something valid
-				glog.V(logger.Debug).Infof("Peer (%s) responded with empty hash set\n", p.id)
+				glog.V(logger.Debug).Infof("Peer (%s) responded with empty hash set\n", activePeer.id)
 				d.queue.reset()

 				return errEmptyHashSet
 			} else if !done { // Check if we're done fetching
 				// Get the next set of hashes
-				p.getHashes(hashes[len(hashes)-1])
+				activePeer.getHashes(hash)
 			} else { // we're done
+				// The offset of the queue is determined by the highest known block
+				var offset int
+				if block := d.getBlock(hash); block != nil {
+					offset = int(block.NumberU64() + 1)
+				}
+				// allocate proper size for the queueue
+				d.queue.alloc(offset, d.queue.hashPool.Size())
+
 				break out
 			}
 		case <-failureResponseTimer.C:
 			glog.V(logger.Debug).Infof("Peer (%s) didn't respond in time for hash request\n", p.id)
-			// TODO instead of reseting the queue select a new peer from which we can start downloading hashes.
-			// 1. check for peer's best hash to be included in the current hash set;
-			// 2. resume from last point (hashes[len(hashes)-1]) using the newly selected peer.
-			d.queue.reset()
-
-			return errTimeout
+			var p *peer // p will be set if a peer can be found
+			// Attempt to find a new peer by checking inclusion of peers best hash in our
+			// already fetched hash list. This can't guarantee 100% correctness but does
+			// a fair job. This is always either correct or false incorrect.
+			for id, peer := range d.peers {
+				if d.queue.hashPool.Has(peer.recentHash) && !attemptedPeers[id] {
+					p = peer
+					break
+				}
+			}
+
+			// if all peers have been tried, abort the process entirely or if the hash is
+			// the zero hash.
+			if p == nil || (hash == common.Hash{}) {
+				d.queue.reset()
+				return errTimeout
+			}
+
+			// set p to the active peer. this will invalidate any hashes that may be returned
+			// by our previous (delayed) peer.
+			activePeer = p
+			p.getHashes(hash)
+			glog.V(logger.Debug).Infof("Hash fetching switched to new peer(%s)\n", p.id)
 		}
 	}
 	glog.V(logger.Detail).Infof("Downloaded hashes (%d) in %v\n", d.queue.hashPool.Size(), time.Since(start))
@@ -257,11 +371,27 @@ out:
 			// If the peer was previously banned and failed to deliver it's pack
 			// in a reasonable time frame, ignore it's message.
 			if d.peers[blockPack.peerId] != nil {
+				err := d.queue.deliver(blockPack.peerId, blockPack.blocks)
+				if err != nil {
+					glog.V(logger.Debug).Infof("deliver failed for peer %s: %v\n", blockPack.peerId, err)
+					// FIXME d.UnregisterPeer(blockPack.peerId)
+					break
+				}
+
+				if glog.V(logger.Debug) {
+					glog.Infof("adding %d blocks from: %s\n", len(blockPack.blocks), blockPack.peerId)
+				}
 				d.peers[blockPack.peerId].promote()
-				d.queue.deliver(blockPack.peerId, blockPack.blocks)
 				d.peers.setState(blockPack.peerId, idleState)
 			}
 		case <-ticker.C:
+			// after removing bad peers make sure we actually have suffucient peer left to keep downlading
+			if len(d.peers) == 0 {
+				d.queue.reset()
+
+				return errNoPeers
+			}
+
 			// If there are unrequested hashes left start fetching
 			// from the available peers.
 			if d.queue.hashPool.Size() > 0 {
@@ -310,7 +440,7 @@ out:
 			if time.Since(chunk.itime) > blockTtl {
 				badPeers = append(badPeers, pid)
 				// remove peer as good peer from peer list
-				//d.UnregisterPeer(pid)
+				// FIXME d.UnregisterPeer(pid)
 			}
 		}
 		d.queue.mu.Unlock()
@ -354,114 +484,16 @@ func (d *Downloader) AddHashes(id string, hashes []common.Hash) error {
|
||||||
return fmt.Errorf("received hashes from %s while active peer is %s", id, d.activePeer)
|
return fmt.Errorf("received hashes from %s while active peer is %s", id, d.activePeer)
|
||||||
}
|
}
|
||||||
|
|
||||||
d.hashCh <- hashes
|
if glog.V(logger.Detail) && len(hashes) != 0 {
|
||||||
|
from, to := hashes[0], hashes[len(hashes)-1]
|
||||||
|
glog.Infof("adding %d (T=%d) hashes [ %x / %x ] from: %s\n", len(hashes), d.queue.hashPool.Size(), from[:4], to[:4], id)
|
||||||
|
}
|
||||||
|
|
||||||
|
d.hashCh <- hashPack{id, hashes}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add an (unrequested) block to the downloader. This is usually done through the
|
|
||||||
// NewBlockMsg by the protocol handler.
|
|
||||||
// Adding blocks is done synchronously. if there are missing blocks, blocks will be
|
|
||||||
// fetched first. If the downloader is busy or if some other processed failed an error
|
|
||||||
// will be returned.
|
|
||||||
func (d *Downloader) AddBlock(id string, block *types.Block, td *big.Int) error {
|
|
||||||
hash := block.Hash()
|
|
||||||
|
|
||||||
if d.hasBlock(hash) {
|
|
||||||
return fmt.Errorf("known block %x", hash.Bytes()[:4])
|
|
||||||
}
|
|
||||||
|
|
||||||
peer := d.peers.getPeer(id)
|
|
||||||
// if the peer is in our healthy list of peers; update the td
|
|
||||||
// and add the block. Otherwise just ignore it
|
|
||||||
if peer == nil {
|
|
||||||
glog.V(logger.Detail).Infof("Ignored block from bad peer %s\n", id)
|
|
||||||
return errBadPeer
|
|
||||||
}
|
|
||||||
|
|
||||||
peer.mu.Lock()
|
|
||||||
peer.recentHash = block.Hash()
|
|
||||||
peer.mu.Unlock()
|
|
||||||
peer.promote()
|
|
||||||
|
|
||||||
glog.V(logger.Detail).Infoln("Inserting new block from:", id)
|
|
||||||
d.queue.addBlock(id, block)
|
|
||||||
|
|
||||||
// if neither go ahead to process
|
|
||||||
if d.isBusy() {
|
|
||||||
return errBusy
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if the parent of the received block is known.
|
|
||||||
// If the block is not know, request it otherwise, request.
|
|
||||||
phash := block.ParentHash()
|
|
||||||
if !d.hasBlock(phash) {
|
|
||||||
glog.V(logger.Detail).Infof("Missing parent %x, requires fetching\n", phash.Bytes()[:4])
|
|
||||||
|
|
||||||
// Get the missing hashes from the peer (synchronously)
|
|
||||||
err := d.getFromPeer(peer, peer.recentHash, true)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return d.process(peer)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Downloader) process(peer *peer) error {
|
|
||||||
atomic.StoreInt32(&d.processingBlocks, 1)
|
|
||||||
defer atomic.StoreInt32(&d.processingBlocks, 0)
|
|
||||||
|
|
||||||
// XXX this will move when optimised
|
|
||||||
// Sort the blocks by number. This bit needs much improvement. Right now
|
|
||||||
// it assumes full honesty form peers (i.e. it's not checked when the blocks
|
|
||||||
// link). We should at least check whihc queue match. This code could move
|
|
||||||
// to a seperate goroutine where it periodically checks for linked pieces.
|
|
||||||
types.BlockBy(types.Number).Sort(d.queue.blocks)
|
|
||||||
if len(d.queue.blocks) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
blocks = d.queue.blocks
|
|
||||||
err error
|
|
||||||
)
|
|
||||||
glog.V(logger.Debug).Infof("Inserting chain with %d blocks (#%v - #%v)\n", len(blocks), blocks[0].Number(), blocks[len(blocks)-1].Number())
|
|
||||||
|
|
||||||
// Loop untill we're out of blocks
|
|
||||||
for len(blocks) != 0 {
|
|
||||||
max := int(math.Min(float64(len(blocks)), 256))
|
|
||||||
// TODO check for parent error. When there's a parent error we should stop
|
|
||||||
// processing and start requesting the `block.hash` so that it's parent and
|
|
||||||
// grandparents can be requested and queued.
|
|
||||||
var i int
|
|
||||||
i, err = d.insertChain(blocks[:max])
|
|
||||||
if err != nil && core.IsParentErr(err) {
|
|
||||||
// Ignore the missing blocks. Handler should take care of anything that's missing.
|
|
||||||
glog.V(logger.Debug).Infof("Ignored block with missing parent (%d)\n", i)
|
|
||||||
blocks = blocks[i+1:]
|
|
||||||
|
|
||||||
continue
|
|
||||||
} else if err != nil {
|
|
||||||
// immediatly unregister the false peer but do not disconnect
|
|
||||||
d.UnregisterPeer(d.activePeer)
|
|
||||||
// Reset chain completely. This needs much, much improvement.
|
|
||||||
// instead: check all blocks leading down to this block false block and remove it
|
|
||||||
blocks = nil
|
|
||||||
break
|
|
||||||
}
|
|
||||||
blocks = blocks[max:]
|
|
||||||
}
|
|
||||||
|
|
||||||
// This will allow the GC to remove the in memory blocks
|
|
||||||
if len(blocks) == 0 {
|
|
||||||
d.queue.blocks = nil
|
|
||||||
} else {
|
|
||||||
d.queue.blocks = blocks
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Downloader) isFetchingHashes() bool {
|
func (d *Downloader) isFetchingHashes() bool {
|
||||||
return atomic.LoadInt32(&d.fetchingHashes) == 1
|
return atomic.LoadInt32(&d.fetchingHashes) == 1
|
||||||
}
|
}
|
||||||
|
@@ -470,12 +502,8 @@ func (d *Downloader) isDownloadingBlocks() bool {
 	return atomic.LoadInt32(&d.downloadingBlocks) == 1
 }

-func (d *Downloader) isProcessing() bool {
-	return atomic.LoadInt32(&d.processingBlocks) == 1
-}
-
 func (d *Downloader) isBusy() bool {
-	return d.isFetchingHashes() || d.isDownloadingBlocks() || d.isProcessing()
+	return d.isFetchingHashes() || d.isDownloadingBlocks()
 }

 func (d *Downloader) IsBusy() bool {
@ -8,8 +8,6 @@ import (
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var knownHash = common.Hash{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
|
var knownHash = common.Hash{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
|
||||||
|
@ -25,36 +23,47 @@ func createHashes(start, amount int) (hashes []common.Hash) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func createBlock(i int, prevHash, hash common.Hash) *types.Block {
|
||||||
|
header := &types.Header{Number: big.NewInt(int64(i))}
|
||||||
|
block := types.NewBlockWithHeader(header)
|
||||||
|
block.HeaderHash = hash
|
||||||
|
block.ParentHeaderHash = knownHash
|
||||||
|
return block
|
||||||
|
}
|
||||||
|
|
||||||
func createBlocksFromHashes(hashes []common.Hash) map[common.Hash]*types.Block {
|
func createBlocksFromHashes(hashes []common.Hash) map[common.Hash]*types.Block {
|
||||||
blocks := make(map[common.Hash]*types.Block)
|
blocks := make(map[common.Hash]*types.Block)
|
||||||
|
|
||||||
for i, hash := range hashes {
|
for i, hash := range hashes {
|
||||||
header := &types.Header{Number: big.NewInt(int64(i))}
|
blocks[hash] = createBlock(len(hashes)-i, knownHash, hash)
|
||||||
blocks[hash] = types.NewBlockWithHeader(header)
|
|
||||||
blocks[hash].HeaderHash = hash
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return blocks
|
return blocks
|
||||||
}
|
}
|
||||||
|
|
||||||
type downloadTester struct {
|
type downloadTester struct {
|
||||||
downloader *Downloader
|
downloader *Downloader
|
||||||
hashes []common.Hash
|
hashes []common.Hash
|
||||||
blocks map[common.Hash]*types.Block
|
blocks map[common.Hash]*types.Block
|
||||||
t *testing.T
|
t *testing.T
|
||||||
pcount int
|
pcount int
|
||||||
done chan bool
|
done chan bool
|
||||||
|
activePeerId string
|
||||||
insertedBlocks int
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func newTester(t *testing.T, hashes []common.Hash, blocks map[common.Hash]*types.Block) *downloadTester {
|
func newTester(t *testing.T, hashes []common.Hash, blocks map[common.Hash]*types.Block) *downloadTester {
|
||||||
tester := &downloadTester{t: t, hashes: hashes, blocks: blocks, done: make(chan bool)}
|
tester := &downloadTester{t: t, hashes: hashes, blocks: blocks, done: make(chan bool)}
|
||||||
downloader := New(tester.hasBlock, tester.insertChain)
|
downloader := New(tester.hasBlock, tester.getBlock)
|
||||||
tester.downloader = downloader
|
tester.downloader = downloader
|
||||||
|
|
||||||
return tester
|
return tester
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (dl *downloadTester) sync(peerId string, hash common.Hash) error {
|
||||||
|
dl.activePeerId = peerId
|
||||||
|
return dl.downloader.Synchronise(peerId, hash)
|
||||||
|
}
|
||||||
|
|
||||||
func (dl *downloadTester) hasBlock(hash common.Hash) bool {
|
func (dl *downloadTester) hasBlock(hash common.Hash) bool {
|
||||||
if knownHash == hash {
|
if knownHash == hash {
|
||||||
return true
|
return true
|
||||||
|
@ -62,14 +71,12 @@ func (dl *downloadTester) hasBlock(hash common.Hash) bool {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
func (dl *downloadTester) insertChain(blocks types.Blocks) (int, error) {
|
func (dl *downloadTester) getBlock(hash common.Hash) *types.Block {
|
||||||
dl.insertedBlocks += len(blocks)
|
return dl.blocks[knownHash]
|
||||||
|
|
||||||
return 0, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (dl *downloadTester) getHashes(hash common.Hash) error {
|
func (dl *downloadTester) getHashes(hash common.Hash) error {
|
||||||
dl.downloader.hashCh <- dl.hashes
|
dl.downloader.AddHashes(dl.activePeerId, dl.hashes)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -102,9 +109,55 @@ func (dl *downloadTester) badBlocksPeer(id string, td *big.Int, hash common.Hash
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestDownload(t *testing.T) {
|
func TestDownload(t *testing.T) {
|
||||||
glog.SetV(logger.Detail)
|
minDesiredPeerCount = 4
|
||||||
glog.SetToStderr(true)
|
blockTtl = 1 * time.Second
|
||||||
|
|
||||||
|
targetBlocks := 1000
|
||||||
|
hashes := createHashes(0, targetBlocks)
|
||||||
|
blocks := createBlocksFromHashes(hashes)
|
||||||
|
tester := newTester(t, hashes, blocks)
|
||||||
|
|
||||||
|
tester.newPeer("peer1", big.NewInt(10000), hashes[0])
|
||||||
|
tester.newPeer("peer2", big.NewInt(0), common.Hash{})
|
||||||
|
tester.badBlocksPeer("peer3", big.NewInt(0), common.Hash{})
|
||||||
|
tester.badBlocksPeer("peer4", big.NewInt(0), common.Hash{})
|
||||||
|
tester.activePeerId = "peer1"
|
||||||
|
|
||||||
|
err := tester.sync("peer1", hashes[0])
|
||||||
|
if err != nil {
|
||||||
|
t.Error("download error", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
inqueue := len(tester.downloader.queue.blocks)
|
||||||
|
if inqueue != targetBlocks {
|
||||||
|
t.Error("expected", targetBlocks, "have", inqueue)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMissing(t *testing.T) {
|
||||||
|
targetBlocks := 1000
|
||||||
|
hashes := createHashes(0, 1000)
|
||||||
|
extraHashes := createHashes(1001, 1003)
|
||||||
|
blocks := createBlocksFromHashes(append(extraHashes, hashes...))
|
||||||
|
tester := newTester(t, hashes, blocks)
|
||||||
|
|
||||||
|
tester.newPeer("peer1", big.NewInt(10000), hashes[len(hashes)-1])
|
||||||
|
|
||||||
|
hashes = append(extraHashes, hashes[:len(hashes)-1]...)
|
||||||
|
tester.newPeer("peer2", big.NewInt(0), common.Hash{})
|
||||||
|
|
||||||
|
err := tester.sync("peer1", hashes[0])
|
||||||
|
if err != nil {
|
||||||
|
t.Error("download error", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
inqueue := len(tester.downloader.queue.blocks)
|
||||||
|
if inqueue != targetBlocks {
|
||||||
|
t.Error("expected", targetBlocks, "have", inqueue)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTaking(t *testing.T) {
|
||||||
minDesiredPeerCount = 4
|
minDesiredPeerCount = 4
|
||||||
blockTtl = 1 * time.Second
|
blockTtl = 1 * time.Second
|
||||||
|
|
||||||
|
@ -118,37 +171,13 @@ func TestDownload(t *testing.T) {
|
||||||
tester.badBlocksPeer("peer3", big.NewInt(0), common.Hash{})
|
tester.badBlocksPeer("peer3", big.NewInt(0), common.Hash{})
|
||||||
tester.badBlocksPeer("peer4", big.NewInt(0), common.Hash{})
|
tester.badBlocksPeer("peer4", big.NewInt(0), common.Hash{})
|
||||||
|
|
||||||
err := tester.downloader.Synchronise("peer1", hashes[0])
|
err := tester.sync("peer1", hashes[0])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Error("download error", err)
|
t.Error("download error", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if tester.insertedBlocks != targetBlocks {
|
bs1 := tester.downloader.TakeBlocks()
|
||||||
t.Error("expected", targetBlocks, "have", tester.insertedBlocks)
|
if len(bs1) != 1000 {
|
||||||
}
|
t.Error("expected to take 1000, got", len(bs1))
|
||||||
}
|
|
||||||
|
|
||||||
func TestMissing(t *testing.T) {
|
|
||||||
glog.SetV(logger.Detail)
|
|
||||||
glog.SetToStderr(true)
|
|
||||||
|
|
||||||
targetBlocks := 1000
|
|
||||||
hashes := createHashes(0, 1000)
|
|
||||||
extraHashes := createHashes(1001, 1003)
|
|
||||||
blocks := createBlocksFromHashes(append(extraHashes, hashes...))
|
|
||||||
tester := newTester(t, hashes, blocks)
|
|
||||||
|
|
||||||
tester.newPeer("peer1", big.NewInt(10000), hashes[len(hashes)-1])
|
|
||||||
|
|
||||||
hashes = append(extraHashes, hashes[:len(hashes)-1]...)
|
|
||||||
tester.newPeer("peer2", big.NewInt(0), common.Hash{})
|
|
||||||
|
|
||||||
err := tester.downloader.Synchronise("peer1", hashes[0])
|
|
||||||
if err != nil {
|
|
||||||
t.Error("download error", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if tester.insertedBlocks != targetBlocks {
|
|
||||||
t.Error("expected", targetBlocks, "have", tester.insertedBlocks)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@@ -1,6 +1,7 @@
 package downloader

 import (
+	"fmt"
 	"math"
 	"sync"
 	"time"
@@ -18,7 +19,9 @@ type queue struct {

 	mu       sync.Mutex
 	fetching map[string]*chunk
-	blocks []*types.Block
+
+	blockOffset int
+	blocks      []*types.Block
 }

 func newqueue() *queue {
@@ -34,6 +37,10 @@ func (c *queue) reset() {
 	c.mu.Lock()
 	defer c.mu.Unlock()

+	c.resetNoTS()
+}
+func (c *queue) resetNoTS() {
+	c.blockOffset = 0
 	c.hashPool.Clear()
 	c.fetchPool.Clear()
 	c.blockHashes.Clear()
@@ -41,6 +48,10 @@ func (c *queue) reset() {
 	c.fetching = make(map[string]*chunk)
 }

+func (c *queue) size() int {
+	return c.hashPool.Size() + c.blockHashes.Size() + c.fetchPool.Size()
+}
+
 // reserve a `max` set of hashes for `p` peer.
 func (c *queue) get(p *peer, max int) *chunk {
 	c.mu.Lock()
@@ -89,22 +100,27 @@ func (c *queue) get(p *peer, max int) *chunk {
 }

 func (c *queue) has(hash common.Hash) bool {
-	return c.hashPool.Has(hash) || c.fetchPool.Has(hash)
+	return c.hashPool.Has(hash) || c.fetchPool.Has(hash) || c.blockHashes.Has(hash)
 }

-func (c *queue) addBlock(id string, block *types.Block) {
+func (c *queue) getBlock(hash common.Hash) *types.Block {
 	c.mu.Lock()
 	defer c.mu.Unlock()

-	// when adding a block make sure it doesn't already exist
-	if !c.blockHashes.Has(block.Hash()) {
-		c.hashPool.Remove(block.Hash())
-		c.blocks = append(c.blocks, block)
+	if !c.blockHashes.Has(hash) {
+		return nil
 	}
+
+	for _, block := range c.blocks {
+		if block.Hash() == hash {
+			return block
+		}
+	}
+	return nil
 }

 // deliver delivers a chunk to the queue that was requested of the peer
-func (c *queue) deliver(id string, blocks []*types.Block) {
+func (c *queue) deliver(id string, blocks []*types.Block) (err error) {
 	c.mu.Lock()
 	defer c.mu.Unlock()

@@ -119,16 +135,45 @@ func (c *queue) deliver(id string, blocks []*types.Block) {
 		chunk.peer.ignored.Merge(chunk.hashes)
 	}

+	// Add the blocks
+	for i, block := range blocks {
+		// See (1) for future limitation
+		n := int(block.NumberU64()) - c.blockOffset
+		if n > len(c.blocks) || n < 0 {
+			// set the error and set the blocks which could be processed
+			// abort the rest of the blocks (FIXME this could be improved)
+			err = fmt.Errorf("received block which overflow (N=%v O=%v)", block.Number(), c.blockOffset)
+			blocks = blocks[:i]
+			break
+		}
+		c.blocks[n] = block
+	}
 	// seperate the blocks and the hashes
 	blockHashes := chunk.fetchedHashes(blocks)
 	// merge block hashes
 	c.blockHashes.Merge(blockHashes)
-	// Add the blocks
-	c.blocks = append(c.blocks, blocks...)
 	// Add back whatever couldn't be delivered
 	c.hashPool.Merge(chunk.hashes)
+	// Remove the hashes from the fetch pool
 	c.fetchPool.Separate(chunk.hashes)
 	}

+	return
+}
+
+func (c *queue) alloc(offset, size int) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.blockOffset < offset {
+		c.blockOffset = offset
+	}
+
+	// (1) XXX at some point we could limit allocation to memory and use the disk
+	// to store future blocks.
+	if len(c.blocks) < size {
+		c.blocks = append(c.blocks, make([]*types.Block, size)...)
+	}
 }

 // puts puts sets of hashes on to the queue for fetching
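The deliver/alloc pair above switches the queue from appending delivered blocks to storing each block at index (block number - blockOffset) in a pre-allocated slice, rejecting anything outside the allocated window. A small standalone sketch of that offset-indexed delivery, not part of the patch (item type, numbers and names are illustrative):

package main

import "fmt"

// window is a queue indexed by (number - offset), as queue.deliver does above.
type window struct {
	offset int
	items  []*int
}

func (w *window) alloc(offset, size int) {
	if w.offset < offset {
		w.offset = offset
	}
	if len(w.items) < size {
		w.items = append(w.items, make([]*int, size)...)
	}
}

func (w *window) deliver(number int, item *int) error {
	n := number - w.offset
	if n < 0 || n >= len(w.items) {
		return fmt.Errorf("item %d overflows window (offset=%d size=%d)", number, w.offset, len(w.items))
	}
	w.items[n] = item
	return nil
}

func main() {
	w := &window{}
	w.alloc(100, 3) // expecting numbers 100..102
	v := 42
	fmt.Println(w.deliver(101, &v)) // <nil>
	fmt.Println(w.deliver(250, &v)) // overflow error
}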
|
@ -0,0 +1,62 @@
|
||||||
|
package downloader
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
|
"gopkg.in/fatih/set.v0"
|
||||||
|
)
|
||||||
|
|
||||||
|
func createHashSet(hashes []common.Hash) *set.Set {
|
||||||
|
hset := set.New()
|
||||||
|
|
||||||
|
for _, hash := range hashes {
|
||||||
|
hset.Add(hash)
|
||||||
|
}
|
||||||
|
|
||||||
|
return hset
|
||||||
|
}
|
||||||
|
|
||||||
|
func createBlocksFromHashSet(hashes *set.Set) []*types.Block {
|
||||||
|
blocks := make([]*types.Block, hashes.Size())
|
||||||
|
|
||||||
|
var i int
|
||||||
|
hashes.Each(func(v interface{}) bool {
|
||||||
|
blocks[i] = createBlock(i, common.Hash{}, v.(common.Hash))
|
||||||
|
i++
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
|
||||||
|
return blocks
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestChunking(t *testing.T) {
|
||||||
|
queue := newqueue()
|
||||||
|
peer1 := newPeer("peer1", common.Hash{}, nil, nil)
|
||||||
|
peer2 := newPeer("peer2", common.Hash{}, nil, nil)
|
||||||
|
|
||||||
|
// 99 + 1 (1 == known genesis hash)
|
||||||
|
hashes := createHashes(0, 99)
|
||||||
|
hashSet := createHashSet(hashes)
|
||||||
|
queue.put(hashSet)
|
||||||
|
|
||||||
|
chunk1 := queue.get(peer1, 99)
|
||||||
|
if chunk1 == nil {
|
||||||
|
t.Errorf("chunk1 is nil")
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
chunk2 := queue.get(peer2, 99)
|
||||||
|
if chunk2 == nil {
|
||||||
|
t.Errorf("chunk2 is nil")
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
|
||||||
|
if chunk1.hashes.Size() != 99 {
|
||||||
|
t.Error("expected chunk1 hashes to be 99, got", chunk1.hashes.Size())
|
||||||
|
}
|
||||||
|
|
||||||
|
if chunk2.hashes.Size() != 1 {
|
||||||
|
t.Error("expected chunk1 hashes to be 1, got", chunk2.hashes.Size())
|
||||||
|
}
|
||||||
|
}
|
eth/handler.go
@ -1,39 +1,5 @@
|
||||||
package eth
|
package eth
|
||||||
|
|
||||||
// XXX Fair warning, most of the code is re-used from the old protocol. Please be aware that most of this will actually change
|
|
||||||
// The idea is that most of the calls within the protocol will become synchronous.
|
|
||||||
// Block downloading and block processing will be complete seperate processes
|
|
||||||
/*
|
|
||||||
# Possible scenarios
|
|
||||||
|
|
||||||
// Synching scenario
|
|
||||||
// Use the best peer to synchronise
|
|
||||||
blocks, err := pm.downloader.Synchronise()
|
|
||||||
if err != nil {
|
|
||||||
// handle
|
|
||||||
break
|
|
||||||
}
|
|
||||||
pm.chainman.InsertChain(blocks)
|
|
||||||
|
|
||||||
// Receiving block with known parent
|
|
||||||
if parent_exist {
|
|
||||||
if err := pm.chainman.InsertChain(block); err != nil {
|
|
||||||
// handle
|
|
||||||
break
|
|
||||||
}
|
|
||||||
pm.BroadcastBlock(block)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Receiving block with unknown parent
|
|
||||||
blocks, err := pm.downloader.SynchroniseWithPeer(peer)
|
|
||||||
if err != nil {
|
|
||||||
// handle
|
|
||||||
break
|
|
||||||
}
|
|
||||||
pm.chainman.InsertChain(blocks)
|
|
||||||
|
|
||||||
*/
|
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"math"
|
"math"
|
||||||
|
@ -54,7 +20,9 @@ import (
|
||||||
|
|
||||||
const (
|
const (
|
||||||
peerCountTimeout = 12 * time.Second // Amount of time it takes for the peer handler to ignore minDesiredPeerCount
|
peerCountTimeout = 12 * time.Second // Amount of time it takes for the peer handler to ignore minDesiredPeerCount
|
||||||
minDesiredPeerCount = 5 // Amount of peers desired to start syncing
|
blockProcTimer = 500 * time.Millisecond
|
||||||
|
minDesiredPeerCount = 5 // Amount of peers desired to start syncing
|
||||||
|
blockProcAmount = 256
|
||||||
)
|
)
|
||||||
|
|
||||||
func errResp(code errCode, format string, v ...interface{}) error {
|
func errResp(code errCode, format string, v ...interface{}) error {
|
||||||
|
@ -91,6 +59,10 @@ type ProtocolManager struct {
|
||||||
|
|
||||||
newPeerCh chan *peer
|
newPeerCh chan *peer
|
||||||
quitSync chan struct{}
|
quitSync chan struct{}
|
||||||
|
// wait group is used for graceful shutdowns during downloading
|
||||||
|
// and processing
|
||||||
|
wg sync.WaitGroup
|
||||||
|
quit bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
|
// NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
|
||||||
|
@ -122,60 +94,11 @@ func NewProtocolManager(protocolVersion, networkId int, mux *event.TypeMux, txpo
|
||||||
return manager
|
return manager
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pm *ProtocolManager) syncHandler() {
|
func (pm *ProtocolManager) removePeer(peer *peer) {
|
||||||
// itimer is used to determine when to start ignoring `minDesiredPeerCount`
|
pm.pmu.Lock()
|
||||||
itimer := time.NewTimer(peerCountTimeout)
|
defer pm.pmu.Unlock()
|
||||||
out:
|
pm.downloader.UnregisterPeer(peer.id)
|
||||||
for {
|
delete(pm.peers, peer.id)
|
||||||
select {
|
|
||||||
case <-pm.newPeerCh:
|
|
||||||
// Meet the `minDesiredPeerCount` before we select our best peer
|
|
||||||
if len(pm.peers) < minDesiredPeerCount {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
// Find the best peer
|
|
||||||
peer := getBestPeer(pm.peers)
|
|
||||||
if peer == nil {
|
|
||||||
glog.V(logger.Debug).Infoln("Sync attempt cancelled. No peers available")
|
|
||||||
}
|
|
||||||
|
|
||||||
itimer.Stop()
|
|
||||||
go pm.synchronise(peer)
|
|
||||||
case <-itimer.C:
|
|
||||||
// The timer will make sure that the downloader keeps an active state
|
|
||||||
// in which it attempts to always check the network for highest td peers
|
|
||||||
// Either select the peer or restart the timer if no peers could
|
|
||||||
// be selected.
|
|
||||||
if peer := getBestPeer(pm.peers); peer != nil {
|
|
||||||
go pm.synchronise(peer)
|
|
||||||
} else {
|
|
||||||
itimer.Reset(5 * time.Second)
|
|
||||||
}
|
|
||||||
case <-pm.quitSync:
|
|
||||||
break out
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pm *ProtocolManager) synchronise(peer *peer) {
|
|
||||||
// Make sure the peer's TD is higher than our own. If not drop.
|
|
||||||
if peer.td.Cmp(pm.chainman.Td()) <= 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Check downloader if it's busy so it doesn't show the sync message
|
|
||||||
// for every attempty
|
|
||||||
if pm.downloader.IsBusy() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
glog.V(logger.Info).Infof("Synchronisation attempt using %s TD=%v\n", peer.id, peer.td)
|
|
||||||
// Get the hashes from the peer (synchronously)
|
|
||||||
err := pm.downloader.Synchronise(peer.id, peer.recentHash)
|
|
||||||
if err != nil {
|
|
||||||
// handle error
|
|
||||||
glog.V(logger.Debug).Infoln("error downloading:", err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pm *ProtocolManager) Start() {
|
func (pm *ProtocolManager) Start() {
|
||||||
|
@ -187,18 +110,26 @@ func (pm *ProtocolManager) Start() {
|
||||||
pm.minedBlockSub = pm.eventMux.Subscribe(core.NewMinedBlockEvent{})
|
pm.minedBlockSub = pm.eventMux.Subscribe(core.NewMinedBlockEvent{})
|
||||||
go pm.minedBroadcastLoop()
|
go pm.minedBroadcastLoop()
|
||||||
|
|
||||||
// sync handler
|
go pm.update()
|
||||||
go pm.syncHandler()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pm *ProtocolManager) Stop() {
|
func (pm *ProtocolManager) Stop() {
|
||||||
|
// Showing a log message. During download / process this could actually
|
||||||
|
// take between 5 to 10 seconds and therefor feedback is required.
|
||||||
|
glog.V(logger.Info).Infoln("Stopping ethereum protocol handler...")
|
||||||
|
|
||||||
|
pm.quit = true
|
||||||
pm.txSub.Unsubscribe() // quits txBroadcastLoop
|
pm.txSub.Unsubscribe() // quits txBroadcastLoop
|
||||||
pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop
|
pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop
|
||||||
close(pm.quitSync) // quits the sync handler
|
close(pm.quitSync) // quits the sync handler
|
||||||
|
|
||||||
|
// Wait for any process action
|
||||||
|
pm.wg.Wait()
|
||||||
|
|
||||||
|
glog.V(logger.Info).Infoln("Ethereum protocol handler stopped")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pm *ProtocolManager) newPeer(pv, nv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
|
func (pm *ProtocolManager) newPeer(pv, nv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
|
||||||
|
|
||||||
td, current, genesis := pm.chainman.Status()
|
td, current, genesis := pm.chainman.Status()
|
||||||
|
|
||||||
return newPeer(pv, nv, genesis, current, td, p, rw)
|
return newPeer(pv, nv, genesis, current, td, p, rw)
|
||||||
|
@ -214,10 +145,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
|
||||||
|
|
||||||
pm.downloader.RegisterPeer(p.id, p.recentHash, p.requestHashes, p.requestBlocks)
|
pm.downloader.RegisterPeer(p.id, p.recentHash, p.requestHashes, p.requestBlocks)
|
||||||
defer func() {
|
defer func() {
|
||||||
pm.pmu.Lock()
|
pm.removePeer(p)
|
||||||
defer pm.pmu.Unlock()
|
|
||||||
delete(pm.peers, p.id)
|
|
||||||
pm.downloader.UnregisterPeer(p.id)
|
|
||||||
}()
|
}()
|
||||||
|
|
||||||
// propagate existing transactions. new transactions appearing
|
// propagate existing transactions. new transactions appearing
|
||||||
|
@ -352,6 +280,9 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
|
||||||
// Add the block hash as a known hash to the peer. This will later be used to determine
|
// Add the block hash as a known hash to the peer. This will later be used to determine
|
||||||
// who should receive this.
|
// who should receive this.
|
||||||
p.blockHashes.Add(hash)
|
p.blockHashes.Add(hash)
|
||||||
|
// update the peer info
|
||||||
|
p.recentHash = hash
|
||||||
|
p.td = request.TD
|
||||||
|
|
||||||
_, chainHead, _ := self.chainman.Status()
|
_, chainHead, _ := self.chainman.Status()
|
||||||
|
|
||||||
|
@@ -376,24 +307,24 @@ func (self *ProtocolManager) handleMsg(p *peer) error {

 		// Attempt to insert the newly received by checking if the parent exists.
 		// if the parent exists we process the block and propagate to our peers
-		// if the parent does not exists we delegate to the downloader.
+		// otherwise synchronise with the peer
 		if self.chainman.HasBlock(request.Block.ParentHash()) {
 			if _, err := self.chainman.InsertChain(types.Blocks{request.Block}); err != nil {
-				// handle error
+				glog.V(logger.Error).Infoln("removed peer (", p.id, ") due to block error")
+
+				self.removePeer(p)
+
+				return nil
+			}
+
+			if err := self.verifyTd(p, request); err != nil {
+				glog.V(logger.Error).Infoln(err)
+				// XXX for now return nil so it won't disconnect (we should in the future)
 				return nil
 			}
 			self.BroadcastBlock(hash, request.Block)
 		} else {
-			// adding blocks is synchronous
-			go func() {
-				// TODO check parent error
-				err := self.downloader.AddBlock(p.id, request.Block, request.TD)
-				if err != nil {
-					glog.V(logger.Detail).Infoln("downloader err:", err)
-					return
-				}
-				self.BroadcastBlock(hash, request.Block)
-			}()
+			go self.synchronise(p)
 		}
 	default:
 		return errResp(ErrInvalidMsgCode, "%v", msg.Code)
@@ -401,6 +332,16 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
 	return nil
 }

+func (pm *ProtocolManager) verifyTd(peer *peer, request newBlockMsgData) error {
+	if request.Block.Td.Cmp(request.TD) != 0 {
+		glog.V(logger.Detail).Infoln(peer)
+
+		return fmt.Errorf("invalid TD on block(%v) from peer(%s): block.td=%v, request.td=%v", request.Block.Number(), peer.id, request.Block.Td, request.TD)
+	}
+
+	return nil
+}
+
 // BroadcastBlock will propagate the block to its connected peers. It will sort
 // out which peers do not contain the block in their block set and will do a
 // sqrt(peers) to determine the amount of peers we broadcast to.
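The verifyTd addition above compares the total difficulty computed locally for the received block against the TD the peer advertised in the message and rejects a mismatch. A standalone sketch of the same comparison, not part of the patch (values and names are made up for illustration):

package main

import (
	"fmt"
	"math/big"
)

// checkAdvertisedTD rejects a block whose locally computed total difficulty
// does not match the TD the sending peer advertised.
func checkAdvertisedTD(localTD, advertisedTD *big.Int, peer string) error {
	if localTD.Cmp(advertisedTD) != 0 {
		return fmt.Errorf("invalid TD from peer(%s): local=%v, advertised=%v", peer, localTD, advertisedTD)
	}
	return nil
}

func main() {
	fmt.Println(checkAdvertisedTD(big.NewInt(1000), big.NewInt(1000), "peer-1")) // <nil>
	fmt.Println(checkAdvertisedTD(big.NewInt(1000), big.NewInt(900), "peer-2"))  // error
}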
@ -421,7 +362,7 @@ func (pm *ProtocolManager) BroadcastBlock(hash common.Hash, block *types.Block)
|
||||||
for _, peer := range peers {
|
for _, peer := range peers {
|
||||||
peer.sendNewBlock(block)
|
peer.sendNewBlock(block)
|
||||||
}
|
}
|
||||||
glog.V(logger.Detail).Infoln("broadcast block to", len(peers), "peers. Total propagation time:", time.Since(block.ReceivedAt))
|
glog.V(logger.Detail).Infoln("broadcast block to", len(peers), "peers. Total processing time:", time.Since(block.ReceivedAt))
|
||||||
}
|
}
|
||||||
|
|
||||||
// BroadcastTx will propagate the block to its connected peers. It will sort
|
// BroadcastTx will propagate the block to its connected peers. It will sort
|
||||||
|
|
|
@ -0,0 +1,108 @@
|
||||||
|
package eth
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/eth/downloader"
|
||||||
|
"github.com/ethereum/go-ethereum/logger"
|
||||||
|
"github.com/ethereum/go-ethereum/logger/glog"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Sync contains all synchronisation code for the eth protocol
|
||||||
|
|
||||||
|
func (pm *ProtocolManager) update() {
|
||||||
|
// itimer is used to determine when to start ignoring `minDesiredPeerCount`
|
||||||
|
itimer := time.NewTimer(peerCountTimeout)
|
||||||
|
// btimer is used for picking of blocks from the downloader
|
||||||
|
btimer := time.Tick(blockProcTimer)
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-pm.newPeerCh:
|
||||||
|
// Meet the `minDesiredPeerCount` before we select our best peer
|
||||||
|
if len(pm.peers) < minDesiredPeerCount {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find the best peer
|
||||||
|
peer := getBestPeer(pm.peers)
|
||||||
|
if peer == nil {
|
||||||
|
glog.V(logger.Debug).Infoln("Sync attempt cancelled. No peers available")
|
||||||
|
}
|
||||||
|
|
||||||
|
itimer.Stop()
|
||||||
|
go pm.synchronise(peer)
|
||||||
|
case <-itimer.C:
|
||||||
|
// The timer will make sure that the downloader keeps an active state
|
||||||
|
// in which it attempts to always check the network for highest td peers
|
||||||
|
// Either select the peer or restart the timer if no peers could
|
||||||
|
// be selected.
|
||||||
|
if peer := getBestPeer(pm.peers); peer != nil {
|
||||||
|
go pm.synchronise(peer)
|
||||||
|
} else {
|
||||||
|
itimer.Reset(5 * time.Second)
|
||||||
|
}
|
||||||
|
case <-btimer:
|
||||||
|
go pm.processBlocks()
|
||||||
|
case <-pm.quitSync:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// processBlocks will attempt to reconstruct a chain by checking the first item and check if it's
|
||||||
|
// a known parent. The first block in the chain may be unknown during downloading. When the
|
||||||
|
// downloader isn't downloading blocks will be dropped with an unknown parent until either it
|
||||||
|
// has depleted the list or found a known parent.
|
||||||
|
func (pm *ProtocolManager) processBlocks() error {
|
||||||
|
pm.wg.Add(1)
|
||||||
|
defer pm.wg.Done()
|
||||||
|
|
||||||
|
blocks := pm.downloader.TakeBlocks()
|
||||||
|
if len(blocks) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
defer pm.downloader.Done()
|
||||||
|
|
||||||
|
glog.V(logger.Debug).Infof("Inserting chain with %d blocks (#%v - #%v)\n", len(blocks), blocks[0].Number(), blocks[len(blocks)-1].Number())
|
||||||
|
|
||||||
|
for len(blocks) != 0 && !pm.quit {
|
||||||
|
max := int(math.Min(float64(len(blocks)), float64(blockProcAmount)))
|
||||||
|
_, err := pm.chainman.InsertChain(blocks[:max])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
blocks = blocks[max:]
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pm *ProtocolManager) synchronise(peer *peer) {
|
||||||
|
// Make sure the peer's TD is higher than our own. If not drop.
|
||||||
|
if peer.td.Cmp(pm.chainman.Td()) <= 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Check downloader if it's busy so it doesn't show the sync message
|
||||||
|
// for every attempty
|
||||||
|
if pm.downloader.IsBusy() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// FIXME if we have the hash in our chain and the TD of the peer is
|
||||||
|
// much higher than ours, something is wrong with us or the peer.
|
||||||
|
// Check if the hash is on our own chain
|
||||||
|
if pm.chainman.HasBlock(peer.recentHash) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the hashes from the peer (synchronously)
|
||||||
|
err := pm.downloader.Synchronise(peer.id, peer.recentHash)
|
||||||
|
if err != nil && err == downloader.ErrBadPeer {
|
||||||
|
glog.V(logger.Debug).Infoln("removed peer from peer set due to bad action")
|
||||||
|
pm.removePeer(peer)
|
||||||
|
} else if err != nil {
|
||||||
|
// handle error
|
||||||
|
glog.V(logger.Detail).Infoln("error downloading:", err)
|
||||||
|
}
|
||||||
|
}
|