Merge pull request #805 from obscuren/download_improvements

eth, eth/downloader: improve downloader and remove asynchronousness
This commit is contained in:
Jeffrey Wilcke 2015-04-24 15:56:17 -07:00
commit 8d09f95bc7
17 changed files with 199 additions and 233 deletions

View File

@ -47,7 +47,7 @@ import _ "net/http/pprof"
const ( const (
ClientIdentifier = "Geth" ClientIdentifier = "Geth"
Version = "0.9.11" Version = "0.9.12"
) )
var app = utils.NewApp(Version, "the go-ethereum command line interface") var app = utils.NewApp(Version, "the go-ethereum command line interface")

View File

@ -317,7 +317,7 @@ func GetChain(ctx *cli.Context) (*core.ChainManager, common.Database, common.Dat
eventMux := new(event.TypeMux) eventMux := new(event.TypeMux)
chainManager := core.NewChainManager(blockDb, stateDb, eventMux) chainManager := core.NewChainManager(blockDb, stateDb, eventMux)
pow := ethash.New(chainManager) pow := ethash.New(chainManager)
txPool := core.NewTxPool(eventMux, chainManager.State) txPool := core.NewTxPool(eventMux, chainManager.State, chainManager.GasLimit)
blockProcessor := core.NewBlockProcessor(stateDb, extraDb, pow, txPool, chainManager, eventMux) blockProcessor := core.NewBlockProcessor(stateDb, extraDb, pow, txPool, chainManager, eventMux)
chainManager.SetProcessor(blockProcessor) chainManager.SetProcessor(blockProcessor)

View File

@ -284,6 +284,7 @@ func (self *testFrontend) testResolver() *resolver.Resolver {
} }
func TestNatspecE2E(t *testing.T) { func TestNatspecE2E(t *testing.T) {
t.Skip()
tf := testInit(t) tf := testInit(t)
defer tf.ethereum.Stop() defer tf.ethereum.Stop()

View File

@ -124,7 +124,7 @@ func newChainManager(block *types.Block, eventMux *event.TypeMux, db common.Data
// block processor with fake pow // block processor with fake pow
func newBlockProcessor(db common.Database, cman *ChainManager, eventMux *event.TypeMux) *BlockProcessor { func newBlockProcessor(db common.Database, cman *ChainManager, eventMux *event.TypeMux) *BlockProcessor {
chainMan := newChainManager(nil, eventMux, db) chainMan := newChainManager(nil, eventMux, db)
txpool := NewTxPool(eventMux, chainMan.State) txpool := NewTxPool(eventMux, chainMan.State, chainMan.GasLimit)
bman := NewBlockProcessor(db, db, FakePow{}, txpool, chainMan, eventMux) bman := NewBlockProcessor(db, db, FakePow{}, txpool, chainMan, eventMux)
return bman return bman
} }

View File

@ -78,11 +78,12 @@ type ChainManager struct {
eventMux *event.TypeMux eventMux *event.TypeMux
genesisBlock *types.Block genesisBlock *types.Block
// Last known total difficulty // Last known total difficulty
mu sync.RWMutex mu sync.RWMutex
tsmu sync.RWMutex tsmu sync.RWMutex
td *big.Int td *big.Int
currentBlock *types.Block currentBlock *types.Block
lastBlockHash common.Hash lastBlockHash common.Hash
currentGasLimit *big.Int
transState *state.StateDB transState *state.StateDB
txState *state.ManagedState txState *state.ManagedState
@ -95,12 +96,13 @@ type ChainManager struct {
func NewChainManager(blockDb, stateDb common.Database, mux *event.TypeMux) *ChainManager { func NewChainManager(blockDb, stateDb common.Database, mux *event.TypeMux) *ChainManager {
bc := &ChainManager{ bc := &ChainManager{
blockDb: blockDb, blockDb: blockDb,
stateDb: stateDb, stateDb: stateDb,
genesisBlock: GenesisBlock(stateDb), genesisBlock: GenesisBlock(stateDb),
eventMux: mux, eventMux: mux,
quit: make(chan struct{}), quit: make(chan struct{}),
cache: NewBlockCache(blockCacheLimit), cache: NewBlockCache(blockCacheLimit),
currentGasLimit: new(big.Int),
} }
bc.setLastBlock() bc.setLastBlock()
@ -157,6 +159,10 @@ func (self *ChainManager) Td() *big.Int {
return self.td return self.td
} }
func (self *ChainManager) GasLimit() *big.Int {
return self.currentGasLimit
}
func (self *ChainManager) LastBlockHash() common.Hash { func (self *ChainManager) LastBlockHash() common.Hash {
self.mu.RLock() self.mu.RLock()
defer self.mu.RUnlock() defer self.mu.RUnlock()
@ -652,6 +658,7 @@ out:
// We need some control over the mining operation. Acquiring locks and waiting for the miner to create new block takes too long // We need some control over the mining operation. Acquiring locks and waiting for the miner to create new block takes too long
// and in most cases isn't even necessary. // and in most cases isn't even necessary.
if i+1 == ev.canonicalCount { if i+1 == ev.canonicalCount {
self.currentGasLimit = CalcGasLimit(self.GetBlock(event.Block.ParentHash()), event.Block)
self.eventMux.Post(ChainHeadEvent{event.Block}) self.eventMux.Post(ChainHeadEvent{event.Block})
} }
case ChainSplitEvent: case ChainSplitEvent:

View File

@ -256,7 +256,7 @@ func TestChainInsertions(t *testing.T) {
var eventMux event.TypeMux var eventMux event.TypeMux
chainMan := NewChainManager(db, db, &eventMux) chainMan := NewChainManager(db, db, &eventMux)
txPool := NewTxPool(&eventMux, chainMan.State) txPool := NewTxPool(&eventMux, chainMan.State, func() *big.Int { return big.NewInt(100000000) })
blockMan := NewBlockProcessor(db, db, nil, txPool, chainMan, &eventMux) blockMan := NewBlockProcessor(db, db, nil, txPool, chainMan, &eventMux)
chainMan.SetProcessor(blockMan) chainMan.SetProcessor(blockMan)
@ -302,7 +302,7 @@ func TestChainMultipleInsertions(t *testing.T) {
} }
var eventMux event.TypeMux var eventMux event.TypeMux
chainMan := NewChainManager(db, db, &eventMux) chainMan := NewChainManager(db, db, &eventMux)
txPool := NewTxPool(&eventMux, chainMan.State) txPool := NewTxPool(&eventMux, chainMan.State, func() *big.Int { return big.NewInt(100000000) })
blockMan := NewBlockProcessor(db, db, nil, txPool, chainMan, &eventMux) blockMan := NewBlockProcessor(db, db, nil, txPool, chainMan, &eventMux)
chainMan.SetProcessor(blockMan) chainMan.SetProcessor(blockMan)
done := make(chan bool, max) done := make(chan bool, max)

View File

@ -23,6 +23,7 @@ var (
ErrNonExistentAccount = errors.New("Account does not exist") ErrNonExistentAccount = errors.New("Account does not exist")
ErrInsufficientFunds = errors.New("Insufficient funds") ErrInsufficientFunds = errors.New("Insufficient funds")
ErrIntrinsicGas = errors.New("Intrinsic gas too low") ErrIntrinsicGas = errors.New("Intrinsic gas too low")
ErrGasLimit = errors.New("Exceeds block gas limit")
) )
const txPoolQueueSize = 50 const txPoolQueueSize = 50
@ -52,6 +53,8 @@ type TxPool struct {
quit chan bool quit chan bool
// The state function which will allow us to do some pre checkes // The state function which will allow us to do some pre checkes
currentState stateFn currentState stateFn
// The current gas limit function callback
gasLimit func() *big.Int
// The actual pool // The actual pool
txs map[common.Hash]*types.Transaction txs map[common.Hash]*types.Transaction
invalidHashes *set.Set invalidHashes *set.Set
@ -63,7 +66,7 @@ type TxPool struct {
eventMux *event.TypeMux eventMux *event.TypeMux
} }
func NewTxPool(eventMux *event.TypeMux, currentStateFn stateFn) *TxPool { func NewTxPool(eventMux *event.TypeMux, currentStateFn stateFn, gasLimitFn func() *big.Int) *TxPool {
txPool := &TxPool{ txPool := &TxPool{
txs: make(map[common.Hash]*types.Transaction), txs: make(map[common.Hash]*types.Transaction),
queue: make(map[common.Address]types.Transactions), queue: make(map[common.Address]types.Transactions),
@ -72,6 +75,7 @@ func NewTxPool(eventMux *event.TypeMux, currentStateFn stateFn) *TxPool {
eventMux: eventMux, eventMux: eventMux,
invalidHashes: set.New(), invalidHashes: set.New(),
currentState: currentStateFn, currentState: currentStateFn,
gasLimit: gasLimitFn,
} }
return txPool return txPool
} }
@ -116,6 +120,10 @@ func (pool *TxPool) ValidateTransaction(tx *types.Transaction) error {
return ErrNonExistentAccount return ErrNonExistentAccount
} }
if pool.gasLimit().Cmp(tx.GasLimit) < 0 {
return ErrGasLimit
}
if pool.currentState().GetBalance(from).Cmp(new(big.Int).Mul(tx.Price, tx.GasLimit)) < 0 { if pool.currentState().GetBalance(from).Cmp(new(big.Int).Mul(tx.Price, tx.GasLimit)) < 0 {
return ErrInsufficientFunds return ErrInsufficientFunds
} }

View File

@ -23,7 +23,7 @@ func setupTxPool() (*TxPool, *ecdsa.PrivateKey) {
var m event.TypeMux var m event.TypeMux
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
return NewTxPool(&m, func() *state.StateDB { return statedb }), key return NewTxPool(&m, func() *state.StateDB { return statedb }, func() *big.Int { return big.NewInt(1000000) }), key
} }
func TestInvalidTransactions(t *testing.T) { func TestInvalidTransactions(t *testing.T) {

View File

@ -217,9 +217,9 @@ func New(config *Config) (*Ethereum, error) {
} }
eth.chainManager = core.NewChainManager(blockDb, stateDb, eth.EventMux()) eth.chainManager = core.NewChainManager(blockDb, stateDb, eth.EventMux())
eth.downloader = downloader.New(eth.chainManager.HasBlock, eth.chainManager.InsertChain, eth.chainManager.Td) eth.downloader = downloader.New(eth.chainManager.HasBlock, eth.chainManager.InsertChain)
eth.pow = ethash.New(eth.chainManager) eth.pow = ethash.New(eth.chainManager)
eth.txPool = core.NewTxPool(eth.EventMux(), eth.chainManager.State) eth.txPool = core.NewTxPool(eth.EventMux(), eth.chainManager.State, eth.chainManager.GasLimit)
eth.blockProcessor = core.NewBlockProcessor(stateDb, extraDb, eth.pow, eth.txPool, eth.chainManager, eth.EventMux()) eth.blockProcessor = core.NewBlockProcessor(stateDb, extraDb, eth.pow, eth.txPool, eth.chainManager, eth.EventMux())
eth.chainManager.SetProcessor(eth.blockProcessor) eth.chainManager.SetProcessor(eth.blockProcessor)
eth.whisper = whisper.New() eth.whisper = whisper.New()
@ -447,7 +447,7 @@ func (self *Ethereum) SuggestPeer(nodeURL string) error {
} }
func (s *Ethereum) Stop() { func (s *Ethereum) Stop() {
s.txSub.Unsubscribe() // quits txBroadcastLoop s.txSub.Unsubscribe() // quits txBroadcastLoop
s.protocolManager.Stop() s.protocolManager.Stop()
s.txPool.Stop() s.txPool.Stop()

View File

@ -39,7 +39,6 @@ var (
type hashCheckFn func(common.Hash) bool type hashCheckFn func(common.Hash) bool
type chainInsertFn func(types.Blocks) error type chainInsertFn func(types.Blocks) error
type hashIterFn func() (common.Hash, error) type hashIterFn func() (common.Hash, error)
type currentTdFn func() *big.Int
type blockPack struct { type blockPack struct {
peerId string peerId string
@ -61,7 +60,6 @@ type Downloader struct {
// Callbacks // Callbacks
hasBlock hashCheckFn hasBlock hashCheckFn
insertChain chainInsertFn insertChain chainInsertFn
currentTd currentTdFn
// Status // Status
fetchingHashes int32 fetchingHashes int32
@ -70,27 +68,20 @@ type Downloader struct {
// Channels // Channels
newPeerCh chan *peer newPeerCh chan *peer
syncCh chan syncPack
hashCh chan []common.Hash hashCh chan []common.Hash
blockCh chan blockPack blockCh chan blockPack
quit chan struct{}
} }
func New(hasBlock hashCheckFn, insertChain chainInsertFn, currentTd currentTdFn) *Downloader { func New(hasBlock hashCheckFn, insertChain chainInsertFn) *Downloader {
downloader := &Downloader{ downloader := &Downloader{
queue: newqueue(), queue: newqueue(),
peers: make(peers), peers: make(peers),
hasBlock: hasBlock, hasBlock: hasBlock,
insertChain: insertChain, insertChain: insertChain,
currentTd: currentTd,
newPeerCh: make(chan *peer, 1), newPeerCh: make(chan *peer, 1),
syncCh: make(chan syncPack, 1),
hashCh: make(chan []common.Hash, 1), hashCh: make(chan []common.Hash, 1),
blockCh: make(chan blockPack, 1), blockCh: make(chan blockPack, 1),
quit: make(chan struct{}),
} }
go downloader.peerHandler()
go downloader.update()
return downloader return downloader
} }
@ -99,18 +90,17 @@ func (d *Downloader) Stats() (current int, max int) {
return d.queue.blockHashes.Size(), d.queue.fetchPool.Size() + d.queue.hashPool.Size() return d.queue.blockHashes.Size(), d.queue.fetchPool.Size() + d.queue.hashPool.Size()
} }
func (d *Downloader) RegisterPeer(id string, td *big.Int, hash common.Hash, getHashes hashFetcherFn, getBlocks blockFetcherFn) error { func (d *Downloader) RegisterPeer(id string, hash common.Hash, getHashes hashFetcherFn, getBlocks blockFetcherFn) error {
d.mu.Lock() d.mu.Lock()
defer d.mu.Unlock() defer d.mu.Unlock()
glog.V(logger.Detail).Infoln("Register peer", id, "TD =", td) glog.V(logger.Detail).Infoln("Register peer", id)
// Create a new peer and add it to the list of known peers // Create a new peer and add it to the list of known peers
peer := newPeer(id, td, hash, getHashes, getBlocks) peer := newPeer(id, hash, getHashes, getBlocks)
// add peer to our peer set // add peer to our peer set
d.peers[id] = peer d.peers[id] = peer
// broadcast new peer // broadcast new peer
d.newPeerCh <- peer
return nil return nil
} }
@ -125,72 +115,59 @@ func (d *Downloader) UnregisterPeer(id string) {
delete(d.peers, id) delete(d.peers, id)
} }
func (d *Downloader) peerHandler() { // SynchroniseWithPeer will select the peer and use it for synchronising. If an empty string is given
// itimer is used to determine when to start ignoring `minDesiredPeerCount` // it will use the best peer possible and synchronise if it's TD is higher than our own. If any of the
itimer := time.NewTimer(peerCountTimeout) // checks fail an error will be returned. This method is synchronous
out: func (d *Downloader) Synchronise(id string, hash common.Hash) error {
for {
select {
case <-d.newPeerCh:
// Meet the `minDesiredPeerCount` before we select our best peer
if len(d.peers) < minDesiredPeerCount {
break
}
itimer.Stop()
d.selectPeer(d.peers.bestPeer())
case <-itimer.C:
// The timer will make sure that the downloader keeps an active state
// in which it attempts to always check the network for highest td peers
// Either select the peer or restart the timer if no peers could
// be selected.
if peer := d.peers.bestPeer(); peer != nil {
d.selectPeer(d.peers.bestPeer())
} else {
itimer.Reset(5 * time.Second)
}
case <-d.quit:
break out
}
}
}
func (d *Downloader) selectPeer(p *peer) {
// Make sure it's doing neither. Once done we can restart the // Make sure it's doing neither. Once done we can restart the
// downloading process if the TD is higher. For now just get on // downloading process if the TD is higher. For now just get on
// with whatever is going on. This prevents unecessary switching. // with whatever is going on. This prevents unecessary switching.
if d.isBusy() { if d.isBusy() {
return return errBusy
}
// selected peer must be better than our own
// XXX we also check the peer's recent hash to make sure we
// don't have it. Some peers report (i think) incorrect TD.
if p.td.Cmp(d.currentTd()) <= 0 || d.hasBlock(p.recentHash) {
return
} }
glog.V(logger.Detail).Infoln("New peer with highest TD =", p.td) // Fetch the peer using the id or throw an error if the peer couldn't be found
d.syncCh <- syncPack{p, p.recentHash, false} p := d.peers[id]
if p == nil {
return errUnknownPeer
}
// Get the hash from the peer and initiate the downloading progress.
err := d.getFromPeer(p, hash, false)
if err != nil {
return err
}
return d.process(p)
} }
func (d *Downloader) update() { func (d *Downloader) getFromPeer(p *peer, hash common.Hash, ignoreInitial bool) error {
out: d.activePeer = p.id
for {
select {
case sync := <-d.syncCh:
var peer *peer = sync.peer
err := d.getFromPeer(peer, sync.hash, sync.ignoreInitial)
if err != nil {
glog.V(logger.Detail).Infoln(err)
break
}
d.process() glog.V(logger.Detail).Infoln("Synchronising with the network using:", p.id)
case <-d.quit: // Start the fetcher. This will block the update entirely
break out // interupts need to be send to the appropriate channels
} // respectively.
if err := d.startFetchingHashes(p, hash, ignoreInitial); err != nil {
// handle error
glog.V(logger.Debug).Infoln("Error fetching hashes:", err)
// XXX Reset
return err
} }
// Start fetching blocks in paralel. The strategy is simple
// take any available peers, seserve a chunk for each peer available,
// let the peer deliver the chunkn and periodically check if a peer
// has timedout. When done downloading, process blocks.
if err := d.startFetchingBlocks(p); err != nil {
glog.V(logger.Debug).Infoln("Error downloading blocks:", err)
// XXX reset
return err
}
glog.V(logger.Detail).Infoln("Sync completed")
return nil
} }
// XXX Make synchronous // XXX Make synchronous
@ -403,13 +380,12 @@ func (d *Downloader) AddBlock(id string, block *types.Block, td *big.Int) error
} }
peer.mu.Lock() peer.mu.Lock()
peer.td = td
peer.recentHash = block.Hash() peer.recentHash = block.Hash()
peer.mu.Unlock() peer.mu.Unlock()
peer.promote() peer.promote()
glog.V(logger.Detail).Infoln("Inserting new block from:", id) glog.V(logger.Detail).Infoln("Inserting new block from:", id)
d.queue.addBlock(id, block, td) d.queue.addBlock(id, block)
// if neither go ahead to process // if neither go ahead to process
if d.isBusy() { if d.isBusy() {
@ -429,10 +405,10 @@ func (d *Downloader) AddBlock(id string, block *types.Block, td *big.Int) error
} }
} }
return d.process() return d.process(peer)
} }
func (d *Downloader) process() error { func (d *Downloader) process(peer *peer) error {
atomic.StoreInt32(&d.processingBlocks, 1) atomic.StoreInt32(&d.processingBlocks, 1)
defer atomic.StoreInt32(&d.processingBlocks, 0) defer atomic.StoreInt32(&d.processingBlocks, 0)
@ -458,18 +434,8 @@ func (d *Downloader) process() error {
// grandparents can be requested and queued. // grandparents can be requested and queued.
err = d.insertChain(blocks[:max]) err = d.insertChain(blocks[:max])
if err != nil && core.IsParentErr(err) { if err != nil && core.IsParentErr(err) {
glog.V(logger.Debug).Infoln("Aborting process due to missing parent. Fetching hashes") glog.V(logger.Debug).Infoln("Aborting process due to missing parent.")
// TODO change this. This shite
for i, block := range blocks[:max] {
if !d.hasBlock(block.ParentHash()) {
d.syncCh <- syncPack{d.peers.bestPeer(), block.Hash(), true}
// remove processed blocks
blocks = blocks[i:]
break
}
}
break break
} else if err != nil { } else if err != nil {
// immediatly unregister the false peer but do not disconnect // immediatly unregister the false peer but do not disconnect

View File

@ -49,7 +49,7 @@ type downloadTester struct {
func newTester(t *testing.T, hashes []common.Hash, blocks map[common.Hash]*types.Block) *downloadTester { func newTester(t *testing.T, hashes []common.Hash, blocks map[common.Hash]*types.Block) *downloadTester {
tester := &downloadTester{t: t, hashes: hashes, blocks: blocks, done: make(chan bool)} tester := &downloadTester{t: t, hashes: hashes, blocks: blocks, done: make(chan bool)}
downloader := New(tester.hasBlock, tester.insertChain, func() *big.Int { return new(big.Int) }) downloader := New(tester.hasBlock, tester.insertChain)
tester.downloader = downloader tester.downloader = downloader
return tester return tester
@ -65,10 +65,6 @@ func (dl *downloadTester) hasBlock(hash common.Hash) bool {
func (dl *downloadTester) insertChain(blocks types.Blocks) error { func (dl *downloadTester) insertChain(blocks types.Blocks) error {
dl.insertedBlocks += len(blocks) dl.insertedBlocks += len(blocks)
if len(dl.blocks)-1 <= dl.insertedBlocks {
dl.done <- true
}
return nil return nil
} }
@ -93,14 +89,14 @@ func (dl *downloadTester) getBlocks(id string) func([]common.Hash) error {
func (dl *downloadTester) newPeer(id string, td *big.Int, hash common.Hash) { func (dl *downloadTester) newPeer(id string, td *big.Int, hash common.Hash) {
dl.pcount++ dl.pcount++
dl.downloader.RegisterPeer(id, td, hash, dl.getHashes, dl.getBlocks(id)) dl.downloader.RegisterPeer(id, hash, dl.getHashes, dl.getBlocks(id))
} }
func (dl *downloadTester) badBlocksPeer(id string, td *big.Int, hash common.Hash) { func (dl *downloadTester) badBlocksPeer(id string, td *big.Int, hash common.Hash) {
dl.pcount++ dl.pcount++
// This bad peer never returns any blocks // This bad peer never returns any blocks
dl.downloader.RegisterPeer(id, td, hash, dl.getHashes, func([]common.Hash) error { dl.downloader.RegisterPeer(id, hash, dl.getHashes, func([]common.Hash) error {
return nil return nil
}) })
} }
@ -112,7 +108,8 @@ func TestDownload(t *testing.T) {
minDesiredPeerCount = 4 minDesiredPeerCount = 4
blockTtl = 1 * time.Second blockTtl = 1 * time.Second
hashes := createHashes(0, 1000) targetBlocks := 1000
hashes := createHashes(0, targetBlocks)
blocks := createBlocksFromHashes(hashes) blocks := createBlocksFromHashes(hashes)
tester := newTester(t, hashes, blocks) tester := newTester(t, hashes, blocks)
@ -121,21 +118,21 @@ func TestDownload(t *testing.T) {
tester.badBlocksPeer("peer3", big.NewInt(0), common.Hash{}) tester.badBlocksPeer("peer3", big.NewInt(0), common.Hash{})
tester.badBlocksPeer("peer4", big.NewInt(0), common.Hash{}) tester.badBlocksPeer("peer4", big.NewInt(0), common.Hash{})
success: err := tester.downloader.Synchronise("peer1", hashes[0])
select { if err != nil {
case <-tester.done: t.Error("download error", err)
break success }
case <-time.After(10 * time.Second): // XXX this could actually fail on a slow computer
t.Error("timeout") if tester.insertedBlocks != targetBlocks {
t.Error("expected", targetBlocks, "have", tester.insertedBlocks)
} }
} }
func TestMissing(t *testing.T) { func TestMissing(t *testing.T) {
t.Skip()
glog.SetV(logger.Detail) glog.SetV(logger.Detail)
glog.SetToStderr(true) glog.SetToStderr(true)
targetBlocks := 1000
hashes := createHashes(0, 1000) hashes := createHashes(0, 1000)
extraHashes := createHashes(1001, 1003) extraHashes := createHashes(1001, 1003)
blocks := createBlocksFromHashes(append(extraHashes, hashes...)) blocks := createBlocksFromHashes(append(extraHashes, hashes...))
@ -146,13 +143,12 @@ func TestMissing(t *testing.T) {
hashes = append(extraHashes, hashes[:len(hashes)-1]...) hashes = append(extraHashes, hashes[:len(hashes)-1]...)
tester.newPeer("peer2", big.NewInt(0), common.Hash{}) tester.newPeer("peer2", big.NewInt(0), common.Hash{})
success1: err := tester.downloader.Synchronise("peer1", hashes[0])
select { if err != nil {
case <-tester.done: t.Error("download error", err)
break success1
case <-time.After(10 * time.Second): // XXX this could actually fail on a slow computer
t.Error("timout")
} }
tester.downloader.AddBlock("peer2", blocks[hashes[len(hashes)-1]], big.NewInt(10001)) if tester.insertedBlocks != targetBlocks {
t.Error("expected", targetBlocks, "have", tester.insertedBlocks)
}
} }

View File

@ -2,7 +2,6 @@ package downloader
import ( import (
"errors" "errors"
"math/big"
"sync" "sync"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -51,16 +50,6 @@ func (p peers) getPeer(id string) *peer {
return p[id] return p[id]
} }
func (p peers) bestPeer() *peer {
var peer *peer
for _, cp := range p {
if peer == nil || cp.td.Cmp(peer.td) > 0 {
peer = cp
}
}
return peer
}
// peer represents an active peer // peer represents an active peer
type peer struct { type peer struct {
state int // Peer state (working, idle) state int // Peer state (working, idle)
@ -68,7 +57,6 @@ type peer struct {
mu sync.RWMutex mu sync.RWMutex
id string id string
td *big.Int
recentHash common.Hash recentHash common.Hash
ignored *set.Set ignored *set.Set
@ -78,10 +66,9 @@ type peer struct {
} }
// create a new peer // create a new peer
func newPeer(id string, td *big.Int, hash common.Hash, getHashes hashFetcherFn, getBlocks blockFetcherFn) *peer { func newPeer(id string, hash common.Hash, getHashes hashFetcherFn, getBlocks blockFetcherFn) *peer {
return &peer{ return &peer{
id: id, id: id,
td: td,
recentHash: hash, recentHash: hash,
getHashes: getHashes, getHashes: getHashes,
getBlocks: getBlocks, getBlocks: getBlocks,

View File

@ -2,7 +2,6 @@ package downloader
import ( import (
"math" "math"
"math/big"
"sync" "sync"
"time" "time"
@ -93,7 +92,7 @@ func (c *queue) has(hash common.Hash) bool {
return c.hashPool.Has(hash) || c.fetchPool.Has(hash) return c.hashPool.Has(hash) || c.fetchPool.Has(hash)
} }
func (c *queue) addBlock(id string, block *types.Block, td *big.Int) { func (c *queue) addBlock(id string, block *types.Block) {
c.mu.Lock() c.mu.Lock()
defer c.mu.Unlock() defer c.mu.Unlock()

View File

@ -1,79 +0,0 @@
package downloader
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
)
// THIS IS PENDING AND TO DO CHANGES FOR MAKING THE DOWNLOADER SYNCHRONOUS
// SynchroniseWithPeer will select the peer and use it for synchronising. If an empty string is given
// it will use the best peer possible and synchronise if it's TD is higher than our own. If any of the
// checks fail an error will be returned. This method is synchronous
func (d *Downloader) SynchroniseWithPeer(id string) (types.Blocks, error) {
// Check if we're busy
if d.isBusy() {
return nil, errBusy
}
// Attempt to select a peer. This can either be nothing, which returns, best peer
// or selected peer. If no peer could be found an error will be returned
var p *peer
if len(id) == 0 {
p = d.peers[id]
if p == nil {
return nil, errUnknownPeer
}
} else {
p = d.peers.bestPeer()
}
// Make sure our td is lower than the peer's td
if p.td.Cmp(d.currentTd()) <= 0 || d.hasBlock(p.recentHash) {
return nil, errLowTd
}
// Get the hash from the peer and initiate the downloading progress.
err := d.getFromPeer(p, p.recentHash, false)
if err != nil {
return nil, err
}
return d.queue.blocks, nil
}
// Synchronise will synchronise using the best peer.
func (d *Downloader) Synchronise() (types.Blocks, error) {
return d.SynchroniseWithPeer("")
}
func (d *Downloader) getFromPeer(p *peer, hash common.Hash, ignoreInitial bool) error {
d.activePeer = p.id
glog.V(logger.Detail).Infoln("Synchronising with the network using:", p.id)
// Start the fetcher. This will block the update entirely
// interupts need to be send to the appropriate channels
// respectively.
if err := d.startFetchingHashes(p, hash, ignoreInitial); err != nil {
// handle error
glog.V(logger.Debug).Infoln("Error fetching hashes:", err)
// XXX Reset
return err
}
// Start fetching blocks in paralel. The strategy is simple
// take any available peers, seserve a chunk for each peer available,
// let the peer deliver the chunkn and periodically check if a peer
// has timedout. When done downloading, process blocks.
if err := d.startFetchingBlocks(p); err != nil {
glog.V(logger.Debug).Infoln("Error downloading blocks:", err)
// XXX reset
return err
}
glog.V(logger.Detail).Infoln("Sync completed")
return nil
}

View File

@ -39,6 +39,7 @@ import (
"math" "math"
"math/big" "math/big"
"sync" "sync"
"time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
@ -51,6 +52,11 @@ import (
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
) )
const (
peerCountTimeout = 12 * time.Second // Amount of time it takes for the peer handler to ignore minDesiredPeerCount
minDesiredPeerCount = 5 // Amount of peers desired to start syncing
)
func errResp(code errCode, format string, v ...interface{}) error { func errResp(code errCode, format string, v ...interface{}) error {
return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...)) return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
} }
@ -82,6 +88,9 @@ type ProtocolManager struct {
eventMux *event.TypeMux eventMux *event.TypeMux
txSub event.Subscription txSub event.Subscription
minedBlockSub event.Subscription minedBlockSub event.Subscription
newPeerCh chan *peer
quitSync chan struct{}
} }
// NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable // NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
@ -93,6 +102,8 @@ func NewProtocolManager(protocolVersion, networkId int, mux *event.TypeMux, txpo
chainman: chainman, chainman: chainman,
downloader: downloader, downloader: downloader,
peers: make(map[string]*peer), peers: make(map[string]*peer),
newPeerCh: make(chan *peer, 1),
quitSync: make(chan struct{}),
} }
manager.SubProtocol = p2p.Protocol{ manager.SubProtocol = p2p.Protocol{
@ -101,16 +112,67 @@ func NewProtocolManager(protocolVersion, networkId int, mux *event.TypeMux, txpo
Length: ProtocolLength, Length: ProtocolLength,
Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error { Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
peer := manager.newPeer(protocolVersion, networkId, p, rw) peer := manager.newPeer(protocolVersion, networkId, p, rw)
err := manager.handle(peer)
//glog.V(logger.Detail).Infof("[%s]: %v\n", peer.id, err)
return err manager.newPeerCh <- peer
return manager.handle(peer)
}, },
} }
return manager return manager
} }
func (pm *ProtocolManager) syncHandler() {
// itimer is used to determine when to start ignoring `minDesiredPeerCount`
itimer := time.NewTimer(peerCountTimeout)
out:
for {
select {
case <-pm.newPeerCh:
// Meet the `minDesiredPeerCount` before we select our best peer
if len(pm.peers) < minDesiredPeerCount {
break
}
// Find the best peer
peer := getBestPeer(pm.peers)
if peer == nil {
glog.V(logger.Debug).Infoln("Sync attempt cancelled. No peers available")
}
itimer.Stop()
go pm.synchronise(peer)
case <-itimer.C:
// The timer will make sure that the downloader keeps an active state
// in which it attempts to always check the network for highest td peers
// Either select the peer or restart the timer if no peers could
// be selected.
if peer := getBestPeer(pm.peers); peer != nil {
go pm.synchronise(peer)
} else {
itimer.Reset(5 * time.Second)
}
case <-pm.quitSync:
break out
}
}
}
func (pm *ProtocolManager) synchronise(peer *peer) {
// Make sure the peer's TD is higher than our own. If not drop.
if peer.td.Cmp(pm.chainman.Td()) <= 0 {
return
}
glog.V(logger.Info).Infof("Synchronisation attempt using %s TD=%v\n", peer.id, peer.td)
// Get the hashes from the peer (synchronously)
err := pm.downloader.Synchronise(peer.id, peer.recentHash)
if err != nil {
// handle error
glog.V(logger.Debug).Infoln("error downloading:", err)
}
}
func (pm *ProtocolManager) Start() { func (pm *ProtocolManager) Start() {
// broadcast transactions // broadcast transactions
pm.txSub = pm.eventMux.Subscribe(core.TxPreEvent{}) pm.txSub = pm.eventMux.Subscribe(core.TxPreEvent{})
@ -119,11 +181,15 @@ func (pm *ProtocolManager) Start() {
// broadcast mined blocks // broadcast mined blocks
pm.minedBlockSub = pm.eventMux.Subscribe(core.NewMinedBlockEvent{}) pm.minedBlockSub = pm.eventMux.Subscribe(core.NewMinedBlockEvent{})
go pm.minedBroadcastLoop() go pm.minedBroadcastLoop()
// sync handler
go pm.syncHandler()
} }
func (pm *ProtocolManager) Stop() { func (pm *ProtocolManager) Stop() {
pm.txSub.Unsubscribe() // quits txBroadcastLoop pm.txSub.Unsubscribe() // quits txBroadcastLoop
pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop
close(pm.quitSync) // quits the sync handler
} }
func (pm *ProtocolManager) newPeer(pv, nv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer { func (pm *ProtocolManager) newPeer(pv, nv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
@ -141,7 +207,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
pm.peers[p.id] = p pm.peers[p.id] = p
pm.pmu.Unlock() pm.pmu.Unlock()
pm.downloader.RegisterPeer(p.id, p.td, p.currentHash, p.requestHashes, p.requestBlocks) pm.downloader.RegisterPeer(p.id, p.recentHash, p.requestHashes, p.requestBlocks)
defer func() { defer func() {
pm.pmu.Lock() pm.pmu.Lock()
defer pm.pmu.Unlock() defer pm.pmu.Unlock()
@ -313,6 +379,7 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
} else { } else {
// adding blocks is synchronous // adding blocks is synchronous
go func() { go func() {
// TODO check parent error
err := self.downloader.AddBlock(p.id, request.Block, request.TD) err := self.downloader.AddBlock(p.id, request.Block, request.TD)
if err != nil { if err != nil {
glog.V(logger.Detail).Infoln("downloader err:", err) glog.V(logger.Detail).Infoln("downloader err:", err)

View File

@ -25,6 +25,16 @@ type getBlockHashesMsgData struct {
Amount uint64 Amount uint64
} }
func getBestPeer(peers map[string]*peer) *peer {
var peer *peer
for _, cp := range peers {
if peer == nil || cp.td.Cmp(peer.td) > 0 {
peer = cp
}
}
return peer
}
type peer struct { type peer struct {
*p2p.Peer *p2p.Peer
@ -32,9 +42,9 @@ type peer struct {
protv, netid int protv, netid int
currentHash common.Hash recentHash common.Hash
id string id string
td *big.Int td *big.Int
genesis, ourHash common.Hash genesis, ourHash common.Hash
ourTd *big.Int ourTd *big.Int
@ -43,14 +53,14 @@ type peer struct {
blockHashes *set.Set blockHashes *set.Set
} }
func newPeer(protv, netid int, genesis, currentHash common.Hash, td *big.Int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer { func newPeer(protv, netid int, genesis, recentHash common.Hash, td *big.Int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
id := p.ID() id := p.ID()
return &peer{ return &peer{
Peer: p, Peer: p,
rw: rw, rw: rw,
genesis: genesis, genesis: genesis,
ourHash: currentHash, ourHash: recentHash,
ourTd: td, ourTd: td,
protv: protv, protv: protv,
netid: netid, netid: netid,
@ -145,7 +155,7 @@ func (p *peer) handleStatus() error {
// Set the total difficulty of the peer // Set the total difficulty of the peer
p.td = status.TD p.td = status.TD
// set the best hash of the peer // set the best hash of the peer
p.currentHash = status.CurrentBlock p.recentHash = status.CurrentBlock
return <-errc return <-errc
} }

View File

@ -236,6 +236,10 @@ func (self *XEth) CurrentBlock() *types.Block {
return self.backend.ChainManager().CurrentBlock() return self.backend.ChainManager().CurrentBlock()
} }
func (self *XEth) GasLimit() *big.Int {
return self.backend.ChainManager().GasLimit()
}
func (self *XEth) Block(v interface{}) *Block { func (self *XEth) Block(v interface{}) *Block {
if n, ok := v.(int32); ok { if n, ok := v.(int32); ok {
return self.BlockByNumber(int64(n)) return self.BlockByNumber(int64(n))