mirror of https://github.com/status-im/op-geth.git
core: support inserting pure header chains
This commit is contained in:
parent
92f9a3e5fa
commit
c33cc382b3
|
@ -369,13 +369,26 @@ func (sm *BlockProcessor) GetLogs(block *types.Block) (logs vm.Logs, err error)
|
||||||
return logs, nil
|
return logs, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ValidateHeader verifies the validity of a header, relying on the database and
|
||||||
|
// POW behind the block processor.
|
||||||
|
func (sm *BlockProcessor) ValidateHeader(header *types.Header, checkPow, uncle bool) error {
|
||||||
|
// Short circuit if the header's already known or its parent missing
|
||||||
|
if sm.bc.HasHeader(header.Hash()) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if parent := sm.bc.GetHeader(header.ParentHash); parent == nil {
|
||||||
|
return ParentError(header.ParentHash)
|
||||||
|
} else {
|
||||||
|
return ValidateHeader(sm.Pow, header, parent, checkPow, uncle)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// See YP section 4.3.4. "Block Header Validity"
|
// See YP section 4.3.4. "Block Header Validity"
|
||||||
// Validates a header. Returns an error if the header is invalid.
|
// Validates a header. Returns an error if the header is invalid.
|
||||||
func ValidateHeader(pow pow.PoW, header *types.Header, parent *types.Header, checkPow, uncle bool) error {
|
func ValidateHeader(pow pow.PoW, header *types.Header, parent *types.Header, checkPow, uncle bool) error {
|
||||||
if big.NewInt(int64(len(header.Extra))).Cmp(params.MaximumExtraDataSize) == 1 {
|
if big.NewInt(int64(len(header.Extra))).Cmp(params.MaximumExtraDataSize) == 1 {
|
||||||
return fmt.Errorf("Header extra data too long (%d)", len(header.Extra))
|
return fmt.Errorf("Header extra data too long (%d)", len(header.Extra))
|
||||||
}
|
}
|
||||||
|
|
||||||
if uncle {
|
if uncle {
|
||||||
if header.Time.Cmp(common.MaxBig) == 1 {
|
if header.Time.Cmp(common.MaxBig) == 1 {
|
||||||
return BlockTSTooBigErr
|
return BlockTSTooBigErr
|
||||||
|
|
|
@ -67,9 +67,9 @@ type BlockChain struct {
|
||||||
chainmu sync.RWMutex
|
chainmu sync.RWMutex
|
||||||
tsmu sync.RWMutex
|
tsmu sync.RWMutex
|
||||||
|
|
||||||
td *big.Int
|
checkpoint int // checkpoint counts towards the new checkpoint
|
||||||
currentBlock *types.Block
|
currentHeader *types.Header // Current head of the header chain (may be above the block chain!)
|
||||||
currentGasLimit *big.Int
|
currentBlock *types.Block // Current head of the block chain
|
||||||
|
|
||||||
headerCache *lru.Cache // Cache for the most recent block headers
|
headerCache *lru.Cache // Cache for the most recent block headers
|
||||||
bodyCache *lru.Cache // Cache for the most recent block bodies
|
bodyCache *lru.Cache // Cache for the most recent block bodies
|
||||||
|
@ -120,20 +120,15 @@ func NewBlockChain(chainDb ethdb.Database, pow pow.PoW, mux *event.TypeMux) (*Bl
|
||||||
}
|
}
|
||||||
glog.V(logger.Info).Infoln("WARNING: Wrote default ethereum genesis block")
|
glog.V(logger.Info).Infoln("WARNING: Wrote default ethereum genesis block")
|
||||||
}
|
}
|
||||||
if err := bc.setLastState(); err != nil {
|
if err := bc.loadLastState(); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
|
// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
|
||||||
for hash, _ := range BadHashes {
|
for hash, _ := range BadHashes {
|
||||||
if block := bc.GetBlock(hash); block != nil {
|
if header := bc.GetHeader(hash); header != nil {
|
||||||
glog.V(logger.Error).Infof("Found bad hash. Reorganising chain to state %x\n", block.ParentHash().Bytes()[:4])
|
glog.V(logger.Error).Infof("Found bad hash, rewinding chain to block #%d [%x…]", header.Number, header.ParentHash[:4])
|
||||||
block = bc.GetBlock(block.ParentHash())
|
bc.SetHead(header.Number.Uint64() - 1)
|
||||||
if block == nil {
|
glog.V(logger.Error).Infoln("Chain rewind was successful, resuming normal operation")
|
||||||
glog.Fatal("Unable to complete. Parent block not found. Corrupted DB?")
|
|
||||||
}
|
|
||||||
bc.SetHead(block)
|
|
||||||
|
|
||||||
glog.V(logger.Error).Infoln("Chain reorg was successfull. Resuming normal operation")
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Take ownership of this particular state
|
// Take ownership of this particular state
|
||||||
|
@ -141,30 +136,75 @@ func NewBlockChain(chainDb ethdb.Database, pow pow.PoW, mux *event.TypeMux) (*Bl
|
||||||
return bc, nil
|
return bc, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (bc *BlockChain) SetHead(head *types.Block) {
|
// loadLastState loads the last known chain state from the database. This method
|
||||||
|
// assumes that the chain manager mutex is held.
|
||||||
|
func (self *BlockChain) loadLastState() error {
|
||||||
|
// Restore the last known head block
|
||||||
|
head := GetHeadBlockHash(self.chainDb)
|
||||||
|
if head == (common.Hash{}) {
|
||||||
|
// Corrupt or empty database, init from scratch
|
||||||
|
self.Reset()
|
||||||
|
} else {
|
||||||
|
if block := self.GetBlock(head); block != nil {
|
||||||
|
// Block found, set as the current head
|
||||||
|
self.currentBlock = block
|
||||||
|
} else {
|
||||||
|
// Corrupt or empty database, init from scratch
|
||||||
|
self.Reset()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Restore the last known head header
|
||||||
|
self.currentHeader = self.currentBlock.Header()
|
||||||
|
if head := GetHeadHeaderHash(self.chainDb); head != (common.Hash{}) {
|
||||||
|
if header := self.GetHeader(head); header != nil {
|
||||||
|
self.currentHeader = header
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Issue a status log and return
|
||||||
|
headerTd := self.GetTd(self.currentHeader.Hash())
|
||||||
|
blockTd := self.GetTd(self.currentBlock.Hash())
|
||||||
|
|
||||||
|
glog.V(logger.Info).Infof("Last header: #%d [%x…] TD=%v", self.currentHeader.Number, self.currentHeader.Hash(), headerTd)
|
||||||
|
glog.V(logger.Info).Infof("Last block: #%d [%x…] TD=%v", self.currentBlock.Number(), self.currentBlock.Hash(), blockTd)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetHead rewind the local chain to a new head entity. In the case of headers,
|
||||||
|
// everything above the new head will be deleted and the new one set. In the case
|
||||||
|
// of blocks though, the head may be further rewound if block bodies are missing
|
||||||
|
// (non-archive nodes after a fast sync).
|
||||||
|
func (bc *BlockChain) SetHead(head uint64) {
|
||||||
bc.mu.Lock()
|
bc.mu.Lock()
|
||||||
defer bc.mu.Unlock()
|
defer bc.mu.Unlock()
|
||||||
|
|
||||||
for block := bc.currentBlock; block != nil && block.Hash() != head.Hash(); block = bc.GetBlock(block.ParentHash()) {
|
// Delete everything from the current header head (is above block head)
|
||||||
DeleteBlock(bc.chainDb, block.Hash())
|
for i := bc.currentHeader.Number.Uint64(); i > head; i-- {
|
||||||
|
if hash := GetCanonicalHash(bc.chainDb, i); hash != (common.Hash{}) {
|
||||||
|
DeleteCanonicalHash(bc.chainDb, i)
|
||||||
|
DeleteHeader(bc.chainDb, hash)
|
||||||
|
DeleteBody(bc.chainDb, hash)
|
||||||
|
DeleteTd(bc.chainDb, hash)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
bc.currentHeader = GetHeader(bc.chainDb, GetCanonicalHash(bc.chainDb, head))
|
||||||
|
|
||||||
|
// Rewind the block chain until a whole block is found
|
||||||
|
for bc.GetBlockByNumber(head) == nil {
|
||||||
|
head--
|
||||||
|
}
|
||||||
|
bc.currentBlock = bc.GetBlockByNumber(head)
|
||||||
|
|
||||||
|
// Clear out any stale content from the caches
|
||||||
bc.headerCache.Purge()
|
bc.headerCache.Purge()
|
||||||
bc.bodyCache.Purge()
|
bc.bodyCache.Purge()
|
||||||
bc.bodyRLPCache.Purge()
|
bc.bodyRLPCache.Purge()
|
||||||
bc.blockCache.Purge()
|
bc.blockCache.Purge()
|
||||||
bc.futureBlocks.Purge()
|
bc.futureBlocks.Purge()
|
||||||
|
|
||||||
bc.currentBlock = head
|
// Update all computed fields to the new head
|
||||||
bc.setTotalDifficulty(bc.GetTd(head.Hash()))
|
bc.insert(bc.currentBlock)
|
||||||
bc.insert(head)
|
bc.loadLastState()
|
||||||
bc.setLastState()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (self *BlockChain) Td() *big.Int {
|
|
||||||
self.mu.RLock()
|
|
||||||
defer self.mu.RUnlock()
|
|
||||||
|
|
||||||
return new(big.Int).Set(self.td)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (self *BlockChain) GasLimit() *big.Int {
|
func (self *BlockChain) GasLimit() *big.Int {
|
||||||
|
@ -181,6 +221,19 @@ func (self *BlockChain) LastBlockHash() common.Hash {
|
||||||
return self.currentBlock.Hash()
|
return self.currentBlock.Hash()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CurrentHeader retrieves the current head header of the canonical chain. The
|
||||||
|
// header is retrieved from the chain manager's internal cache, involving no
|
||||||
|
// database operations.
|
||||||
|
func (self *BlockChain) CurrentHeader() *types.Header {
|
||||||
|
self.mu.RLock()
|
||||||
|
defer self.mu.RUnlock()
|
||||||
|
|
||||||
|
return self.currentHeader
|
||||||
|
}
|
||||||
|
|
||||||
|
// CurrentBlock retrieves the current head block of the canonical chain. The
|
||||||
|
// block is retrieved from the chain manager's internal cache, involving no
|
||||||
|
// database operations.
|
||||||
func (self *BlockChain) CurrentBlock() *types.Block {
|
func (self *BlockChain) CurrentBlock() *types.Block {
|
||||||
self.mu.RLock()
|
self.mu.RLock()
|
||||||
defer self.mu.RUnlock()
|
defer self.mu.RUnlock()
|
||||||
|
@ -192,7 +245,7 @@ func (self *BlockChain) Status() (td *big.Int, currentBlock common.Hash, genesis
|
||||||
self.mu.RLock()
|
self.mu.RLock()
|
||||||
defer self.mu.RUnlock()
|
defer self.mu.RUnlock()
|
||||||
|
|
||||||
return new(big.Int).Set(self.td), self.currentBlock.Hash(), self.genesisBlock.Hash()
|
return self.GetTd(self.currentBlock.Hash()), self.currentBlock.Hash(), self.genesisBlock.Hash()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (self *BlockChain) SetProcessor(proc types.BlockProcessor) {
|
func (self *BlockChain) SetProcessor(proc types.BlockProcessor) {
|
||||||
|
@ -203,26 +256,6 @@ func (self *BlockChain) State() (*state.StateDB, error) {
|
||||||
return state.New(self.CurrentBlock().Root(), self.chainDb)
|
return state.New(self.CurrentBlock().Root(), self.chainDb)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (bc *BlockChain) setLastState() error {
|
|
||||||
head := GetHeadBlockHash(bc.chainDb)
|
|
||||||
if head != (common.Hash{}) {
|
|
||||||
block := bc.GetBlock(head)
|
|
||||||
if block != nil {
|
|
||||||
bc.currentBlock = block
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
bc.Reset()
|
|
||||||
}
|
|
||||||
bc.td = bc.GetTd(bc.currentBlock.Hash())
|
|
||||||
bc.currentGasLimit = CalcGasLimit(bc.currentBlock)
|
|
||||||
|
|
||||||
if glog.V(logger.Info) {
|
|
||||||
glog.Infof("Last block (#%v) %x TD=%v\n", bc.currentBlock.Number(), bc.currentBlock.Hash(), bc.td)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reset purges the entire blockchain, restoring it to its genesis state.
|
// Reset purges the entire blockchain, restoring it to its genesis state.
|
||||||
func (bc *BlockChain) Reset() {
|
func (bc *BlockChain) Reset() {
|
||||||
bc.ResetWithGenesisBlock(bc.genesisBlock)
|
bc.ResetWithGenesisBlock(bc.genesisBlock)
|
||||||
|
@ -238,6 +271,9 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) {
|
||||||
for block := bc.currentBlock; block != nil; block = bc.GetBlock(block.ParentHash()) {
|
for block := bc.currentBlock; block != nil; block = bc.GetBlock(block.ParentHash()) {
|
||||||
DeleteBlock(bc.chainDb, block.Hash())
|
DeleteBlock(bc.chainDb, block.Hash())
|
||||||
}
|
}
|
||||||
|
for header := bc.currentHeader; header != nil; header = bc.GetHeader(header.ParentHash) {
|
||||||
|
DeleteBlock(bc.chainDb, header.Hash())
|
||||||
|
}
|
||||||
bc.headerCache.Purge()
|
bc.headerCache.Purge()
|
||||||
bc.bodyCache.Purge()
|
bc.bodyCache.Purge()
|
||||||
bc.bodyRLPCache.Purge()
|
bc.bodyRLPCache.Purge()
|
||||||
|
@ -254,7 +290,7 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) {
|
||||||
bc.genesisBlock = genesis
|
bc.genesisBlock = genesis
|
||||||
bc.insert(bc.genesisBlock)
|
bc.insert(bc.genesisBlock)
|
||||||
bc.currentBlock = bc.genesisBlock
|
bc.currentBlock = bc.genesisBlock
|
||||||
bc.setTotalDifficulty(genesis.Difficulty())
|
bc.currentHeader = bc.genesisBlock.Header()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Export writes the active chain to the given writer.
|
// Export writes the active chain to the given writer.
|
||||||
|
@ -290,17 +326,26 @@ func (self *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// insert injects a block into the current chain block chain. Note, this function
|
// insert injects a new head block into the current block chain. This method
|
||||||
// assumes that the `mu` mutex is held!
|
// assumes that the block is indeed a true head. It will also reset the head
|
||||||
|
// header to this very same block to prevent the headers from diverging on a
|
||||||
|
// different header chain.
|
||||||
|
//
|
||||||
|
// Note, this function assumes that the `mu` mutex is held!
|
||||||
func (bc *BlockChain) insert(block *types.Block) {
|
func (bc *BlockChain) insert(block *types.Block) {
|
||||||
// Add the block to the canonical chain number scheme and mark as the head
|
// Add the block to the canonical chain number scheme and mark as the head
|
||||||
if err := WriteCanonicalHash(bc.chainDb, block.Hash(), block.NumberU64()); err != nil {
|
if err := WriteCanonicalHash(bc.chainDb, block.Hash(), block.NumberU64()); err != nil {
|
||||||
glog.Fatalf("failed to insert block number: %v", err)
|
glog.Fatalf("failed to insert block number: %v", err)
|
||||||
}
|
}
|
||||||
if err := WriteHeadBlockHash(bc.chainDb, block.Hash()); err != nil {
|
if err := WriteHeadBlockHash(bc.chainDb, block.Hash()); err != nil {
|
||||||
glog.Fatalf("failed to insert block number: %v", err)
|
glog.Fatalf("failed to insert head block hash: %v", err)
|
||||||
}
|
}
|
||||||
|
if err := WriteHeadHeaderHash(bc.chainDb, block.Hash()); err != nil {
|
||||||
|
glog.Fatalf("failed to insert head header hash: %v", err)
|
||||||
|
}
|
||||||
|
// Update the internal state with the head block
|
||||||
bc.currentBlock = block
|
bc.currentBlock = block
|
||||||
|
bc.currentHeader = block.Header()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Accessors
|
// Accessors
|
||||||
|
@ -456,19 +501,15 @@ func (self *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*ty
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func (self *BlockChain) GetUnclesInChain(block *types.Block, length int) (uncles []*types.Header) {
|
// GetUnclesInChain retrieves all the uncles from a given block backwards until
|
||||||
|
// a specific distance is reached.
|
||||||
|
func (self *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
|
||||||
|
uncles := []*types.Header{}
|
||||||
for i := 0; block != nil && i < length; i++ {
|
for i := 0; block != nil && i < length; i++ {
|
||||||
uncles = append(uncles, block.Uncles()...)
|
uncles = append(uncles, block.Uncles()...)
|
||||||
block = self.GetBlock(block.ParentHash())
|
block = self.GetBlock(block.ParentHash())
|
||||||
}
|
}
|
||||||
|
return uncles
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// setTotalDifficulty updates the TD of the chain manager. Note, this function
|
|
||||||
// assumes that the `mu` mutex is held!
|
|
||||||
func (bc *BlockChain) setTotalDifficulty(td *big.Int) {
|
|
||||||
bc.td = new(big.Int).Set(td)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (bc *BlockChain) Stop() {
|
func (bc *BlockChain) Stop() {
|
||||||
|
@ -504,6 +545,135 @@ const (
|
||||||
SideStatTy
|
SideStatTy
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// writeHeader writes a header into the local chain, given that its parent is
|
||||||
|
// already known. If the total difficulty of the newly inserted header becomes
|
||||||
|
// greater than the old known TD, the canonical chain is re-routed.
|
||||||
|
//
|
||||||
|
// Note: This method is not concurrent-safe with inserting blocks simultaneously
|
||||||
|
// into the chain, as side effects caused by reorganizations cannot be emulated
|
||||||
|
// without the real blocks. Hence, writing headers directly should only be done
|
||||||
|
// in two scenarios: pure-header mode of operation (light clients), or properly
|
||||||
|
// separated header/block phases (non-archive clients).
|
||||||
|
func (self *BlockChain) writeHeader(header *types.Header) error {
|
||||||
|
self.wg.Add(1)
|
||||||
|
defer self.wg.Done()
|
||||||
|
|
||||||
|
// Calculate the total difficulty of the header
|
||||||
|
ptd := self.GetTd(header.ParentHash)
|
||||||
|
if ptd == nil {
|
||||||
|
return ParentError(header.ParentHash)
|
||||||
|
}
|
||||||
|
td := new(big.Int).Add(header.Difficulty, ptd)
|
||||||
|
|
||||||
|
// Make sure no inconsistent state is leaked during insertion
|
||||||
|
self.mu.Lock()
|
||||||
|
defer self.mu.Unlock()
|
||||||
|
|
||||||
|
// If the total difficulty is higher than our known, add it to the canonical chain
|
||||||
|
if td.Cmp(self.GetTd(self.currentHeader.Hash())) > 0 {
|
||||||
|
// Delete any canonical number assignments above the new head
|
||||||
|
for i := header.Number.Uint64() + 1; GetCanonicalHash(self.chainDb, i) != (common.Hash{}); i++ {
|
||||||
|
DeleteCanonicalHash(self.chainDb, i)
|
||||||
|
}
|
||||||
|
// Overwrite any stale canonical number assignments
|
||||||
|
head := self.GetHeader(header.ParentHash)
|
||||||
|
for GetCanonicalHash(self.chainDb, head.Number.Uint64()) != head.Hash() {
|
||||||
|
WriteCanonicalHash(self.chainDb, head.Hash(), head.Number.Uint64())
|
||||||
|
head = self.GetHeader(head.ParentHash)
|
||||||
|
}
|
||||||
|
// Extend the canonical chain with the new header
|
||||||
|
if err := WriteCanonicalHash(self.chainDb, header.Hash(), header.Number.Uint64()); err != nil {
|
||||||
|
glog.Fatalf("failed to insert header number: %v", err)
|
||||||
|
}
|
||||||
|
if err := WriteHeadHeaderHash(self.chainDb, header.Hash()); err != nil {
|
||||||
|
glog.Fatalf("failed to insert head header hash: %v", err)
|
||||||
|
}
|
||||||
|
self.currentHeader = types.CopyHeader(header)
|
||||||
|
}
|
||||||
|
// Irrelevant of the canonical status, write the header itself to the database
|
||||||
|
if err := WriteTd(self.chainDb, header.Hash(), td); err != nil {
|
||||||
|
glog.Fatalf("failed to write header total difficulty: %v", err)
|
||||||
|
}
|
||||||
|
if err := WriteHeader(self.chainDb, header); err != nil {
|
||||||
|
glog.Fatalf("filed to write header contents: %v", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// InsertHeaderChain will attempt to insert the given header chain in to the
|
||||||
|
// local chain, possibly creating a dork. If an error is returned, it will
|
||||||
|
// return the index number of the failing header as well an error describing
|
||||||
|
// what went wrong.
|
||||||
|
//
|
||||||
|
// The verify parameter can be used to fine tune whether nonce verification
|
||||||
|
// should be done or not. The reason behind the optional check is because some
|
||||||
|
// of the header retrieval mechanisms already need to verfy nonces, as well as
|
||||||
|
// because nonces can be verified sparsely, not needing to check each.
|
||||||
|
func (self *BlockChain) InsertHeaderChain(chain []*types.Header, verify bool) (int, error) {
|
||||||
|
self.wg.Add(1)
|
||||||
|
defer self.wg.Done()
|
||||||
|
|
||||||
|
// Make sure only one thread manipulates the chain at once
|
||||||
|
self.chainmu.Lock()
|
||||||
|
defer self.chainmu.Unlock()
|
||||||
|
|
||||||
|
// Collect some import statistics to report on
|
||||||
|
stats := struct{ processed, ignored int }{}
|
||||||
|
start := time.Now()
|
||||||
|
|
||||||
|
// Start the parallel nonce verifier, with a fake nonce if not requested
|
||||||
|
verifier := self.pow
|
||||||
|
if !verify {
|
||||||
|
verifier = FakePow{}
|
||||||
|
}
|
||||||
|
nonceAbort, nonceResults := verifyNoncesFromHeaders(verifier, chain)
|
||||||
|
defer close(nonceAbort)
|
||||||
|
|
||||||
|
// Iterate over the headers, inserting any new ones
|
||||||
|
complete := make([]bool, len(chain))
|
||||||
|
for i, header := range chain {
|
||||||
|
// Short circuit insertion if shutting down
|
||||||
|
if atomic.LoadInt32(&self.procInterrupt) == 1 {
|
||||||
|
glog.V(logger.Debug).Infoln("Premature abort during header chain processing")
|
||||||
|
break
|
||||||
|
}
|
||||||
|
hash := header.Hash()
|
||||||
|
|
||||||
|
// Accumulate verification results until the next header is verified
|
||||||
|
for !complete[i] {
|
||||||
|
if res := <-nonceResults; res.valid {
|
||||||
|
complete[res.index] = true
|
||||||
|
} else {
|
||||||
|
header := chain[res.index]
|
||||||
|
return res.index, &BlockNonceErr{
|
||||||
|
Hash: header.Hash(),
|
||||||
|
Number: new(big.Int).Set(header.Number),
|
||||||
|
Nonce: header.Nonce.Uint64(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if BadHashes[hash] {
|
||||||
|
glog.V(logger.Error).Infof("Bad header %d [%x…], known bad hash", header.Number, hash)
|
||||||
|
return i, BadHashError(hash)
|
||||||
|
}
|
||||||
|
// Write the header to the chain and get the status
|
||||||
|
if self.HasHeader(hash) {
|
||||||
|
stats.ignored++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if err := self.writeHeader(header); err != nil {
|
||||||
|
return i, err
|
||||||
|
}
|
||||||
|
stats.processed++
|
||||||
|
}
|
||||||
|
// Report some public statistics so the user has a clue what's going on
|
||||||
|
first, last := chain[0], chain[len(chain)-1]
|
||||||
|
glog.V(logger.Info).Infof("imported %d header(s) (%d ignored) in %v. #%v [%x… / %x…]", stats.processed, stats.ignored,
|
||||||
|
time.Since(start), last.Number, first.Hash().Bytes()[:4], last.Hash().Bytes()[:4])
|
||||||
|
|
||||||
|
return 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
// WriteBlock writes the block to the chain.
|
// WriteBlock writes the block to the chain.
|
||||||
func (self *BlockChain) WriteBlock(block *types.Block) (status writeStatus, err error) {
|
func (self *BlockChain) WriteBlock(block *types.Block) (status writeStatus, err error) {
|
||||||
self.wg.Add(1)
|
self.wg.Add(1)
|
||||||
|
@ -522,7 +692,7 @@ func (self *BlockChain) WriteBlock(block *types.Block) (status writeStatus, err
|
||||||
|
|
||||||
// Compare the TD of the last known block in the canonical chain to make sure it's greater.
|
// Compare the TD of the last known block in the canonical chain to make sure it's greater.
|
||||||
// At this point it's possible that a different chain (fork) becomes the new canonical chain.
|
// At this point it's possible that a different chain (fork) becomes the new canonical chain.
|
||||||
if td.Cmp(self.Td()) > 0 {
|
if td.Cmp(self.GetTd(self.currentBlock.Hash())) > 0 {
|
||||||
// chain fork
|
// chain fork
|
||||||
if block.ParentHash() != cblock.Hash() {
|
if block.ParentHash() != cblock.Hash() {
|
||||||
// during split we merge two different chains and create the new canonical chain
|
// during split we merge two different chains and create the new canonical chain
|
||||||
|
@ -534,7 +704,6 @@ func (self *BlockChain) WriteBlock(block *types.Block) (status writeStatus, err
|
||||||
status = CanonStatTy
|
status = CanonStatTy
|
||||||
|
|
||||||
self.mu.Lock()
|
self.mu.Lock()
|
||||||
self.setTotalDifficulty(td)
|
|
||||||
self.insert(block)
|
self.insert(block)
|
||||||
self.mu.Unlock()
|
self.mu.Unlock()
|
||||||
} else {
|
} else {
|
||||||
|
@ -580,7 +749,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
|
||||||
txcount := 0
|
txcount := 0
|
||||||
for i, block := range chain {
|
for i, block := range chain {
|
||||||
if atomic.LoadInt32(&self.procInterrupt) == 1 {
|
if atomic.LoadInt32(&self.procInterrupt) == 1 {
|
||||||
glog.V(logger.Debug).Infoln("Premature abort during chain processing")
|
glog.V(logger.Debug).Infoln("Premature abort during block chain processing")
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -788,8 +957,7 @@ func (self *BlockChain) postChainEvents(events []interface{}) {
|
||||||
if event, ok := event.(ChainEvent); ok {
|
if event, ok := event.(ChainEvent); ok {
|
||||||
// We need some control over the mining operation. Acquiring locks and waiting for the miner to create new block takes too long
|
// We need some control over the mining operation. Acquiring locks and waiting for the miner to create new block takes too long
|
||||||
// and in most cases isn't even necessary.
|
// and in most cases isn't even necessary.
|
||||||
if self.currentBlock.Hash() == event.Hash {
|
if self.LastBlockHash() == event.Hash {
|
||||||
self.currentGasLimit = CalcGasLimit(event.Block)
|
|
||||||
self.eventMux.Post(ChainHeadEvent{event.Block})
|
self.eventMux.Post(ChainHeadEvent{event.Block})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -64,44 +64,58 @@ func theBlockChain(db ethdb.Database, t *testing.T) *BlockChain {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test fork of length N starting from block i
|
// Test fork of length N starting from block i
|
||||||
func testFork(t *testing.T, bman *BlockProcessor, i, N int, f func(td1, td2 *big.Int)) {
|
func testFork(t *testing.T, processor *BlockProcessor, i, n int, full bool, comparator func(td1, td2 *big.Int)) {
|
||||||
// switch databases to process the new chain
|
// Copy old chain up to #i into a new db
|
||||||
db, err := ethdb.NewMemDatabase()
|
db, processor2, err := newCanonical(i, full)
|
||||||
if err != nil {
|
|
||||||
t.Fatal("Failed to create db:", err)
|
|
||||||
}
|
|
||||||
// copy old chain up to i into new db with deterministic canonical
|
|
||||||
bman2, err := newCanonical(i, db)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal("could not make new canonical in testFork", err)
|
t.Fatal("could not make new canonical in testFork", err)
|
||||||
}
|
}
|
||||||
// assert the bmans have the same block at i
|
// Assert the chains have the same header/block at #i
|
||||||
bi1 := bman.bc.GetBlockByNumber(uint64(i)).Hash()
|
var hash1, hash2 common.Hash
|
||||||
bi2 := bman2.bc.GetBlockByNumber(uint64(i)).Hash()
|
if full {
|
||||||
if bi1 != bi2 {
|
hash1 = processor.bc.GetBlockByNumber(uint64(i)).Hash()
|
||||||
fmt.Printf("%+v\n%+v\n\n", bi1, bi2)
|
hash2 = processor2.bc.GetBlockByNumber(uint64(i)).Hash()
|
||||||
t.Fatal("chains do not have the same hash at height", i)
|
} else {
|
||||||
|
hash1 = processor.bc.GetHeaderByNumber(uint64(i)).Hash()
|
||||||
|
hash2 = processor2.bc.GetHeaderByNumber(uint64(i)).Hash()
|
||||||
}
|
}
|
||||||
bman2.bc.SetProcessor(bman2)
|
if hash1 != hash2 {
|
||||||
|
t.Errorf("chain content mismatch at %d: have hash %v, want hash %v", i, hash2, hash1)
|
||||||
// extend the fork
|
|
||||||
parent := bman2.bc.CurrentBlock()
|
|
||||||
chainB := makeChain(parent, N, db, forkSeed)
|
|
||||||
_, err = bman2.bc.InsertChain(chainB)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal("Insert chain error for fork:", err)
|
|
||||||
}
|
}
|
||||||
|
// Extend the newly created chain
|
||||||
tdpre := bman.bc.Td()
|
var (
|
||||||
// Test the fork's blocks on the original chain
|
blockChainB []*types.Block
|
||||||
td, err := testChain(chainB, bman)
|
headerChainB []*types.Header
|
||||||
if err != nil {
|
)
|
||||||
t.Fatal("expected chainB not to give errors:", err)
|
if full {
|
||||||
|
blockChainB = makeBlockChain(processor2.bc.CurrentBlock(), n, db, forkSeed)
|
||||||
|
if _, err := processor2.bc.InsertChain(blockChainB); err != nil {
|
||||||
|
t.Fatalf("failed to insert forking chain: %v", err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
headerChainB = makeHeaderChain(processor2.bc.CurrentHeader(), n, db, forkSeed)
|
||||||
|
if _, err := processor2.bc.InsertHeaderChain(headerChainB, true); err != nil {
|
||||||
|
t.Fatalf("failed to insert forking chain: %v", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
// Compare difficulties
|
// Sanity check that the forked chain can be imported into the original
|
||||||
f(tdpre, td)
|
var tdPre, tdPost *big.Int
|
||||||
|
|
||||||
// Loop over parents making sure reconstruction is done properly
|
if full {
|
||||||
|
tdPre = processor.bc.GetTd(processor.bc.CurrentBlock().Hash())
|
||||||
|
if err := testBlockChainImport(blockChainB, processor); err != nil {
|
||||||
|
t.Fatalf("failed to import forked block chain: %v", err)
|
||||||
|
}
|
||||||
|
tdPost = processor.bc.GetTd(blockChainB[len(blockChainB)-1].Hash())
|
||||||
|
} else {
|
||||||
|
tdPre = processor.bc.GetTd(processor.bc.CurrentHeader().Hash())
|
||||||
|
if err := testHeaderChainImport(headerChainB, processor); err != nil {
|
||||||
|
t.Fatalf("failed to import forked header chain: %v", err)
|
||||||
|
}
|
||||||
|
tdPost = processor.bc.GetTd(headerChainB[len(headerChainB)-1].Hash())
|
||||||
|
}
|
||||||
|
// Compare the total difficulties of the chains
|
||||||
|
comparator(tdPre, tdPost)
|
||||||
}
|
}
|
||||||
|
|
||||||
func printChain(bc *BlockChain) {
|
func printChain(bc *BlockChain) {
|
||||||
|
@ -111,22 +125,41 @@ func printChain(bc *BlockChain) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// process blocks against a chain
|
// testBlockChainImport tries to process a chain of blocks, writing them into
|
||||||
func testChain(chainB types.Blocks, bman *BlockProcessor) (*big.Int, error) {
|
// the database if successful.
|
||||||
for _, block := range chainB {
|
func testBlockChainImport(chain []*types.Block, processor *BlockProcessor) error {
|
||||||
_, _, err := bman.bc.processor.Process(block)
|
for _, block := range chain {
|
||||||
if err != nil {
|
// Try and process the block
|
||||||
|
if _, _, err := processor.Process(block); err != nil {
|
||||||
if IsKnownBlockErr(err) {
|
if IsKnownBlockErr(err) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
bman.bc.mu.Lock()
|
// Manually insert the block into the database, but don't reorganize (allows subsequent testing)
|
||||||
WriteTd(bman.bc.chainDb, block.Hash(), new(big.Int).Add(block.Difficulty(), bman.bc.GetTd(block.ParentHash())))
|
processor.bc.mu.Lock()
|
||||||
WriteBlock(bman.bc.chainDb, block)
|
WriteTd(processor.chainDb, block.Hash(), new(big.Int).Add(block.Difficulty(), processor.bc.GetTd(block.ParentHash())))
|
||||||
bman.bc.mu.Unlock()
|
WriteBlock(processor.chainDb, block)
|
||||||
|
processor.bc.mu.Unlock()
|
||||||
}
|
}
|
||||||
return bman.bc.GetTd(chainB[len(chainB)-1].Hash()), nil
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// testHeaderChainImport tries to process a chain of header, writing them into
|
||||||
|
// the database if successful.
|
||||||
|
func testHeaderChainImport(chain []*types.Header, processor *BlockProcessor) error {
|
||||||
|
for _, header := range chain {
|
||||||
|
// Try and validate the header
|
||||||
|
if err := processor.ValidateHeader(header, false, false); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Manually insert the header into the database, but don't reorganize (allows subsequent testing)
|
||||||
|
processor.bc.mu.Lock()
|
||||||
|
WriteTd(processor.chainDb, header.Hash(), new(big.Int).Add(header.Difficulty, processor.bc.GetTd(header.ParentHash)))
|
||||||
|
WriteHeader(processor.chainDb, header)
|
||||||
|
processor.bc.mu.Unlock()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func loadChain(fn string, t *testing.T) (types.Blocks, error) {
|
func loadChain(fn string, t *testing.T) (types.Blocks, error) {
|
||||||
|
@ -154,139 +187,147 @@ func insertChain(done chan bool, blockchain *BlockChain, chain types.Blocks, t *
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestLastBlock(t *testing.T) {
|
func TestLastBlock(t *testing.T) {
|
||||||
db, err := ethdb.NewMemDatabase()
|
db, _ := ethdb.NewMemDatabase()
|
||||||
if err != nil {
|
|
||||||
t.Fatal("Failed to create db:", err)
|
|
||||||
}
|
|
||||||
bchain := theBlockChain(db, t)
|
bchain := theBlockChain(db, t)
|
||||||
block := makeChain(bchain.CurrentBlock(), 1, db, 0)[0]
|
block := makeBlockChain(bchain.CurrentBlock(), 1, db, 0)[0]
|
||||||
bchain.insert(block)
|
bchain.insert(block)
|
||||||
if block.Hash() != GetHeadBlockHash(db) {
|
if block.Hash() != GetHeadBlockHash(db) {
|
||||||
t.Errorf("Write/Get HeadBlockHash failed")
|
t.Errorf("Write/Get HeadBlockHash failed")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestExtendCanonical(t *testing.T) {
|
// Tests that given a starting canonical chain of a given size, it can be extended
|
||||||
CanonicalLength := 5
|
// with various length chains.
|
||||||
db, err := ethdb.NewMemDatabase()
|
func TestExtendCanonicalHeaders(t *testing.T) { testExtendCanonical(t, false) }
|
||||||
|
func TestExtendCanonicalBlocks(t *testing.T) { testExtendCanonical(t, true) }
|
||||||
|
|
||||||
|
func testExtendCanonical(t *testing.T, full bool) {
|
||||||
|
length := 5
|
||||||
|
|
||||||
|
// Make first chain starting from genesis
|
||||||
|
_, processor, err := newCanonical(length, full)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal("Failed to create db:", err)
|
t.Fatalf("failed to make new canonical chain: %v", err)
|
||||||
}
|
}
|
||||||
// make first chain starting from genesis
|
// Define the difficulty comparator
|
||||||
bman, err := newCanonical(CanonicalLength, db)
|
better := func(td1, td2 *big.Int) {
|
||||||
if err != nil {
|
|
||||||
t.Fatal("Could not make new canonical chain:", err)
|
|
||||||
}
|
|
||||||
f := func(td1, td2 *big.Int) {
|
|
||||||
if td2.Cmp(td1) <= 0 {
|
if td2.Cmp(td1) <= 0 {
|
||||||
t.Error("expected chainB to have higher difficulty. Got", td2, "expected more than", td1)
|
t.Errorf("total difficulty mismatch: have %v, expected more than %v", td2, td1)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Start fork from current height (CanonicalLength)
|
// Start fork from current height
|
||||||
testFork(t, bman, CanonicalLength, 1, f)
|
testFork(t, processor, length, 1, full, better)
|
||||||
testFork(t, bman, CanonicalLength, 2, f)
|
testFork(t, processor, length, 2, full, better)
|
||||||
testFork(t, bman, CanonicalLength, 5, f)
|
testFork(t, processor, length, 5, full, better)
|
||||||
testFork(t, bman, CanonicalLength, 10, f)
|
testFork(t, processor, length, 10, full, better)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestShorterFork(t *testing.T) {
|
// Tests that given a starting canonical chain of a given size, creating shorter
|
||||||
db, err := ethdb.NewMemDatabase()
|
// forks do not take canonical ownership.
|
||||||
|
func TestShorterForkHeaders(t *testing.T) { testShorterFork(t, false) }
|
||||||
|
func TestShorterForkBlocks(t *testing.T) { testShorterFork(t, true) }
|
||||||
|
|
||||||
|
func testShorterFork(t *testing.T, full bool) {
|
||||||
|
length := 10
|
||||||
|
|
||||||
|
// Make first chain starting from genesis
|
||||||
|
_, processor, err := newCanonical(length, full)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal("Failed to create db:", err)
|
t.Fatalf("failed to make new canonical chain: %v", err)
|
||||||
}
|
}
|
||||||
// make first chain starting from genesis
|
// Define the difficulty comparator
|
||||||
bman, err := newCanonical(10, db)
|
worse := func(td1, td2 *big.Int) {
|
||||||
if err != nil {
|
|
||||||
t.Fatal("Could not make new canonical chain:", err)
|
|
||||||
}
|
|
||||||
f := func(td1, td2 *big.Int) {
|
|
||||||
if td2.Cmp(td1) >= 0 {
|
if td2.Cmp(td1) >= 0 {
|
||||||
t.Error("expected chainB to have lower difficulty. Got", td2, "expected less than", td1)
|
t.Errorf("total difficulty mismatch: have %v, expected less than %v", td2, td1)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Sum of numbers must be less than 10
|
// Sum of numbers must be less than `length` for this to be a shorter fork
|
||||||
// for this to be a shorter fork
|
testFork(t, processor, 0, 3, full, worse)
|
||||||
testFork(t, bman, 0, 3, f)
|
testFork(t, processor, 0, 7, full, worse)
|
||||||
testFork(t, bman, 0, 7, f)
|
testFork(t, processor, 1, 1, full, worse)
|
||||||
testFork(t, bman, 1, 1, f)
|
testFork(t, processor, 1, 7, full, worse)
|
||||||
testFork(t, bman, 1, 7, f)
|
testFork(t, processor, 5, 3, full, worse)
|
||||||
testFork(t, bman, 5, 3, f)
|
testFork(t, processor, 5, 4, full, worse)
|
||||||
testFork(t, bman, 5, 4, f)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestLongerFork(t *testing.T) {
|
// Tests that given a starting canonical chain of a given size, creating longer
|
||||||
db, err := ethdb.NewMemDatabase()
|
// forks do take canonical ownership.
|
||||||
|
func TestLongerForkHeaders(t *testing.T) { testLongerFork(t, false) }
|
||||||
|
func TestLongerForkBlocks(t *testing.T) { testLongerFork(t, true) }
|
||||||
|
|
||||||
|
func testLongerFork(t *testing.T, full bool) {
|
||||||
|
length := 10
|
||||||
|
|
||||||
|
// Make first chain starting from genesis
|
||||||
|
_, processor, err := newCanonical(length, full)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal("Failed to create db:", err)
|
t.Fatalf("failed to make new canonical chain: %v", err)
|
||||||
}
|
}
|
||||||
// make first chain starting from genesis
|
// Define the difficulty comparator
|
||||||
bman, err := newCanonical(10, db)
|
better := func(td1, td2 *big.Int) {
|
||||||
if err != nil {
|
|
||||||
t.Fatal("Could not make new canonical chain:", err)
|
|
||||||
}
|
|
||||||
f := func(td1, td2 *big.Int) {
|
|
||||||
if td2.Cmp(td1) <= 0 {
|
if td2.Cmp(td1) <= 0 {
|
||||||
t.Error("expected chainB to have higher difficulty. Got", td2, "expected more than", td1)
|
t.Errorf("total difficulty mismatch: have %v, expected more than %v", td2, td1)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Sum of numbers must be greater than 10
|
// Sum of numbers must be greater than `length` for this to be a longer fork
|
||||||
// for this to be a longer fork
|
testFork(t, processor, 0, 11, full, better)
|
||||||
testFork(t, bman, 0, 11, f)
|
testFork(t, processor, 0, 15, full, better)
|
||||||
testFork(t, bman, 0, 15, f)
|
testFork(t, processor, 1, 10, full, better)
|
||||||
testFork(t, bman, 1, 10, f)
|
testFork(t, processor, 1, 12, full, better)
|
||||||
testFork(t, bman, 1, 12, f)
|
testFork(t, processor, 5, 6, full, better)
|
||||||
testFork(t, bman, 5, 6, f)
|
testFork(t, processor, 5, 8, full, better)
|
||||||
testFork(t, bman, 5, 8, f)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestEqualFork(t *testing.T) {
|
// Tests that given a starting canonical chain of a given size, creating equal
|
||||||
db, err := ethdb.NewMemDatabase()
|
// forks do take canonical ownership.
|
||||||
|
func TestEqualForkHeaders(t *testing.T) { testEqualFork(t, false) }
|
||||||
|
func TestEqualForkBlocks(t *testing.T) { testEqualFork(t, true) }
|
||||||
|
|
||||||
|
func testEqualFork(t *testing.T, full bool) {
|
||||||
|
length := 10
|
||||||
|
|
||||||
|
// Make first chain starting from genesis
|
||||||
|
_, processor, err := newCanonical(length, full)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal("Failed to create db:", err)
|
t.Fatalf("failed to make new canonical chain: %v", err)
|
||||||
}
|
}
|
||||||
bman, err := newCanonical(10, db)
|
// Define the difficulty comparator
|
||||||
if err != nil {
|
equal := func(td1, td2 *big.Int) {
|
||||||
t.Fatal("Could not make new canonical chain:", err)
|
|
||||||
}
|
|
||||||
f := func(td1, td2 *big.Int) {
|
|
||||||
if td2.Cmp(td1) != 0 {
|
if td2.Cmp(td1) != 0 {
|
||||||
t.Error("expected chainB to have equal difficulty. Got", td2, "expected ", td1)
|
t.Errorf("total difficulty mismatch: have %v, want %v", td2, td1)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Sum of numbers must be equal to 10
|
// Sum of numbers must be equal to `length` for this to be an equal fork
|
||||||
// for this to be an equal fork
|
testFork(t, processor, 0, 10, full, equal)
|
||||||
testFork(t, bman, 0, 10, f)
|
testFork(t, processor, 1, 9, full, equal)
|
||||||
testFork(t, bman, 1, 9, f)
|
testFork(t, processor, 2, 8, full, equal)
|
||||||
testFork(t, bman, 2, 8, f)
|
testFork(t, processor, 5, 5, full, equal)
|
||||||
testFork(t, bman, 5, 5, f)
|
testFork(t, processor, 6, 4, full, equal)
|
||||||
testFork(t, bman, 6, 4, f)
|
testFork(t, processor, 9, 1, full, equal)
|
||||||
testFork(t, bman, 9, 1, f)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBrokenChain(t *testing.T) {
|
// Tests that chains missing links do not get accepted by the processor.
|
||||||
db, err := ethdb.NewMemDatabase()
|
func TestBrokenHeaderChain(t *testing.T) { testBrokenChain(t, false) }
|
||||||
|
func TestBrokenBlockChain(t *testing.T) { testBrokenChain(t, true) }
|
||||||
|
|
||||||
|
func testBrokenChain(t *testing.T, full bool) {
|
||||||
|
// Make chain starting from genesis
|
||||||
|
db, processor, err := newCanonical(10, full)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal("Failed to create db:", err)
|
t.Fatalf("failed to make new canonical chain: %v", err)
|
||||||
}
|
}
|
||||||
bman, err := newCanonical(10, db)
|
// Create a forked chain, and try to insert with a missing link
|
||||||
if err != nil {
|
if full {
|
||||||
t.Fatal("Could not make new canonical chain:", err)
|
chain := makeBlockChain(processor.bc.CurrentBlock(), 5, db, forkSeed)[1:]
|
||||||
}
|
if err := testBlockChainImport(chain, processor); err == nil {
|
||||||
db2, err := ethdb.NewMemDatabase()
|
t.Errorf("broken block chain not reported")
|
||||||
if err != nil {
|
}
|
||||||
t.Fatal("Failed to create db:", err)
|
} else {
|
||||||
}
|
chain := makeHeaderChain(processor.bc.CurrentHeader(), 5, db, forkSeed)[1:]
|
||||||
bman2, err := newCanonical(10, db2)
|
if err := testHeaderChainImport(chain, processor); err == nil {
|
||||||
if err != nil {
|
t.Errorf("broken header chain not reported")
|
||||||
t.Fatal("Could not make new canonical chain:", err)
|
}
|
||||||
}
|
|
||||||
bman2.bc.SetProcessor(bman2)
|
|
||||||
parent := bman2.bc.CurrentBlock()
|
|
||||||
chainB := makeChain(parent, 5, db2, forkSeed)
|
|
||||||
chainB = chainB[1:]
|
|
||||||
_, err = testChain(chainB, bman)
|
|
||||||
if err == nil {
|
|
||||||
t.Error("expected broken chain to return error")
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -376,7 +417,16 @@ type bproc struct{}
|
||||||
|
|
||||||
func (bproc) Process(*types.Block) (vm.Logs, types.Receipts, error) { return nil, nil, nil }
|
func (bproc) Process(*types.Block) (vm.Logs, types.Receipts, error) { return nil, nil, nil }
|
||||||
|
|
||||||
func makeChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Block {
|
func makeHeaderChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Header {
|
||||||
|
blocks := makeBlockChainWithDiff(genesis, d, seed)
|
||||||
|
headers := make([]*types.Header, len(blocks))
|
||||||
|
for i, block := range blocks {
|
||||||
|
headers[i] = block.Header()
|
||||||
|
}
|
||||||
|
return headers
|
||||||
|
}
|
||||||
|
|
||||||
|
func makeBlockChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Block {
|
||||||
var chain []*types.Block
|
var chain []*types.Block
|
||||||
for i, difficulty := range d {
|
for i, difficulty := range d {
|
||||||
header := &types.Header{
|
header := &types.Header{
|
||||||
|
@ -410,142 +460,209 @@ func chm(genesis *types.Block, db ethdb.Database) *BlockChain {
|
||||||
return bc
|
return bc
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestReorgLongest(t *testing.T) {
|
// Tests that reorganizing a long difficult chain after a short easy one
|
||||||
db, _ := ethdb.NewMemDatabase()
|
// overwrites the canonical numbers and links in the database.
|
||||||
|
func TestReorgLongHeaders(t *testing.T) { testReorgLong(t, false) }
|
||||||
|
func TestReorgLongBlocks(t *testing.T) { testReorgLong(t, true) }
|
||||||
|
|
||||||
genesis, err := WriteTestNetGenesisBlock(db, 0)
|
func testReorgLong(t *testing.T, full bool) {
|
||||||
if err != nil {
|
testReorg(t, []int{1, 2, 4}, []int{1, 2, 3, 4}, 10, full)
|
||||||
t.Error(err)
|
}
|
||||||
t.FailNow()
|
|
||||||
}
|
// Tests that reorganizing a short difficult chain after a long easy one
|
||||||
|
// overwrites the canonical numbers and links in the database.
|
||||||
|
func TestReorgShortHeaders(t *testing.T) { testReorgShort(t, false) }
|
||||||
|
func TestReorgShortBlocks(t *testing.T) { testReorgShort(t, true) }
|
||||||
|
|
||||||
|
func testReorgShort(t *testing.T, full bool) {
|
||||||
|
testReorg(t, []int{1, 2, 3, 4}, []int{1, 10}, 11, full)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testReorg(t *testing.T, first, second []int, td int64, full bool) {
|
||||||
|
// Create a pristine block chain
|
||||||
|
db, _ := ethdb.NewMemDatabase()
|
||||||
|
genesis, _ := WriteTestNetGenesisBlock(db, 0)
|
||||||
bc := chm(genesis, db)
|
bc := chm(genesis, db)
|
||||||
|
|
||||||
chain1 := makeChainWithDiff(genesis, []int{1, 2, 4}, 10)
|
// Insert an easy and a difficult chain afterwards
|
||||||
chain2 := makeChainWithDiff(genesis, []int{1, 2, 3, 4}, 11)
|
if full {
|
||||||
|
bc.InsertChain(makeBlockChainWithDiff(genesis, first, 11))
|
||||||
bc.InsertChain(chain1)
|
bc.InsertChain(makeBlockChainWithDiff(genesis, second, 22))
|
||||||
bc.InsertChain(chain2)
|
} else {
|
||||||
|
bc.InsertHeaderChain(makeHeaderChainWithDiff(genesis, first, 11), false)
|
||||||
prev := bc.CurrentBlock()
|
bc.InsertHeaderChain(makeHeaderChainWithDiff(genesis, second, 22), false)
|
||||||
for block := bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 1); block.NumberU64() != 0; prev, block = block, bc.GetBlockByNumber(block.NumberU64()-1) {
|
}
|
||||||
if prev.ParentHash() != block.Hash() {
|
// Check that the chain is valid number and link wise
|
||||||
t.Errorf("parent hash mismatch %x - %x", prev.ParentHash(), block.Hash())
|
if full {
|
||||||
|
prev := bc.CurrentBlock()
|
||||||
|
for block := bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 1); block.NumberU64() != 0; prev, block = block, bc.GetBlockByNumber(block.NumberU64()-1) {
|
||||||
|
if prev.ParentHash() != block.Hash() {
|
||||||
|
t.Errorf("parent block hash mismatch: have %x, want %x", prev.ParentHash(), block.Hash())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
prev := bc.CurrentHeader()
|
||||||
|
for header := bc.GetHeaderByNumber(bc.CurrentHeader().Number.Uint64() - 1); header.Number.Uint64() != 0; prev, header = header, bc.GetHeaderByNumber(header.Number.Uint64()-1) {
|
||||||
|
if prev.ParentHash != header.Hash() {
|
||||||
|
t.Errorf("parent header hash mismatch: have %x, want %x", prev.ParentHash, header.Hash())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Make sure the chain total difficulty is the correct one
|
||||||
|
want := new(big.Int).Add(genesis.Difficulty(), big.NewInt(td))
|
||||||
|
if full {
|
||||||
|
if have := bc.GetTd(bc.CurrentBlock().Hash()); have.Cmp(want) != 0 {
|
||||||
|
t.Errorf("total difficulty mismatch: have %v, want %v", have, want)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if have := bc.GetTd(bc.CurrentHeader().Hash()); have.Cmp(want) != 0 {
|
||||||
|
t.Errorf("total difficulty mismatch: have %v, want %v", have, want)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBadHashes(t *testing.T) {
|
// Tests that the insertion functions detect banned hashes.
|
||||||
|
func TestBadHeaderHashes(t *testing.T) { testBadHashes(t, false) }
|
||||||
|
func TestBadBlockHashes(t *testing.T) { testBadHashes(t, true) }
|
||||||
|
|
||||||
|
func testBadHashes(t *testing.T, full bool) {
|
||||||
|
// Create a pristine block chain
|
||||||
db, _ := ethdb.NewMemDatabase()
|
db, _ := ethdb.NewMemDatabase()
|
||||||
genesis, err := WriteTestNetGenesisBlock(db, 0)
|
genesis, _ := WriteTestNetGenesisBlock(db, 0)
|
||||||
if err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
t.FailNow()
|
|
||||||
}
|
|
||||||
bc := chm(genesis, db)
|
bc := chm(genesis, db)
|
||||||
|
|
||||||
chain := makeChainWithDiff(genesis, []int{1, 2, 4}, 10)
|
// Create a chain, ban a hash and try to import
|
||||||
BadHashes[chain[2].Header().Hash()] = true
|
var err error
|
||||||
|
if full {
|
||||||
_, err = bc.InsertChain(chain)
|
blocks := makeBlockChainWithDiff(genesis, []int{1, 2, 4}, 10)
|
||||||
|
BadHashes[blocks[2].Header().Hash()] = true
|
||||||
|
_, err = bc.InsertChain(blocks)
|
||||||
|
} else {
|
||||||
|
headers := makeHeaderChainWithDiff(genesis, []int{1, 2, 4}, 10)
|
||||||
|
BadHashes[headers[2].Hash()] = true
|
||||||
|
_, err = bc.InsertHeaderChain(headers, true)
|
||||||
|
}
|
||||||
if !IsBadHashError(err) {
|
if !IsBadHashError(err) {
|
||||||
t.Errorf("error mismatch: want: BadHashError, have: %v", err)
|
t.Errorf("error mismatch: want: BadHashError, have: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestReorgBadHashes(t *testing.T) {
|
// Tests that bad hashes are detected on boot, and the chan rolled back to a
|
||||||
|
// good state prior to the bad hash.
|
||||||
|
func TestReorgBadHeaderHashes(t *testing.T) { testReorgBadHashes(t, false) }
|
||||||
|
func TestReorgBadBlockHashes(t *testing.T) { testReorgBadHashes(t, true) }
|
||||||
|
|
||||||
|
func testReorgBadHashes(t *testing.T, full bool) {
|
||||||
|
// Create a pristine block chain
|
||||||
db, _ := ethdb.NewMemDatabase()
|
db, _ := ethdb.NewMemDatabase()
|
||||||
genesis, err := WriteTestNetGenesisBlock(db, 0)
|
genesis, _ := WriteTestNetGenesisBlock(db, 0)
|
||||||
if err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
t.FailNow()
|
|
||||||
}
|
|
||||||
bc := chm(genesis, db)
|
bc := chm(genesis, db)
|
||||||
|
|
||||||
chain := makeChainWithDiff(genesis, []int{1, 2, 3, 4}, 11)
|
// Create a chain, import and ban aferwards
|
||||||
bc.InsertChain(chain)
|
headers := makeHeaderChainWithDiff(genesis, []int{1, 2, 3, 4}, 10)
|
||||||
|
blocks := makeBlockChainWithDiff(genesis, []int{1, 2, 3, 4}, 10)
|
||||||
|
|
||||||
if chain[3].Header().Hash() != bc.LastBlockHash() {
|
if full {
|
||||||
t.Errorf("last block hash mismatch: want: %x, have: %x", chain[3].Header().Hash(), bc.LastBlockHash())
|
if _, err := bc.InsertChain(blocks); err != nil {
|
||||||
|
t.Fatalf("failed to import blocks: %v", err)
|
||||||
|
}
|
||||||
|
if bc.CurrentBlock().Hash() != blocks[3].Hash() {
|
||||||
|
t.Errorf("last block hash mismatch: have: %x, want %x", bc.CurrentBlock().Hash(), blocks[3].Header().Hash())
|
||||||
|
}
|
||||||
|
BadHashes[blocks[3].Header().Hash()] = true
|
||||||
|
defer func() { delete(BadHashes, blocks[3].Header().Hash()) }()
|
||||||
|
} else {
|
||||||
|
if _, err := bc.InsertHeaderChain(headers, true); err != nil {
|
||||||
|
t.Fatalf("failed to import headers: %v", err)
|
||||||
|
}
|
||||||
|
if bc.CurrentHeader().Hash() != headers[3].Hash() {
|
||||||
|
t.Errorf("last header hash mismatch: have: %x, want %x", bc.CurrentHeader().Hash(), headers[3].Hash())
|
||||||
|
}
|
||||||
|
BadHashes[headers[3].Hash()] = true
|
||||||
|
defer func() { delete(BadHashes, headers[3].Hash()) }()
|
||||||
}
|
}
|
||||||
|
// Create a new chain manager and check it rolled back the state
|
||||||
// NewChainManager should check BadHashes when loading it db
|
ncm, err := NewBlockChain(db, FakePow{}, new(event.TypeMux))
|
||||||
BadHashes[chain[3].Header().Hash()] = true
|
|
||||||
|
|
||||||
var eventMux event.TypeMux
|
|
||||||
ncm, err := NewBlockChain(db, FakePow{}, &eventMux)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("NewChainManager err: %s", err)
|
t.Fatalf("failed to create new chain manager: %v", err)
|
||||||
}
|
}
|
||||||
|
if full {
|
||||||
// check it set head to (valid) parent of bad hash block
|
if ncm.CurrentBlock().Hash() != blocks[2].Header().Hash() {
|
||||||
if chain[2].Header().Hash() != ncm.LastBlockHash() {
|
t.Errorf("last block hash mismatch: have: %x, want %x", ncm.CurrentBlock().Hash(), blocks[2].Header().Hash())
|
||||||
t.Errorf("last block hash mismatch: want: %x, have: %x", chain[2].Header().Hash(), ncm.LastBlockHash())
|
}
|
||||||
}
|
if blocks[2].Header().GasLimit.Cmp(ncm.GasLimit()) != 0 {
|
||||||
|
t.Errorf("last block gasLimit mismatch: have: %x, want %x", ncm.GasLimit(), blocks[2].Header().GasLimit)
|
||||||
if chain[2].Header().GasLimit.Cmp(ncm.GasLimit()) != 0 {
|
}
|
||||||
t.Errorf("current block gasLimit mismatch: want: %x, have: %x", chain[2].Header().GasLimit, ncm.GasLimit())
|
} else {
|
||||||
}
|
if ncm.CurrentHeader().Hash() != genesis.Hash() {
|
||||||
}
|
t.Errorf("last header hash mismatch: have: %x, want %x", ncm.CurrentHeader().Hash(), genesis.Hash())
|
||||||
|
|
||||||
func TestReorgShortest(t *testing.T) {
|
|
||||||
db, _ := ethdb.NewMemDatabase()
|
|
||||||
genesis, err := WriteTestNetGenesisBlock(db, 0)
|
|
||||||
if err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
t.FailNow()
|
|
||||||
}
|
|
||||||
bc := chm(genesis, db)
|
|
||||||
|
|
||||||
chain1 := makeChainWithDiff(genesis, []int{1, 2, 3, 4}, 10)
|
|
||||||
chain2 := makeChainWithDiff(genesis, []int{1, 10}, 11)
|
|
||||||
|
|
||||||
bc.InsertChain(chain1)
|
|
||||||
bc.InsertChain(chain2)
|
|
||||||
|
|
||||||
prev := bc.CurrentBlock()
|
|
||||||
 	for block := bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 1); block.NumberU64() != 0; prev, block = block, bc.GetBlockByNumber(block.NumberU64()-1) {
 		if prev.ParentHash() != block.Hash() {
 			t.Errorf("parent hash mismatch %x - %x", prev.ParentHash(), block.Hash())
 		}
 	}
 }
 
-func TestInsertNonceError(t *testing.T) {
+// Tests chain insertions in the face of one entity containing an invalid nonce.
+func TestHeadersInsertNonceError(t *testing.T) { testInsertNonceError(t, false) }
+func TestBlocksInsertNonceError(t *testing.T)  { testInsertNonceError(t, true) }
+
+func testInsertNonceError(t *testing.T, full bool) {
 	for i := 1; i < 25 && !t.Failed(); i++ {
-		db, _ := ethdb.NewMemDatabase()
-		genesis, err := WriteTestNetGenesisBlock(db, 0)
+		// Create a pristine chain and database
+		db, processor, err := newCanonical(0, full)
 		if err != nil {
-			t.Error(err)
-			t.FailNow()
+			t.Fatalf("failed to create pristine chain: %v", err)
 		}
-		bc := chm(genesis, db)
-		bc.processor = NewBlockProcessor(db, bc.pow, bc, bc.eventMux)
-		blocks := makeChain(bc.currentBlock, i, db, 0)
+		bc := processor.bc
 
-		fail := rand.Int() % len(blocks)
-		failblock := blocks[fail]
-		bc.pow = failPow{failblock.NumberU64()}
-		n, err := bc.InsertChain(blocks)
+		// Create and insert a chain with a failing nonce
+		var (
+			failAt   int
+			failRes  int
+			failNum  uint64
+			failHash common.Hash
+		)
+		if full {
+			blocks := makeBlockChain(processor.bc.CurrentBlock(), i, db, 0)
+
+			failAt = rand.Int() % len(blocks)
+			failNum = blocks[failAt].NumberU64()
+			failHash = blocks[failAt].Hash()
+
+			processor.bc.pow = failPow{failNum}
+			failRes, err = processor.bc.InsertChain(blocks)
+		} else {
+			headers := makeHeaderChain(processor.bc.CurrentHeader(), i, db, 0)
+
+			failAt = rand.Int() % len(headers)
+			failNum = headers[failAt].Number.Uint64()
+			failHash = headers[failAt].Hash()
+
+			processor.bc.pow = failPow{failNum}
+			failRes, err = processor.bc.InsertHeaderChain(headers, true)
+		}
 		// Check that the returned error indicates the nonce failure.
-		if n != fail {
-			t.Errorf("(i=%d) wrong failed block index: got %d, want %d", i, n, fail)
+		if failRes != failAt {
+			t.Errorf("test %d: failure index mismatch: have %d, want %d", i, failRes, failAt)
 		}
 		if !IsBlockNonceErr(err) {
-			t.Fatalf("(i=%d) got %q, want a nonce error", i, err)
+			t.Fatalf("test %d: error mismatch: have %v, want nonce error", i, err)
 		}
 		nerr := err.(*BlockNonceErr)
-		if nerr.Number.Cmp(failblock.Number()) != 0 {
-			t.Errorf("(i=%d) wrong block number in error, got %v, want %v", i, nerr.Number, failblock.Number())
+		if nerr.Number.Uint64() != failNum {
+			t.Errorf("test %d: number mismatch: have %v, want %v", i, nerr.Number, failNum)
 		}
-		if nerr.Hash != failblock.Hash() {
-			t.Errorf("(i=%d) wrong block hash in error, got %v, want %v", i, nerr.Hash, failblock.Hash())
+		if nerr.Hash != failHash {
+			t.Errorf("test %d: hash mismatch: have %x, want %x", i, nerr.Hash[:4], failHash[:4])
 		}
 
 		// Check that no blocks after the failing block have been inserted.
-		for _, block := range blocks[fail:] {
-			if bc.HasBlock(block.Hash()) {
-				t.Errorf("(i=%d) invalid block %d present in chain", i, block.NumberU64())
+		for j := 0; j < i-failAt; j++ {
+			if full {
+				if block := bc.GetBlockByNumber(failNum + uint64(j)); block != nil {
+					t.Errorf("test %d: invalid block in chain: %v", i, block)
+				}
+			} else {
+				if header := bc.GetHeaderByNumber(failNum + uint64(j)); header != nil {
+					t.Errorf("test %d: invalid header in chain: %v", i, header)
+				}
 			}
 		}
 	}
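The rewritten test funnels both insertion paths (full blocks vs. bare headers) through a single parameterised helper. As a rough illustration of how further tests could reuse the same pattern, here is a minimal sketch assuming it lives in the same core test package and relies on the newCanonical, makeBlockChain and makeHeaderChain helpers from this commit; TestFullChainExtension, TestHeaderChainExtension and testChainExtension are hypothetical names, not part of the change.

// Hypothetical companion test (not in this commit): extend a pristine chain by
// ten blocks or headers and check that the corresponding head advances.
func TestFullChainExtension(t *testing.T)   { testChainExtension(t, true) }
func TestHeaderChainExtension(t *testing.T) { testChainExtension(t, false) }

func testChainExtension(t *testing.T, full bool) {
	// Create a pristine chain and its backing database (helper from this diff)
	db, processor, err := newCanonical(0, full)
	if err != nil {
		t.Fatalf("failed to create pristine chain: %v", err)
	}
	bc := processor.bc

	if full {
		// Insert ten full blocks and expect the block head to move to #10
		blocks := makeBlockChain(bc.CurrentBlock(), 10, db, 0)
		if _, err := bc.InsertChain(blocks); err != nil {
			t.Fatalf("failed to insert block chain: %v", err)
		}
		if num := bc.CurrentBlock().NumberU64(); num != 10 {
			t.Errorf("head block mismatch: have %d, want %d", num, 10)
		}
	} else {
		// Insert ten bare headers and expect the header head to move to #10
		headers := makeHeaderChain(bc.CurrentHeader(), 10, db, 0)
		if _, err := bc.InsertHeaderChain(headers, true); err != nil {
			t.Fatalf("failed to insert header chain: %v", err)
		}
		if num := bc.CurrentHeader().Number.Uint64(); num != 10 {
			t.Errorf("head header mismatch: have %d, want %d", num, 10)
		}
	}
}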
@@ -211,25 +211,49 @@ func makeHeader(parent *types.Block, state *state.StateDB) *types.Header {
 	}
 }
 
-// newCanonical creates a new deterministic canonical chain by running
-// InsertChain on the result of makeChain.
-func newCanonical(n int, db ethdb.Database) (*BlockProcessor, error) {
+// newCanonical creates a chain database, and injects a deterministic canonical
+// chain. Depending on the full flag, it creates either a full block chain or a
+// header only chain.
+func newCanonical(n int, full bool) (ethdb.Database, *BlockProcessor, error) {
+	// Create the new chain database
+	db, _ := ethdb.NewMemDatabase()
 	evmux := &event.TypeMux{}
 
-	WriteTestNetGenesisBlock(db, 0)
-	chainman, _ := NewBlockChain(db, FakePow{}, evmux)
-	bman := NewBlockProcessor(db, FakePow{}, chainman, evmux)
-	bman.bc.SetProcessor(bman)
-	parent := bman.bc.CurrentBlock()
+	// Initialize a fresh chain with only a genesis block
+	genesis, _ := WriteTestNetGenesisBlock(db, 0)
+
+	blockchain, _ := NewBlockChain(db, FakePow{}, evmux)
+	processor := NewBlockProcessor(db, FakePow{}, blockchain, evmux)
+	processor.bc.SetProcessor(processor)
+
+	// Create and inject the requested chain
 	if n == 0 {
-		return bman, nil
+		return db, processor, nil
 	}
-	lchain := makeChain(parent, n, db, canonicalSeed)
-	_, err := bman.bc.InsertChain(lchain)
-	return bman, err
+	if full {
+		// Full block-chain requested
+		blocks := makeBlockChain(genesis, n, db, canonicalSeed)
+		_, err := blockchain.InsertChain(blocks)
+		return db, processor, err
+	}
+	// Header-only chain requested
+	headers := makeHeaderChain(genesis.Header(), n, db, canonicalSeed)
+	_, err := blockchain.InsertHeaderChain(headers, true)
+	return db, processor, err
 }
 
-func makeChain(parent *types.Block, n int, db ethdb.Database, seed int) []*types.Block {
+// makeHeaderChain creates a deterministic chain of headers rooted at parent.
+func makeHeaderChain(parent *types.Header, n int, db ethdb.Database, seed int) []*types.Header {
+	blocks := makeBlockChain(types.NewBlockWithHeader(parent), n, db, seed)
+
+	headers := make([]*types.Header, len(blocks))
+	for i, block := range blocks {
+		headers[i] = block.Header()
+	}
+	return headers
+}
+
+// makeBlockChain creates a deterministic chain of blocks rooted at parent.
+func makeBlockChain(parent *types.Block, n int, db ethdb.Database, seed int) []*types.Block {
 	return GenerateChain(parent, db, n, func(i int, b *BlockGen) {
 		b.SetCoinbase(common.Address{0: byte(seed), 19: byte(i)})
 	})
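newCanonical now hands back the chain database alongside the processor, so callers can keep generating blocks or headers on top of the injected chain. A minimal usage sketch, assuming the same core test package; exampleHeaderOnlySetup is a hypothetical name used only for illustration.

// Hypothetical usage of the new helpers (not in this commit): start from a
// canonical header-only chain of length 8, then grow it by four more headers.
func exampleHeaderOnlySetup() ([]*types.Header, error) {
	db, processor, err := newCanonical(8, false)
	if err != nil {
		return nil, err
	}
	// makeHeaderChain wraps makeBlockChain and strips the bodies, so the result
	// can be fed straight into InsertHeaderChain.
	extra := makeHeaderChain(processor.bc.CurrentHeader(), 4, db, 0)
	if _, err := processor.bc.InsertHeaderChain(extra, true); err != nil {
		return nil, err
	}
	return extra, nil
}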
@@ -184,7 +184,7 @@ var (
 // are ignored and set to values derived from the given txs, uncles
 // and receipts.
 func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt) *Block {
-	b := &Block{header: copyHeader(header), td: new(big.Int)}
+	b := &Block{header: CopyHeader(header), td: new(big.Int)}
 
 	// TODO: panic if len(txs) != len(receipts)
 	if len(txs) == 0 {
@@ -210,7 +210,7 @@ func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*
 		b.header.UncleHash = CalcUncleHash(uncles)
 		b.uncles = make([]*Header, len(uncles))
 		for i := range uncles {
-			b.uncles[i] = copyHeader(uncles[i])
+			b.uncles[i] = CopyHeader(uncles[i])
 		}
 	}
 
@@ -221,10 +221,12 @@ func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*
 // header data is copied, changes to header and to the field values
 // will not affect the block.
 func NewBlockWithHeader(header *Header) *Block {
-	return &Block{header: copyHeader(header)}
+	return &Block{header: CopyHeader(header)}
 }
 
-func copyHeader(h *Header) *Header {
+// CopyHeader creates a deep copy of a block header to prevent side effects from
+// modifying a header variable.
+func CopyHeader(h *Header) *Header {
 	cpy := *h
 	if cpy.Time = new(big.Int); h.Time != nil {
 		cpy.Time.Set(h.Time)
@@ -326,7 +328,7 @@ func (b *Block) ReceiptHash() common.Hash { return b.header.ReceiptHash }
 func (b *Block) UncleHash() common.Hash { return b.header.UncleHash }
 func (b *Block) Extra() []byte { return common.CopyBytes(b.header.Extra) }
 
-func (b *Block) Header() *Header { return copyHeader(b.header) }
+func (b *Block) Header() *Header { return CopyHeader(b.header) }
 
 func (b *Block) HashNoNonce() common.Hash {
 	return b.header.HashNoNonce()
@@ -370,13 +372,13 @@ func (b *Block) WithMiningResult(nonce uint64, mixDigest common.Hash) *Block {
 // WithBody returns a new block with the given transaction and uncle contents.
 func (b *Block) WithBody(transactions []*Transaction, uncles []*Header) *Block {
 	block := &Block{
-		header:       copyHeader(b.header),
+		header:       CopyHeader(b.header),
 		transactions: make([]*Transaction, len(transactions)),
 		uncles:       make([]*Header, len(uncles)),
 	}
 	copy(block.transactions, transactions)
 	for i := range uncles {
-		block.uncles[i] = copyHeader(uncles[i])
+		block.uncles[i] = CopyHeader(uncles[i])
 	}
 	return block
 }
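Exporting copyHeader as CopyHeader lets other packages take defensive copies of headers before caching or mutating them, and Block.Header already relies on it to keep blocks immutable. A small sketch of that guarantee, assuming the upstream go-ethereum import path of the time; headerCopyIsIsolated is a hypothetical function, not part of the commit.

package example

import (
	"bytes"

	"github.com/ethereum/go-ethereum/core/types"
)

// headerCopyIsIsolated is a hypothetical check (not in this commit): since
// Block.Header returns a CopyHeader deep copy, scribbling on the returned
// header must leave the block's own data untouched.
func headerCopyIsIsolated(b *types.Block) bool {
	original := b.Extra()         // snapshot of the block's extra data
	cpy := b.Header()             // deep copy produced by types.CopyHeader
	cpy.Extra = []byte("scratch") // mutate the copy only
	return bytes.Equal(b.Extra(), original)
}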
@@ -464,7 +464,7 @@ func (s *Ethereum) NodeInfo() *NodeInfo {
 		DiscPort:   int(node.UDP),
 		TCPPort:    int(node.TCP),
 		ListenAddr: s.net.ListenAddr,
-		Td:         s.BlockChain().Td().String(),
+		Td:         s.BlockChain().GetTd(s.BlockChain().CurrentBlock().Hash()).String(),
 	}
 }
 
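With the header chain tracked separately, the chain object no longer carries a single cached total difficulty; it is looked up per hash via GetTd, here for the current block head. A minimal sketch of that lookup pattern, assuming the BlockChain API as used in this diff; headBlockTd is a hypothetical helper name.

package example

import (
	"math/big"

	"github.com/ethereum/go-ethereum/core"
)

// headBlockTd is a hypothetical helper (not in this commit) showing the lookup
// pattern this change introduces: total difficulty is queried per block hash
// rather than read from a field on the chain.
func headBlockTd(bc *core.BlockChain) *big.Int {
	head := bc.CurrentBlock()    // current head of the block chain
	return bc.GetTd(head.Hash()) // TD stored and retrieved per hash
}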
@@ -589,15 +589,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 		}
 		request.Block.ReceivedAt = msg.ReceivedAt
 
-		// Mark the block's arrival for whatever reason
-		_, chainHead, _ := pm.blockchain.Status()
-		jsonlogger.LogJson(&logger.EthChainReceivedNewBlock{
-			BlockHash: request.Block.Hash().Hex(),
-			BlockNumber: request.Block.Number(),
-			ChainHeadHash: chainHead.Hex(),
-			BlockPrevHash: request.Block.ParentHash().Hex(),
-			RemoteId: p.ID().String(),
-		})
 		// Mark the peer as owning the block and schedule it for import
 		p.MarkBlock(request.Block.Hash())
 		p.SetHead(request.Block.Hash())
@@ -607,7 +598,8 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 		// Update the peer's total difficulty if needed, schedule a download if gapped
 		if request.TD.Cmp(p.Td()) > 0 {
 			p.SetTd(request.TD)
-			if request.TD.Cmp(new(big.Int).Add(pm.blockchain.Td(), request.Block.Difficulty())) > 0 {
+			td := pm.blockchain.GetTd(pm.blockchain.CurrentBlock().Hash())
+			if request.TD.Cmp(new(big.Int).Add(td, request.Block.Difficulty())) > 0 {
 				go pm.synchronise(p)
 			}
 		}
@@ -624,12 +616,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 				return errResp(ErrDecode, "transaction %d is nil", i)
 			}
 			p.MarkTransaction(tx.Hash())
-
-			// Log it's arrival for later analysis
-			jsonlogger.LogJson(&logger.EthTxReceived{
-				TxHash: tx.Hash().Hex(),
-				RemoteId: p.ID().String(),
-			})
 		}
 		pm.txpool.AddTransactions(txs)
 
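The announcement handler above keeps its original heuristic: a background sync is only scheduled when the peer's advertised total difficulty exceeds our head TD by more than the announced block's own difficulty, i.e. importing just that block could not catch us up. A self-contained sketch of that predicate; shouldSynchronise is a hypothetical name used for illustration.

package example

import "math/big"

// shouldSynchronise is a hypothetical restatement (not in this commit) of the
// gap check in handleMsg: trigger a sync only if the peer is further ahead
// than the single block it just announced.
func shouldSynchronise(peerTd, localTd, blockDifficulty *big.Int) bool {
	reachable := new(big.Int).Add(localTd, blockDifficulty) // TD after importing the announced block
	return peerTd.Cmp(reachable) > 0
}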
@@ -160,7 +160,8 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
 		return
 	}
 	// Make sure the peer's TD is higher than our own. If not drop.
-	if peer.Td().Cmp(pm.blockchain.Td()) <= 0 {
+	td := pm.blockchain.GetTd(pm.blockchain.CurrentBlock().Hash())
+	if peer.Td().Cmp(td) <= 0 {
 		return
 	}
 	// Otherwise try to sync with the downloader
@@ -146,13 +146,7 @@ func (self *debugApi) SetHead(req *shared.Request) (interface{}, error) {
 	if err := self.codec.Decode(req.Params, &args); err != nil {
 		return nil, shared.NewDecodeParamError(err.Error())
 	}
-
-	block := self.xeth.EthBlockByNumber(args.BlockNumber)
-	if block == nil {
-		return nil, fmt.Errorf("block #%d not found", args.BlockNumber)
-	}
-
-	self.ethereum.BlockChain().SetHead(block)
+	self.ethereum.BlockChain().SetHead(uint64(args.BlockNumber))
 
 	return nil, nil
 }
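The debug API now rewinds by block number and leaves resolution of the target entirely to the core package, since SetHead takes a uint64 height in this change. A minimal sketch of calling it directly, assuming the BlockChain API shown above; rewindChain is a hypothetical helper name.

package example

import "github.com/ethereum/go-ethereum/core"

// rewindChain is a hypothetical helper (not in this commit) built on the new
// SetHead signature used above: the canonical chain is rewound to the given
// block number, with no separate block lookup needed by the caller.
func rewindChain(bc *core.BlockChain, height uint64) {
	bc.SetHead(height)
}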