mirror of https://github.com/status-im/op-geth.git
core, consensus: pluggable consensus engines (#3817)
This commit adds pluggable consensus engines to go-ethereum. In short, it introduces a generic consensus interface, and refactors the entire codebase to use this interface.
This commit is contained in:
parent e50a5b7771
commit 09777952ee
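For orientation, here is a minimal sketch, assembled only from the call sites this commit touches (it is not part of the commit itself), of how a consensus engine is now constructed explicitly and handed to the blockchain:

    package main

    import (
        "github.com/ethereum/go-ethereum/consensus/ethash"
        "github.com/ethereum/go-ethereum/core"
        "github.com/ethereum/go-ethereum/core/vm"
        "github.com/ethereum/go-ethereum/ethdb"
        "github.com/ethereum/go-ethereum/event"
        "github.com/ethereum/go-ethereum/params"
    )

    func main() {
        // In-memory database and genesis, mirroring the simulated backend below.
        db, _ := ethdb.NewMemDatabase()
        genesis := core.Genesis{Config: params.AllProtocolChanges}
        genesis.MustCommit(db)

        // Pick an engine: a fake one for tests, or ethash.New(...) for real proof-of-work.
        engine := ethash.NewFaker()

        // The engine is now an explicit dependency of the blockchain.
        chain, err := core.NewBlockChain(db, genesis.Config, engine, new(event.TypeMux), vm.Config{})
        if err != nil {
            panic(err)
        }
        _ = chain
    }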
@@ -27,6 +27,7 @@ import (
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/math"
+	"github.com/ethereum/go-ethereum/consensus/ethash"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
@@ -34,7 +35,6 @@ import (
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/params"
-	"github.com/ethereum/go-ethereum/pow"
 )
 
 // This nil assignment ensures compile time that SimulatedBackend implements bind.ContractBackend.
@@ -61,7 +61,7 @@ func NewSimulatedBackend(alloc core.GenesisAlloc) *SimulatedBackend {
 	database, _ := ethdb.NewMemDatabase()
 	genesis := core.Genesis{Config: params.AllProtocolChanges, Alloc: alloc}
 	genesis.MustCommit(database)
-	blockchain, _ := core.NewBlockChain(database, genesis.Config, new(pow.FakePow), new(event.TypeMux), vm.Config{})
+	blockchain, _ := core.NewBlockChain(database, genesis.Config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
 	backend := &SimulatedBackend{database: database, blockchain: blockchain, config: genesis.Config}
 	backend.rollback()
 	return backend
@@ -20,10 +20,10 @@ import (
 	"errors"
 	"fmt"
 	"io/ioutil"
+	"strings"
 
 	"github.com/ethereum/go-ethereum/core/asm"
 	cli "gopkg.in/urfave/cli.v1"
-	"strings"
 )
 
 var disasmCommand = cli.Command{
@@ -303,7 +303,15 @@ func startNode(ctx *cli.Context, stack *node.Node) {
 		if err := stack.Service(&ethereum); err != nil {
 			utils.Fatalf("ethereum service not running: %v", err)
 		}
-		if err := ethereum.StartMining(ctx.GlobalInt(utils.MinerThreadsFlag.Name)); err != nil {
+		if threads := ctx.GlobalInt(utils.MinerThreadsFlag.Name); threads > 0 {
+			type threaded interface {
+				SetThreads(threads int)
+			}
+			if th, ok := ethereum.Engine().(threaded); ok {
+				th.SetThreads(threads)
+			}
+		}
+		if err := ethereum.StartMining(); err != nil {
 			utils.Fatalf("Failed to start mining: %v", err)
 		}
 	}
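The hunk above uses a small Go idiom: the optional SetThreads capability is discovered through a type assertion against a locally declared interface, so engines that cannot mine are simply skipped. A stand-alone sketch of the same pattern (the helper name is invented for illustration):

    package main

    // configureThreads probes an engine for an optional capability: only engines
    // that expose SetThreads (i.e. ones that can actually mine) are configured,
    // everything else is silently skipped.
    func configureThreads(engine interface{}, threads int) {
        type threaded interface {
            SetThreads(threads int)
        }
        if th, ok := engine.(threaded); ok {
            th.SetThreads(threads)
        }
    }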
@@ -26,9 +26,9 @@ import (
 	"strings"
 
 	"github.com/ethereum/go-ethereum/cmd/utils"
+	"github.com/ethereum/go-ethereum/consensus/ethash"
 	"github.com/ethereum/go-ethereum/eth"
 	"github.com/ethereum/go-ethereum/params"
-	"github.com/ethereum/go-ethereum/pow"
 	"gopkg.in/urfave/cli.v1"
 )
 
@@ -87,7 +87,7 @@ func makedag(ctx *cli.Context) error {
 				utils.Fatalf("Can't find dir")
 			}
 			fmt.Println("making DAG, this could take awhile...")
-			pow.MakeDataset(blockNum, dir)
+			ethash.MakeDataset(blockNum, dir)
 		}
 	default:
 		wrongArgs()
@@ -32,6 +32,7 @@ import (
 	"github.com/ethereum/go-ethereum/accounts"
 	"github.com/ethereum/go-ethereum/accounts/keystore"
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/consensus/ethash"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/vm"
@@ -49,7 +50,6 @@ import (
 	"github.com/ethereum/go-ethereum/p2p/nat"
 	"github.com/ethereum/go-ethereum/p2p/netutil"
 	"github.com/ethereum/go-ethereum/params"
-	"github.com/ethereum/go-ethereum/pow"
 	"github.com/ethereum/go-ethereum/rpc"
 	whisper "github.com/ethereum/go-ethereum/whisper/whisperv2"
 	"gopkg.in/urfave/cli.v1"
@@ -149,7 +149,7 @@ var (
 	}
 	TestNetFlag = cli.BoolFlag{
 		Name:  "testnet",
-		Usage: "Ropsten network: pre-configured test network",
+		Usage: "Ropsten network: pre-configured proof-of-work test network",
 	}
 	DevModeFlag = cli.BoolFlag{
 		Name: "dev",
@@ -921,16 +921,16 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
 	var err error
 	chainDb = MakeChainDatabase(ctx, stack)
 
-	seal := pow.PoW(pow.FakePow{})
+	engine := ethash.NewFaker()
 	if !ctx.GlobalBool(FakePoWFlag.Name) {
-		seal = pow.NewFullEthash("", 1, 0, "", 1, 0)
+		engine = ethash.New("", 1, 0, "", 1, 0)
 	}
 	config, _, err := core.SetupGenesisBlock(chainDb, MakeGenesis(ctx))
 	if err != nil {
 		Fatalf("%v", err)
 	}
 	vmcfg := vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)}
-	chain, err = core.NewBlockChain(chainDb, config, seal, new(event.TypeMux), vmcfg)
+	chain, err = core.NewBlockChain(chainDb, config, engine, new(event.TypeMux), vmcfg)
 	if err != nil {
 		Fatalf("Can't create BlockChain: %v", err)
 	}
@@ -0,0 +1,94 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package consensus implements different Ethereum consensus engines.
+package consensus
+
+import (
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/state"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/params"
+	"github.com/ethereum/go-ethereum/rpc"
+)
+
+// ChainReader defines a small collection of methods needed to access the local
+// blockchain during header and/or uncle verification.
+type ChainReader interface {
+	// Config retrieves the blockchain's chain configuration.
+	Config() *params.ChainConfig
+
+	// CurrentHeader retrieves the current header from the local chain.
+	CurrentHeader() *types.Header
+
+	// GetHeader retrieves a block header from the database by hash and number.
+	GetHeader(hash common.Hash, number uint64) *types.Header
+
+	// GetHeaderByNumber retrieves a block header from the database by number.
+	GetHeaderByNumber(number uint64) *types.Header
+
+	// GetBlock retrieves a block from the database by hash and number.
+	GetBlock(hash common.Hash, number uint64) *types.Block
+}
+
+// Engine is an algorithm agnostic consensus engine.
+type Engine interface {
+	// VerifyHeader checks whether a header conforms to the consensus rules of a
+	// given engine. Verifying the seal may be done optionally here, or explicitly
+	// via the VerifySeal method.
+	VerifyHeader(chain ChainReader, header *types.Header, seal bool) error
+
+	// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
+	// concurrently. The method returns a quit channel to abort the operations and
+	// a results channel to retrieve the async verifications (the order is that of
+	// the input slice).
+	VerifyHeaders(chain ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error)
+
+	// VerifyUncles verifies that the given block's uncles conform to the consensus
+	// rules of a given engine.
+	VerifyUncles(chain ChainReader, block *types.Block) error
+
+	// VerifySeal checks whether the crypto seal on a header is valid according to
+	// the consensus rules of the given engine.
+	VerifySeal(chain ChainReader, header *types.Header) error
+
+	// Prepare initializes the consensus fields of a block header according to the
+	// rules of a particular engine. The changes are executed inline.
+	Prepare(chain ChainReader, header *types.Header) error
+
+	// Finalize runs any post-transaction state modifications (e.g. block rewards)
+	// and assembles the final block.
+	//
+	// Note, the block header and state database might be updated to reflect any
+	// consensus rules that happen at finalization (e.g. block rewards).
+	Finalize(chain ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction,
+		uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error)
+
+	// Seal generates a new block for the given input block with the local miner's
+	// seal place on top.
+	Seal(chain ChainReader, block *types.Block, stop <-chan struct{}) (*types.Block, error)
+
+	// APIs returns the RPC APIs this consensus engine provides.
+	APIs(chain ChainReader) []rpc.API
+}
+
+// PoW is a consensus engine based on proof-of-work.
+type PoW interface {
+	Engine
+
+	// Hashrate returns the current mining hashrate of a PoW consensus engine.
+	Hashrate() float64
+}
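To see what the interface above demands of an implementation, here is a hypothetical no-op engine that satisfies consensus.Engine; the type name and its trivial behaviour are illustrative only and are not part of this commit:

    package myengine

    import (
        "github.com/ethereum/go-ethereum/consensus"
        "github.com/ethereum/go-ethereum/core/state"
        "github.com/ethereum/go-ethereum/core/types"
        "github.com/ethereum/go-ethereum/rpc"
    )

    // NullEngine accepts everything; a real engine enforces its own rules in each method.
    type NullEngine struct{}

    func (e *NullEngine) VerifyHeader(chain consensus.ChainReader, header *types.Header, seal bool) error {
        return nil
    }

    func (e *NullEngine) VerifyHeaders(chain consensus.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
        // Report every header as valid, in input order, as the interface requires.
        abort, results := make(chan struct{}), make(chan error, len(headers))
        for range headers {
            results <- nil
        }
        return abort, results
    }

    func (e *NullEngine) VerifyUncles(chain consensus.ChainReader, block *types.Block) error { return nil }

    func (e *NullEngine) VerifySeal(chain consensus.ChainReader, header *types.Header) error { return nil }

    func (e *NullEngine) Prepare(chain consensus.ChainReader, header *types.Header) error { return nil }

    func (e *NullEngine) Finalize(chain consensus.ChainReader, header *types.Header, state *state.StateDB,
        txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) {
        // No rewards, no state changes: just assemble the block.
        return types.NewBlock(header, txs, uncles, receipts), nil
    }

    func (e *NullEngine) Seal(chain consensus.ChainReader, block *types.Block, stop <-chan struct{}) (*types.Block, error) {
        return block, nil // no seal at all; ethash searches for a valid nonce here
    }

    func (e *NullEngine) APIs(chain consensus.ChainReader) []rpc.API { return nil }

    // Compile-time check that the skeleton satisfies the interface.
    var _ consensus.Engine = (*NullEngine)(nil)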
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 
-package pow
+package ethash
 
 import (
 	"encoding/binary"
@@ -16,7 +16,7 @@
 
 // +build !go1.8
 
-package pow
+package ethash
 
 // cacheSize calculates and returns the size of the ethash verification cache that
 // belongs to a certain block number. The cache size grows linearly, however, we
@@ -16,7 +16,7 @@
 
 // +build go1.8
 
-package pow
+package ethash
 
 import "math/big"
 
@@ -16,7 +16,7 @@
 
 // +build go1.8
 
-package pow
+package ethash
 
 import "testing"
 
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 
-package pow
+package ethash
 
 import (
 	"bytes"
@@ -704,8 +704,8 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) {
 		go func(idx int) {
 			defer pend.Done()
 
-			ethash := NewFullEthash(cachedir, 0, 1, "", 0, 0)
-			if err := ethash.Verify(block); err != nil {
+			ethash := New(cachedir, 0, 1, "", 0, 0)
+			if err := ethash.VerifySeal(nil, block.Header()); err != nil {
 				t.Errorf("proc %d: block verification failed: %v", idx, err)
 			}
 		}(i)
@@ -713,17 +713,6 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) {
 	pend.Wait()
 }
 
-func TestTestMode(t *testing.T) {
-	head := &types.Header{Difficulty: big.NewInt(100)}
-	ethash := NewTestEthash()
-	nonce, mix := ethash.Search(types.NewBlockWithHeader(head), nil)
-	head.Nonce = types.EncodeNonce(nonce)
-	copy(head.MixDigest[:], mix)
-	if err := ethash.Verify(types.NewBlockWithHeader(head)); err != nil {
-		t.Error("unexpected Verify error:", err)
-	}
-}
-
 // Benchmarks the cache generation performance.
 func BenchmarkCacheGeneration(b *testing.B) {
 	for i := 0; i < b.N; i++ {
@@ -0,0 +1,496 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package ethash
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"math/big"
+	"runtime"
+	"sync/atomic"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/math"
+	"github.com/ethereum/go-ethereum/consensus"
+	"github.com/ethereum/go-ethereum/consensus/misc"
+	"github.com/ethereum/go-ethereum/core/state"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/params"
+	set "gopkg.in/fatih/set.v0"
+)
+
+// Ethash proof-of-work protocol constants.
+var (
+	blockReward *big.Int = big.NewInt(5e+18) // Block reward in wei for successfully mining a block
+	maxUncles            = 2                 // Maximum number of uncles allowed in a single block
+)
+
+var (
+	ErrInvalidChain        = errors.New("invalid header chain")
+	ErrParentUnknown       = errors.New("parent not known locally")
+	ErrFutureBlock         = errors.New("block in the future")
+	ErrLargeBlockTimestamp = errors.New("timestamp too big")
+	ErrZeroBlockTime       = errors.New("timestamp equals parent's")
+	ErrInvalidNumber       = errors.New("invalid block number")
+	ErrTooManyUncles       = errors.New("too many uncles")
+	ErrDuplicateUncle      = errors.New("duplicate uncle")
+	ErrUncleIsAncestor     = errors.New("uncle is ancestor")
+	ErrDanglingUncle       = errors.New("uncle's parent is not ancestor")
+	ErrNonceOutOfRange     = errors.New("nonce out of range")
+	ErrInvalidDifficulty   = errors.New("non-positive difficulty")
+	ErrInvalidMixDigest    = errors.New("invalid mix digest")
+	ErrInvalidPoW          = errors.New("invalid proof-of-work")
+)
+
+// VerifyHeader checks whether a header conforms to the consensus rules of the
+// stock Ethereum ethash engine.
+func (ethash *Ethash) VerifyHeader(chain consensus.ChainReader, header *types.Header, seal bool) error {
+	// If we're running a full engine faking, accept any input as valid
+	if ethash.fakeFull {
+		return nil
+	}
+	// Short circuit if the header is known, or it's parent not
+	number := header.Number.Uint64()
+	if chain.GetHeader(header.Hash(), number) != nil {
+		return nil
+	}
+	parent := chain.GetHeader(header.ParentHash, number-1)
+	if parent == nil {
+		return ErrParentUnknown
+	}
+	// Sanity checks passed, do a proper verification
+	return ethash.verifyHeader(chain, header, parent, false, seal)
+}
+
+// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
+// concurrently. The method returns a quit channel to abort the operations and
+// a results channel to retrieve the async verifications.
+func (ethash *Ethash) VerifyHeaders(chain consensus.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
+	// If we're running a full engine faking, accept any input as valid
+	if ethash.fakeFull {
+		abort, results := make(chan struct{}), make(chan error, len(headers))
+		for i := 0; i < len(headers); i++ {
+			results <- nil
+		}
+		return abort, results
+	}
+	// Spawn as many workers as allowed threads
+	workers := runtime.GOMAXPROCS(0)
+	if len(headers) < workers {
+		workers = len(headers)
+	}
+	// Create a task channel and spawn the verifiers
+	type result struct {
+		index int
+		err   error
+	}
+	inputs := make(chan int, workers)
+	outputs := make(chan result, len(headers))
+
+	var badblock uint64
+	for i := 0; i < workers; i++ {
+		go func() {
+			for index := range inputs {
+				// If we've found a bad block already before this, stop validating
+				if bad := atomic.LoadUint64(&badblock); bad != 0 && bad <= headers[index].Number.Uint64() {
+					outputs <- result{index: index, err: ErrInvalidChain}
+					continue
+				}
+				// We need to look up the first parent
+				var parent *types.Header
+				if index == 0 {
+					parent = chain.GetHeader(headers[0].ParentHash, headers[0].Number.Uint64()-1)
+				} else if headers[index-1].Hash() == headers[index].ParentHash {
+					parent = headers[index-1]
+				}
+				// Ensure the validation is useful and execute it
+				var failure error
+				switch {
+				case chain.GetHeader(headers[index].Hash(), headers[index].Number.Uint64()-1) != nil:
+					outputs <- result{index: index, err: nil}
+				case parent == nil:
+					failure = ErrParentUnknown
+					outputs <- result{index: index, err: failure}
+				default:
+					failure = ethash.verifyHeader(chain, headers[index], parent, false, seals[index])
+					outputs <- result{index: index, err: failure}
+				}
+				// If a validation failure occurred, mark subsequent blocks invalid
+				if failure != nil {
+					number := headers[index].Number.Uint64()
+					if prev := atomic.LoadUint64(&badblock); prev == 0 || prev > number {
+						// This two step atomic op isn't thread-safe in that `badblock` might end
+						// up slightly higher than the block number of the first failure (if many
+						// workers try to write at the same time), but it's fine as we're mostly
+						// interested to avoid large useless work, we don't care about 1-2 extra
+						// runs. Doing "full thread safety" would involve mutexes, which would be
+						// a noticeable sync overhead on the fast spinning worker routines.
+						atomic.StoreUint64(&badblock, number)
+					}
+				}
+			}
+		}()
+	}
+	// Feed item indices to the workers until done, sorting and feeding the results to the caller
+	dones := make([]bool, len(headers))
+	errors := make([]error, len(headers))
+
+	abort := make(chan struct{})
+	returns := make(chan error, len(headers))
+
+	go func() {
+		defer close(inputs)
+
+		input, output := 0, 0
+		for i := 0; i < len(headers)*2; i++ {
+			var res result
+
+			// If there are tasks left, push to workers
+			if input < len(headers) {
+				select {
+				case inputs <- input:
+					input++
+					continue
+				case <-abort:
+					return
+				case res = <-outputs:
+				}
+			} else {
+				// Otherwise keep waiting for results
+				select {
+				case <-abort:
+					return
+				case res = <-outputs:
+				}
+			}
+			// A result arrived, save and propagate if next
+			dones[res.index], errors[res.index] = true, res.err
+			for output < len(headers) && dones[output] {
+				returns <- errors[output]
+				output++
+			}
+		}
+	}()
+	return abort, returns
+}
+
+// VerifyUncles verifies that the given block's uncles conform to the consensus
+// rules of the stock Ethereum ethash engine.
+func (ethash *Ethash) VerifyUncles(chain consensus.ChainReader, block *types.Block) error {
+	// If we're running a full engine faking, accept any input as valid
+	if ethash.fakeFull {
+		return nil
+	}
+	// Verify that there are at most 2 uncles included in this block
+	if len(block.Uncles()) > maxUncles {
+		return ErrTooManyUncles
+	}
+	// Gather the set of past uncles and ancestors
+	uncles, ancestors := set.New(), make(map[common.Hash]*types.Header)
+
+	number, parent := block.NumberU64()-1, block.ParentHash()
+	for i := 0; i < 7; i++ {
+		ancestor := chain.GetBlock(parent, number)
+		if ancestor == nil {
+			break
+		}
+		ancestors[ancestor.Hash()] = ancestor.Header()
+		for _, uncle := range ancestor.Uncles() {
+			uncles.Add(uncle.Hash())
+		}
+		parent, number = ancestor.ParentHash(), number-1
+	}
+	ancestors[block.Hash()] = block.Header()
+	uncles.Add(block.Hash())
+
+	// Verify each of the uncles that it's recent, but not an ancestor
+	for _, uncle := range block.Uncles() {
+		// Make sure every uncle is rewarded only once
+		hash := uncle.Hash()
+		if uncles.Has(hash) {
+			return ErrDuplicateUncle
+		}
+		uncles.Add(hash)
+
+		// Make sure the uncle has a valid ancestry
+		if ancestors[hash] != nil {
+			return ErrUncleIsAncestor
+		}
+		if ancestors[uncle.ParentHash] == nil || uncle.ParentHash == block.ParentHash() {
+			return ErrDanglingUncle
+		}
+		if err := ethash.verifyHeader(chain, uncle, ancestors[uncle.ParentHash], true, true); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// verifyHeader checks whether a header conforms to the consensus rules of the
+// stock Ethereum ethash engine.
+//
+// See YP section 4.3.4. "Block Header Validity"
+func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *types.Header, uncle bool, seal bool) error {
+	// Ensure that the header's extra-data section is of a reasonable size
+	if uint64(len(header.Extra)) > params.MaximumExtraDataSize {
+		return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra), params.MaximumExtraDataSize)
+	}
+	// Verify the header's timestamp
+	if uncle {
+		if header.Time.Cmp(math.MaxBig256) > 0 {
+			return ErrLargeBlockTimestamp
+		}
+	} else {
+		if header.Time.Cmp(big.NewInt(time.Now().Unix())) > 0 {
+			return ErrFutureBlock
+		}
+	}
+	if header.Time.Cmp(parent.Time) <= 0 {
+		return ErrZeroBlockTime
+	}
+	// Verify the block's difficulty based in it's timestamp and parent's difficulty
+	expected := CalcDifficulty(chain.Config(), header.Time.Uint64(), parent.Time.Uint64(), parent.Number, parent.Difficulty)
+	if expected.Cmp(header.Difficulty) != 0 {
+		return fmt.Errorf("invalid difficulty: have %v, want %v", header.Difficulty, expected)
+	}
+	// Verify that the gas limit remains within allowed bounds
+	diff := new(big.Int).Set(parent.GasLimit)
+	diff = diff.Sub(diff, header.GasLimit)
+	diff.Abs(diff)
+
+	limit := new(big.Int).Set(parent.GasLimit)
+	limit = limit.Div(limit, params.GasLimitBoundDivisor)
+
+	if diff.Cmp(limit) >= 0 || header.GasLimit.Cmp(params.MinGasLimit) < 0 {
+		return fmt.Errorf("invalid gas limit: have %v, want %v += %v", header.GasLimit, parent.GasLimit, limit)
+	}
+	// Verify that the block number is parent's +1
+	if diff := new(big.Int).Sub(header.Number, parent.Number); diff.Cmp(big.NewInt(1)) != 0 {
+		return ErrInvalidNumber
+	}
+	// Verify the engine specific seal securing the block
+	if seal {
+		if err := ethash.VerifySeal(chain, header); err != nil {
+			return err
+		}
+	}
+	// If all checks passed, validate any special fields for hard forks
+	if err := misc.VerifyDAOHeaderExtraData(chain.Config(), header); err != nil {
+		return err
+	}
+	if err := misc.VerifyForkHashes(chain.Config(), header, uncle); err != nil {
+		return err
+	}
+	return nil
+}
+
+// CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
+// that a new block should have when created at time given the parent block's time
+// and difficulty.
+//
+// TODO (karalabe): Move the chain maker into this package and make this private!
+func CalcDifficulty(config *params.ChainConfig, time, parentTime uint64, parentNumber, parentDiff *big.Int) *big.Int {
+	if config.IsHomestead(new(big.Int).Add(parentNumber, common.Big1)) {
+		return calcDifficultyHomestead(time, parentTime, parentNumber, parentDiff)
+	}
+	return calcDifficultyFrontier(time, parentTime, parentNumber, parentDiff)
+}
+
+// Some weird constants to avoid constant memory allocs for them.
+var (
+	expDiffPeriod = big.NewInt(100000)
+	big10         = big.NewInt(10)
+	bigMinus99    = big.NewInt(-99)
+)
+
+// calcDifficultyHomestead is the difficulty adjustment algorithm. It returns
+// the difficulty that a new block should have when created at time given the
+// parent block's time and difficulty. The calculation uses the Homestead rules.
+func calcDifficultyHomestead(time, parentTime uint64, parentNumber, parentDiff *big.Int) *big.Int {
+	// https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2.mediawiki
+	// algorithm:
+	// diff = (parent_diff +
+	//         (parent_diff / 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99))
+	//        ) + 2^(periodCount - 2)
+
+	bigTime := new(big.Int).SetUint64(time)
+	bigParentTime := new(big.Int).SetUint64(parentTime)
+
+	// holds intermediate values to make the algo easier to read & audit
+	x := new(big.Int)
+	y := new(big.Int)
+
+	// 1 - (block_timestamp -parent_timestamp) // 10
+	x.Sub(bigTime, bigParentTime)
+	x.Div(x, big10)
+	x.Sub(common.Big1, x)
+
+	// max(1 - (block_timestamp - parent_timestamp) // 10, -99)))
+	if x.Cmp(bigMinus99) < 0 {
+		x.Set(bigMinus99)
+	}
+	// (parent_diff + parent_diff // 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99))
+	y.Div(parentDiff, params.DifficultyBoundDivisor)
+	x.Mul(y, x)
+	x.Add(parentDiff, x)
+
+	// minimum difficulty can ever be (before exponential factor)
+	if x.Cmp(params.MinimumDifficulty) < 0 {
+		x.Set(params.MinimumDifficulty)
+	}
+	// for the exponential factor
+	periodCount := new(big.Int).Add(parentNumber, common.Big1)
+	periodCount.Div(periodCount, expDiffPeriod)
+
+	// the exponential factor, commonly referred to as "the bomb"
+	// diff = diff + 2^(periodCount - 2)
+	if periodCount.Cmp(common.Big1) > 0 {
+		y.Sub(periodCount, common.Big2)
+		y.Exp(common.Big2, y, nil)
+		x.Add(x, y)
+	}
+	return x
+}
+
+// calcDifficultyFrontier is the difficulty adjustment algorithm. It returns the
+// difficulty that a new block should have when created at time given the parent
+// block's time and difficulty. The calculation uses the Frontier rules.
+func calcDifficultyFrontier(time, parentTime uint64, parentNumber, parentDiff *big.Int) *big.Int {
+	diff := new(big.Int)
+	adjust := new(big.Int).Div(parentDiff, params.DifficultyBoundDivisor)
+	bigTime := new(big.Int)
+	bigParentTime := new(big.Int)
+
+	bigTime.SetUint64(time)
+	bigParentTime.SetUint64(parentTime)
+
+	if bigTime.Sub(bigTime, bigParentTime).Cmp(params.DurationLimit) < 0 {
+		diff.Add(parentDiff, adjust)
+	} else {
+		diff.Sub(parentDiff, adjust)
+	}
+	if diff.Cmp(params.MinimumDifficulty) < 0 {
+		diff.Set(params.MinimumDifficulty)
+	}
+
+	periodCount := new(big.Int).Add(parentNumber, common.Big1)
+	periodCount.Div(periodCount, expDiffPeriod)
+	if periodCount.Cmp(common.Big1) > 0 {
+		// diff = diff + 2^(periodCount - 2)
+		expDiff := periodCount.Sub(periodCount, common.Big2)
+		expDiff.Exp(common.Big2, expDiff, nil)
+		diff.Add(diff, expDiff)
+		diff = math.BigMax(diff, params.MinimumDifficulty)
+	}
+	return diff
+}
+
+// VerifySeal implements consensus.Engine, checking whether the given block satisfies
+// the PoW difficulty requirements.
+func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Header) error {
+	// If we're running a fake PoW, accept any seal as valid
+	if ethash.fakeMode {
+		time.Sleep(ethash.fakeDelay)
+		if ethash.fakeFail == header.Number.Uint64() {
+			return ErrInvalidPoW
+		}
+		return nil
+	}
+	// If we're running a shared PoW, delegate verification to it
+	if ethash.shared != nil {
+		return ethash.shared.VerifySeal(chain, header)
+	}
+	// Sanity check that the block number is below the lookup table size (60M blocks)
+	number := header.Number.Uint64()
+	if number/epochLength >= uint64(len(cacheSizes)) {
+		// Go < 1.7 cannot calculate new cache/dataset sizes (no fast prime check)
+		return ErrNonceOutOfRange
+	}
+	// Ensure that we have a valid difficulty for the block
+	if header.Difficulty.Sign() <= 0 {
+		return ErrInvalidDifficulty
+	}
+	// Recompute the digest and PoW value and verify against the header
+	cache := ethash.cache(number)
+
+	size := datasetSize(number)
+	if ethash.tester {
+		size = 32 * 1024
+	}
+	digest, result := hashimotoLight(size, cache, header.HashNoNonce().Bytes(), header.Nonce.Uint64())
+	if !bytes.Equal(header.MixDigest[:], digest) {
+		return ErrInvalidMixDigest
+	}
+	target := new(big.Int).Div(maxUint256, header.Difficulty)
+	if new(big.Int).SetBytes(result).Cmp(target) > 0 {
+		return ErrInvalidPoW
+	}
+	return nil
+}
+
+// Prepare implements consensus.Engine, initializing the difficulty field of a
+// header to conform to the ethash protocol. The changes are done inline.
+func (ethash *Ethash) Prepare(chain consensus.ChainReader, header *types.Header) error {
+	parent := chain.GetHeader(header.ParentHash, header.Number.Uint64()-1)
+	if parent == nil {
+		return ErrParentUnknown
+	}
+	header.Difficulty = CalcDifficulty(chain.Config(), header.Time.Uint64(),
+		parent.Time.Uint64(), parent.Number, parent.Difficulty)
+
+	return nil
+}
+
+// Finalize implements consensus.Engine, accumulating the block and uncle rewards,
+// setting the final state and assembling the block.
+func (ethash *Ethash) Finalize(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) {
+	// Accumulate any block and uncle rewards and commit the final state root
+	AccumulateRewards(state, header, uncles)
+	header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
+
+	// Header seems complete, assemble into a block and return
+	return types.NewBlock(header, txs, uncles, receipts), nil
+}
+
+// Some weird constants to avoid constant memory allocs for them.
+var (
+	big8  = big.NewInt(8)
+	big32 = big.NewInt(32)
+)
+
+// AccumulateRewards credits the coinbase of the given block with the mining
+// reward. The total reward consists of the static block reward and rewards for
+// included uncles. The coinbase of each uncle block is also rewarded.
+//
+// TODO (karalabe): Move the chain maker into this package and make this private!
+func AccumulateRewards(state *state.StateDB, header *types.Header, uncles []*types.Header) {
+	reward := new(big.Int).Set(blockReward)
+	r := new(big.Int)
+	for _, uncle := range uncles {
+		r.Add(uncle.Number, big8)
+		r.Sub(r, header.Number)
+		r.Mul(r, blockReward)
+		r.Div(r, big8)
+		state.AddBalance(uncle.Coinbase, r)
+
+		r.Div(blockReward, big32)
+		reward.Add(reward, r)
+	}
+	state.AddBalance(header.Coinbase, reward)
+}
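A hypothetical caller of the batch verifier above might look as follows; the helper name is invented, but the channel semantics (results delivered in input order, abort closed to stop the remaining workers) follow the code in this hunk:

    package example

    import (
        "github.com/ethereum/go-ethereum/consensus"
        "github.com/ethereum/go-ethereum/core/types"
    )

    // verifyBatch drains the results channel and bails out on the first error,
    // closing the abort channel so the engine's workers stop early.
    func verifyBatch(engine consensus.Engine, chain consensus.ChainReader, headers []*types.Header) error {
        seals := make([]bool, len(headers))
        for i := range seals {
            seals[i] = true // also check the proof-of-work seal on every header
        }
        abort, results := engine.VerifyHeaders(chain, headers, seals)
        defer close(abort)

        for range headers {
            if err := <-results; err != nil {
                return err // results arrive in input order, so this is the first bad header
            }
        }
        return nil
    }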
@@ -0,0 +1,79 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package ethash
+
+import (
+	"encoding/json"
+	"math/big"
+	"os"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common/math"
+	"github.com/ethereum/go-ethereum/params"
+)
+
+type diffTest struct {
+	ParentTimestamp    uint64
+	ParentDifficulty   *big.Int
+	CurrentTimestamp   uint64
+	CurrentBlocknumber *big.Int
+	CurrentDifficulty  *big.Int
+}
+
+func (d *diffTest) UnmarshalJSON(b []byte) (err error) {
+	var ext struct {
+		ParentTimestamp    string
+		ParentDifficulty   string
+		CurrentTimestamp   string
+		CurrentBlocknumber string
+		CurrentDifficulty  string
+	}
+	if err := json.Unmarshal(b, &ext); err != nil {
+		return err
+	}
+
+	d.ParentTimestamp = math.MustParseUint64(ext.ParentTimestamp)
+	d.ParentDifficulty = math.MustParseBig256(ext.ParentDifficulty)
+	d.CurrentTimestamp = math.MustParseUint64(ext.CurrentTimestamp)
+	d.CurrentBlocknumber = math.MustParseBig256(ext.CurrentBlocknumber)
+	d.CurrentDifficulty = math.MustParseBig256(ext.CurrentDifficulty)
+
+	return nil
+}
+
+func TestCalcDifficulty(t *testing.T) {
+	file, err := os.Open("../../tests/files/BasicTests/difficulty.json")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer file.Close()
+
+	tests := make(map[string]diffTest)
+	err = json.NewDecoder(file).Decode(&tests)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	config := &params.ChainConfig{HomesteadBlock: big.NewInt(1150000)}
+	for name, test := range tests {
+		number := new(big.Int).Sub(test.CurrentBlocknumber, big.NewInt(1))
+		diff := CalcDifficulty(config, test.CurrentTimestamp, test.ParentTimestamp, number, test.ParentDifficulty)
+		if diff.Cmp(test.CurrentDifficulty) != 0 {
+			t.Error(name, "failed. Expected", test.CurrentDifficulty, "and calculated", diff)
+		}
+	}
+}
@@ -14,10 +14,10 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 
-package pow
+// Package ethash implements the ethash proof-of-work consensus engine.
+package ethash
 
 import (
-	"bytes"
 	"errors"
 	"fmt"
 	"math"
@@ -32,24 +32,20 @@ import (
 	"unsafe"
 
 	mmap "github.com/edsrzf/mmap-go"
+	"github.com/ethereum/go-ethereum/consensus"
 	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/rpc"
 	metrics "github.com/rcrowley/go-metrics"
 )
 
-var (
-	ErrInvalidDumpMagic  = errors.New("invalid dump magic")
-	ErrNonceOutOfRange   = errors.New("nonce out of range")
-	ErrInvalidDifficulty = errors.New("non-positive difficulty")
-	ErrInvalidMixDigest  = errors.New("invalid mix digest")
-	ErrInvalidPoW        = errors.New("pow difficulty invalid")
-)
+var ErrInvalidDumpMagic = errors.New("invalid dump magic")
 
 var (
 	// maxUint256 is a big integer representing 2^256-1
 	maxUint256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
 
 	// sharedEthash is a full instance that can be shared between multiple users.
-	sharedEthash = NewFullEthash("", 3, 0, "", 1, 0)
+	sharedEthash = New("", 3, 0, "", 1, 0)
 
 	// algorithmRevision is the data structure version used for file naming.
 	algorithmRevision = 23
@@ -321,7 +317,8 @@ func MakeDataset(block uint64, dir string) {
 	d.release()
 }
 
-// Ethash is a PoW data struture implementing the ethash algorithm.
+// Ethash is a consensus engine based on proot-of-work implementing the ethash
+// algorithm.
 type Ethash struct {
 	cachedir     string // Data directory to store the verification caches
 	cachesinmem  int    // Number of caches to keep in memory
@@ -334,15 +331,26 @@ type Ethash struct {
 	fcache   *cache              // Pre-generated cache for the estimated future epoch
 	datasets map[uint64]*dataset // In memory datasets to avoid regenerating too often
 	fdataset *dataset            // Pre-generated dataset for the estimated future epoch
-	lock     sync.Mutex          // Ensures thread safety for the in-memory caches
 
+	// Mining related fields
+	rand     *rand.Rand    // Properly seeded random source for nonces
+	threads  int           // Number of threads to mine on if mining
+	update   chan struct{} // Notification channel to update mining parameters
 	hashrate metrics.Meter // Meter tracking the average hashrate
 
-	tester bool // Flag whether to use a smaller test dataset
+	// The fields below are hooks for testing
+	tester    bool          // Flag whether to use a smaller test dataset
+	shared    *Ethash       // Shared PoW verifier to avoid cache regeneration
+	fakeMode  bool          // Flag whether to disable PoW checking
+	fakeFull  bool          // Flag whether to disable all consensus rules
+	fakeFail  uint64        // Block number which fails PoW check even in fake mode
+	fakeDelay time.Duration // Time delay to sleep for before returning from verify
+
+	lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields
 }
 
-// NewFullEthash creates a full sized ethash PoW scheme.
-func NewFullEthash(cachedir string, cachesinmem, cachesondisk int, dagdir string, dagsinmem, dagsondisk int) PoW {
+// New creates a full sized ethash PoW scheme.
+func New(cachedir string, cachesinmem, cachesondisk int, dagdir string, dagsinmem, dagsondisk int) *Ethash {
 	if cachesinmem <= 0 {
 		log.Warn("One ethash cache must alwast be in memory", "requested", cachesinmem)
 		cachesinmem = 1
@@ -362,58 +370,55 @@ func NewFullEthash(cachedir string, cachesinmem, cachesondisk int, dagdir string
 		dagsondisk:  dagsondisk,
 		caches:      make(map[uint64]*cache),
 		datasets:    make(map[uint64]*dataset),
+		update:      make(chan struct{}),
 		hashrate:    metrics.NewMeter(),
 	}
 }
 
-// NewTestEthash creates a small sized ethash PoW scheme useful only for testing
+// NewTester creates a small sized ethash PoW scheme useful only for testing
 // purposes.
-func NewTestEthash() PoW {
+func NewTester() *Ethash {
 	return &Ethash{
 		cachesinmem: 1,
 		caches:      make(map[uint64]*cache),
 		datasets:    make(map[uint64]*dataset),
 		tester:      true,
+		update:      make(chan struct{}),
 		hashrate:    metrics.NewMeter(),
 	}
 }
 
-// NewSharedEthash creates a full sized ethash PoW shared between all requesters
-// running in the same process.
-func NewSharedEthash() PoW {
-	return sharedEthash
+// NewFaker creates a ethash consensus engine with a fake PoW scheme that accepts
+// all blocks' seal as valid, though they still have to conform to the Ethereum
+// consensus rules.
+func NewFaker() *Ethash {
+	return &Ethash{fakeMode: true}
 }
 
-// Verify implements PoW, checking whether the given block satisfies the PoW
-// difficulty requirements.
-func (ethash *Ethash) Verify(block Block) error {
-	// Sanity check that the block number is below the lookup table size (60M blocks)
-	number := block.NumberU64()
-	if number/epochLength >= uint64(len(cacheSizes)) {
-		// Go < 1.7 cannot calculate new cache/dataset sizes (no fast prime check)
-		return ErrNonceOutOfRange
-	}
-	// Ensure that we have a valid difficulty for the block
-	difficulty := block.Difficulty()
-	if difficulty.Sign() <= 0 {
-		return ErrInvalidDifficulty
-	}
-	// Recompute the digest and PoW value and verify against the block
-	cache := ethash.cache(number)
+// NewFakeFailer creates a ethash consensus engine with a fake PoW scheme that
+// accepts all blocks as valid apart from the single one specified, though they
+// still have to conform to the Ethereum consensus rules.
+func NewFakeFailer(fail uint64) *Ethash {
+	return &Ethash{fakeMode: true, fakeFail: fail}
+}
 
-	size := datasetSize(number)
-	if ethash.tester {
-		size = 32 * 1024
-	}
-	digest, result := hashimotoLight(size, cache, block.HashNoNonce().Bytes(), block.Nonce())
-	if !bytes.Equal(block.MixDigest().Bytes(), digest) {
-		return ErrInvalidMixDigest
-	}
-	target := new(big.Int).Div(maxUint256, difficulty)
-	if new(big.Int).SetBytes(result).Cmp(target) > 0 {
-		return ErrInvalidPoW
-	}
-	return nil
+// NewFakeDelayer creates a ethash consensus engine with a fake PoW scheme that
+// accepts all blocks as valid, but delays verifications by some time, though
+// they still have to conform to the Ethereum consensus rules.
+func NewFakeDelayer(delay time.Duration) *Ethash {
+	return &Ethash{fakeMode: true, fakeDelay: delay}
+}
+
+// NewFullFaker creates a ethash consensus engine with a full fake scheme that
+// accepts all blocks as valid, without checking any consensus rules whatsoever.
+func NewFullFaker() *Ethash {
+	return &Ethash{fakeMode: true, fakeFull: true}
+}
+
+// NewShared creates a full sized ethash PoW shared between all requesters running
+// in the same process.
+func NewShared() *Ethash {
+	return &Ethash{shared: sharedEthash}
 }
 
 // cache tries to retrieve a verification cache for the specified block number
@@ -477,43 +482,6 @@ func (ethash *Ethash) cache(block uint64) []uint32 {
 	return current.cache
 }
 
-// Search implements PoW, attempting to find a nonce that satisfies the block's
-// difficulty requirements.
-func (ethash *Ethash) Search(block Block, stop <-chan struct{}) (uint64, []byte) {
-	var (
-		hash     = block.HashNoNonce().Bytes()
-		diff     = block.Difficulty()
-		target   = new(big.Int).Div(maxUint256, diff)
-		dataset  = ethash.dataset(block.NumberU64())
-		rand     = rand.New(rand.NewSource(time.Now().UnixNano()))
-		nonce    = uint64(rand.Int63())
-		attempts int64
-	)
-	// Start generating random nonces until we abort or find a good one
-	for {
-		select {
-		case <-stop:
-			// Mining terminated, update stats and abort
-			ethash.hashrate.Mark(attempts)
-			return 0, nil
-
-		default:
-			// We don't have to update hash rate on every nonce, so update after after 2^X nonces
-			attempts++
-			if (attempts % (1 << 15)) == 0 {
-				ethash.hashrate.Mark(attempts)
-				attempts = 0
-			}
-			// Compute the PoW value of this nonce
-			digest, result := hashimotoFull(dataset, hash, nonce)
-			if new(big.Int).SetBytes(result).Cmp(target) <= 0 {
-				return nonce, digest
-			}
-			nonce++
-		}
-	}
-}
-
 // dataset tries to retrieve a mining dataset for the specified block number
 // by first checking against a list of in-memory datasets, then against DAGs
 // stored on disk, and finally generating one if none can be found.
@@ -576,14 +544,44 @@ func (ethash *Ethash) dataset(block uint64) []uint32 {
 	return current.dataset
 }
 
+// Threads returns the number of mining threads currently enabled. This doesn't
+// necessarily mean that mining is running!
+func (ethash *Ethash) Threads() int {
+	ethash.lock.Lock()
+	defer ethash.lock.Unlock()
+
+	return ethash.threads
+}
+
+// SetThreads updates the number of mining threads currently enabled. Calling
+// this method does not start mining, only sets the thread count. If zero is
+// specified, the miner will use all cores of the machine.
+func (ethash *Ethash) SetThreads(threads int) {
+	ethash.lock.Lock()
+	defer ethash.lock.Unlock()
+
+	// Update the threads and ping any running seal to pull in any changes
+	ethash.threads = threads
+	select {
+	case ethash.update <- struct{}{}:
+	default:
+	}
+}
+
 // Hashrate implements PoW, returning the measured rate of the search invocations
 // per second over the last minute.
 func (ethash *Ethash) Hashrate() float64 {
 	return ethash.hashrate.Rate1()
 }
 
-// EthashSeedHash is the seed to use for generating a vrification cache and the
-// mining dataset.
-func EthashSeedHash(block uint64) []byte {
+// APIs implements consensus.Engine, returning the user facing RPC APIs. Currently
+// that is empty.
+func (ethash *Ethash) APIs(chain consensus.ChainReader) []rpc.API {
+	return nil
+}
+
+// SeedHash is the seed to use for generating a verification cache and the mining
+// dataset.
+func SeedHash(block uint64) []byte {
 	return seedHash(block)
 }
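The constructors above give several engines with progressively weaker checking. A hypothetical helper showing which one fits which situation (only the constructor names and arguments come from this commit):

    package example

    import (
        "time"

        "github.com/ethereum/go-ethereum/consensus"
        "github.com/ethereum/go-ethereum/consensus/ethash"
    )

    // pickEngine maps an illustrative mode string to an ethash constructor.
    func pickEngine(mode string) consensus.Engine {
        switch mode {
        case "full":
            return ethash.New("", 3, 0, "", 1, 0) // real PoW, shared-style cache settings
        case "test":
            return ethash.NewTester() // tiny 32KB dataset, still a real seal
        case "fake-delay":
            return ethash.NewFakeDelayer(500 * time.Millisecond) // headers pass after a pause
        case "fake-fail":
            return ethash.NewFakeFailer(42) // every header passes except block 42
        case "full-fake":
            return ethash.NewFullFaker() // skips all consensus rules entirely
        default:
            return ethash.NewFaker() // skips the PoW check, keeps the other rules
        }
    }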
@@ -0,0 +1,40 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package ethash
+
+import (
+	"math/big"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/core/types"
+)
+
+// Tests that ethash works correctly in test mode.
+func TestTestMode(t *testing.T) {
+	head := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)}
+
+	ethash := NewTester()
+	block, err := ethash.Seal(nil, types.NewBlockWithHeader(head), nil)
+	if err != nil {
+		t.Fatalf("failed to seal block: %v", err)
+	}
+	head.Nonce = types.EncodeNonce(block.Nonce())
+	head.MixDigest = block.MixDigest()
+	if err := ethash.VerifySeal(nil, head); err != nil {
+		t.Fatalf("unexpected verification error: %v", err)
+	}
+}
@@ -0,0 +1,146 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package ethash
+
+import (
+crand "crypto/rand"
+"math"
+"math/big"
+"math/rand"
+"runtime"
+"sync"
+
+"github.com/ethereum/go-ethereum/common"
+"github.com/ethereum/go-ethereum/consensus"
+"github.com/ethereum/go-ethereum/core/types"
+"github.com/ethereum/go-ethereum/log"
+)
+
+// Seal implements consensus.Engine, attempting to find a nonce that satisfies
+// the block's difficulty requirements.
+func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop <-chan struct{}) (*types.Block, error) {
+// If we're running a fake PoW, simply return a 0 nonce immediately
+if ethash.fakeMode {
+header := block.Header()
+header.Nonce, header.MixDigest = types.BlockNonce{}, common.Hash{}
+return block.WithSeal(header), nil
+}
+// If we're running a shared PoW, delegate sealing to it
+if ethash.shared != nil {
+return ethash.shared.Seal(chain, block, stop)
+}
+// Create a runner and the multiple search threads it directs
+abort := make(chan struct{})
+found := make(chan *types.Block)
+
+ethash.lock.Lock()
+threads := ethash.threads
+if ethash.rand == nil {
+seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
+if err != nil {
+ethash.lock.Unlock()
+return nil, err
+}
+ethash.rand = rand.New(rand.NewSource(seed.Int64()))
+}
+ethash.lock.Unlock()
+if threads == 0 {
+threads = runtime.NumCPU()
+}
+var pend sync.WaitGroup
+for i := 0; i < threads; i++ {
+pend.Add(1)
+go func(id int, nonce uint64) {
+defer pend.Done()
+ethash.mine(block, id, nonce, abort, found)
+}(i, uint64(ethash.rand.Int63()))
+}
+// Wait until sealing is terminated or a nonce is found
+var result *types.Block
+select {
+case <-stop:
+// Outside abort, stop all miner threads
+close(abort)
+case result = <-found:
+// One of the threads found a block, abort all others
+close(abort)
+case <-ethash.update:
+// Thread count was changed on user request, restart
+close(abort)
+pend.Wait()
+return ethash.Seal(chain, block, stop)
+}
+// Wait for all miners to terminate and return the block
+pend.Wait()
+return result, nil
+}
+
+// mine is the actual proof-of-work miner that searches for a nonce starting from
+// seed that results in correct final block difficulty.
+func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan struct{}, found chan *types.Block) {
+// Extract some data from the header
+var (
+header = block.Header()
+hash = header.HashNoNonce().Bytes()
+target = new(big.Int).Div(maxUint256, header.Difficulty)
+
+number = header.Number.Uint64()
+dataset = ethash.dataset(number)
+)
+// Start generating random nonces until we abort or find a good one
+var (
+attempts = int64(0)
+nonce = seed
+)
+logger := log.New("miner", id)
+logger.Trace("Started ethash search for new nonces", "seed", seed)
+for {
+select {
+case <-abort:
+// Mining terminated, update stats and abort
+logger.Trace("Ethash nonce search aborted", "attempts", nonce-seed)
+ethash.hashrate.Mark(attempts)
+return
+
+default:
+// We don't have to update hash rate on every nonce, so update after after 2^X nonces
+attempts++
+if (attempts % (1 << 15)) == 0 {
+ethash.hashrate.Mark(attempts)
+attempts = 0
+}
+// Compute the PoW value of this nonce
+digest, result := hashimotoFull(dataset, hash, nonce)
+if new(big.Int).SetBytes(result).Cmp(target) <= 0 {
+// Correct nonce found, create a new header with it
+header = types.CopyHeader(header)
+header.Nonce = types.EncodeNonce(nonce)
+header.MixDigest = common.BytesToHash(digest)
+
+// Seal and return a block (if still needed)
+select {
+case found <- block.WithSeal(header):
+logger.Trace("Ethash nonce found and reported", "attempts", nonce-seed, "nonce", nonce)
+case <-abort:
+logger.Trace("Ethash nonce found but discarded", "attempts", nonce-seed, "nonce", nonce)
+}
+return
+}
+nonce++
+}
+}
+}
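The Seal/mine pair above fans the nonce search out over several goroutines and coordinates them with an abort channel, a found channel and the update ping. The following standalone Go sketch (invented names, a trivial predicate standing in for hashimotoFull against the difficulty target) shows the same coordination shape under those assumptions:

package main

import (
    "fmt"
    "math/rand"
    "runtime"
    "sync"
)

// search models the Seal pattern: several workers probe candidates, the
// first hit is sent on found, and closing abort tears the rest down.
func search(stop <-chan struct{}, ok func(uint64) bool) (uint64, bool) {
    abort := make(chan struct{})
    found := make(chan uint64)

    var pend sync.WaitGroup
    for i := 0; i < runtime.NumCPU(); i++ {
        pend.Add(1)
        go func(seed uint64) {
            defer pend.Done()
            for nonce := seed; ; nonce++ {
                select {
                case <-abort:
                    return
                default:
                    if ok(nonce) {
                        select {
                        case found <- nonce: // report the hit
                        case <-abort: // another worker won, or we were stopped
                        }
                        return
                    }
                }
            }
        }(rand.Uint64())
    }
    var nonce uint64
    var sealed bool
    select {
    case <-stop: // external cancellation
    case nonce = <-found:
        sealed = true
    }
    close(abort)
    pend.Wait()
    return nonce, sealed
}

func main() {
    stop := make(chan struct{})
    nonce, ok := search(stop, func(n uint64) bool { return n%1000000 == 0 })
    fmt.Println(nonce, ok)
}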
@@ -4,7 +4,7 @@
 
 // Source: https://golang.org/src/crypto/cipher/xor.go
 
-package pow
+package ethash
 
 import (
 "runtime"
@@ -0,0 +1,85 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package misc
+
+import (
+"bytes"
+"errors"
+"math/big"
+
+"github.com/ethereum/go-ethereum/core/state"
+"github.com/ethereum/go-ethereum/core/types"
+"github.com/ethereum/go-ethereum/params"
+)
+
+var (
+// ErrBadProDAOExtra is returned if a header doens't support the DAO fork on a
+// pro-fork client.
+ErrBadProDAOExtra = errors.New("bad DAO pro-fork extra-data")
+
+// ErrBadNoDAOExtra is returned if a header does support the DAO fork on a no-
+// fork client.
+ErrBadNoDAOExtra = errors.New("bad DAO no-fork extra-data")
+)
+
+// VerifyDAOHeaderExtraData validates the extra-data field of a block header to
+// ensure it conforms to DAO hard-fork rules.
+//
+// DAO hard-fork extension to the header validity:
+// a) if the node is no-fork, do not accept blocks in the [fork, fork+10) range
+// with the fork specific extra-data set
+// b) if the node is pro-fork, require blocks in the specific range to have the
+// unique extra-data set.
+func VerifyDAOHeaderExtraData(config *params.ChainConfig, header *types.Header) error {
+// Short circuit validation if the node doesn't care about the DAO fork
+if config.DAOForkBlock == nil {
+return nil
+}
+// Make sure the block is within the fork's modified extra-data range
+limit := new(big.Int).Add(config.DAOForkBlock, params.DAOForkExtraRange)
+if header.Number.Cmp(config.DAOForkBlock) < 0 || header.Number.Cmp(limit) >= 0 {
+return nil
+}
+// Depending whether we support or oppose the fork, validate the extra-data contents
+if config.DAOForkSupport {
+if !bytes.Equal(header.Extra, params.DAOForkBlockExtra) {
+return ErrBadProDAOExtra
+}
+} else {
+if bytes.Equal(header.Extra, params.DAOForkBlockExtra) {
+return ErrBadNoDAOExtra
+}
+}
+// All ok, header has the same extra-data we expect
+return nil
+}
+
+// ApplyDAOHardFork modifies the state database according to the DAO hard-fork
+// rules, transferring all balances of a set of DAO accounts to a single refund
+// contract.
+func ApplyDAOHardFork(statedb *state.StateDB) {
+// Retrieve the contract to refund balances into
+if !statedb.Exist(params.DAORefundContract) {
+statedb.CreateAccount(params.DAORefundContract)
+}
+
+// Move every DAO account and extra-balance account funds into the refund contract
+for _, addr := range params.DAODrainList() {
+statedb.AddBalance(params.DAORefundContract, statedb.GetBalance(addr))
+statedb.SetBalance(addr, new(big.Int))
+}
+}
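The rule VerifyDAOHeaderExtraData enforces is a bounded window check: only inside [DAOForkBlock, DAOForkBlock+DAOForkExtraRange) does the extra-data have to agree with the client's fork side. A minimal sketch of that rule with plain types (illustrative names, not the go-ethereum ones):

package main

import (
    "bytes"
    "errors"
    "fmt"
    "math/big"
)

// checkForkExtra: inside [forkBlock, forkBlock+extraRange) the header's
// extra-data must (pro-fork) or must not (no-fork) equal the fork marker.
func checkForkExtra(number, forkBlock, extraRange *big.Int, extra, marker []byte, proFork bool) error {
    if forkBlock == nil {
        return nil // this client does not care about the fork
    }
    limit := new(big.Int).Add(forkBlock, extraRange)
    if number.Cmp(forkBlock) < 0 || number.Cmp(limit) >= 0 {
        return nil // outside the enforced window
    }
    if proFork != bytes.Equal(extra, marker) {
        return errors.New("extra-data does not match the fork side of this client")
    }
    return nil
}

func main() {
    marker := []byte("dao-hard-fork")
    fmt.Println(checkForkExtra(big.NewInt(1920001), big.NewInt(1920000), big.NewInt(10), marker, marker, true)) // <nil>
}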
@@ -0,0 +1,43 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package misc
+
+import (
+"fmt"
+
+"github.com/ethereum/go-ethereum/common"
+"github.com/ethereum/go-ethereum/core/types"
+"github.com/ethereum/go-ethereum/params"
+)
+
+// VerifyForkHashes verifies that blocks conforming to network hard-forks do have
+// the correct hashes, to avoid clients going off on different chains. This is an
+// optional feature.
+func VerifyForkHashes(config *params.ChainConfig, header *types.Header, uncle bool) error {
+// We don't care about uncles
+if uncle {
+return nil
+}
+// If the homestead reprice hash is set, validate it
+if config.EIP150Block != nil && config.EIP150Block.Cmp(header.Number) == 0 {
+if config.EIP150Hash != (common.Hash{}) && config.EIP150Hash != header.Hash() {
+return fmt.Errorf("homestead gas reprice fork: have 0x%x, want 0x%x", header.Hash(), config.EIP150Hash)
+}
+}
+// All ok, return
+return nil
+}
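VerifyForkHashes above is a hash-pinning check: at one configured block number the locally computed header hash must equal a hard-coded value, otherwise the client refuses to follow that chain. A toy version of the same idea (simplified types, assumed values):

package main

import (
    "bytes"
    "fmt"
)

// pinHash refuses a header when a hash is pinned at its height and the
// locally computed hash differs from the pinned one.
func pinHash(number, pinnedNumber uint64, have, want []byte) error {
    if number != pinnedNumber || len(want) == 0 {
        return nil // nothing pinned at this height
    }
    if !bytes.Equal(have, want) {
        return fmt.Errorf("fork hash mismatch: have %x, want %x", have, want)
    }
    return nil
}

func main() {
    fmt.Println(pinHash(2463000, 2463000, []byte{0x01}, []byte{0x01})) // <nil>
}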
@@ -25,13 +25,13 @@ import (
 
 "github.com/ethereum/go-ethereum/common"
 "github.com/ethereum/go-ethereum/common/math"
+"github.com/ethereum/go-ethereum/consensus/ethash"
 "github.com/ethereum/go-ethereum/core/types"
 "github.com/ethereum/go-ethereum/core/vm"
 "github.com/ethereum/go-ethereum/crypto"
 "github.com/ethereum/go-ethereum/ethdb"
 "github.com/ethereum/go-ethereum/event"
 "github.com/ethereum/go-ethereum/params"
-"github.com/ethereum/go-ethereum/pow"
 )
 
 func BenchmarkInsertChain_empty_memdb(b *testing.B) {
@@ -176,7 +176,7 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
 // Time the insertion of the new chain.
 // State and blocks are stored in the same DB.
 evmux := new(event.TypeMux)
-chainman, _ := NewBlockChain(db, gspec.Config, pow.FakePow{}, evmux, vm.Config{})
+chainman, _ := NewBlockChain(db, gspec.Config, ethash.NewFaker(), evmux, vm.Config{})
 defer chainman.Stop()
 b.ReportAllocs()
 b.ResetTimer()
@@ -286,7 +286,7 @@ func benchReadChain(b *testing.B, full bool, count uint64) {
 if err != nil {
 b.Fatalf("error opening database at %v: %v", dir, err)
 }
-chain, err := NewBlockChain(db, params.TestChainConfig, pow.FakePow{}, new(event.TypeMux), vm.Config{})
+chain, err := NewBlockChain(db, params.TestChainConfig, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
 if err != nil {
 b.Fatalf("error creating chain: %v", err)
 }
@@ -19,22 +19,12 @@ package core
 import (
 "fmt"
 "math/big"
-"time"
 
-"github.com/ethereum/go-ethereum/common"
 "github.com/ethereum/go-ethereum/common/math"
+"github.com/ethereum/go-ethereum/consensus"
 "github.com/ethereum/go-ethereum/core/state"
 "github.com/ethereum/go-ethereum/core/types"
-"github.com/ethereum/go-ethereum/log"
 "github.com/ethereum/go-ethereum/params"
-"github.com/ethereum/go-ethereum/pow"
-"gopkg.in/fatih/set.v0"
-)
-
-var (
-ExpDiffPeriod = big.NewInt(100000)
-big10 = big.NewInt(10)
-bigMinus99 = big.NewInt(-99)
 )
 
 // BlockValidator is responsible for validating block headers, uncles and
@@ -44,30 +34,24 @@ var (
 type BlockValidator struct {
 config *params.ChainConfig // Chain configuration options
 bc *BlockChain // Canonical block chain
-Pow pow.PoW // Proof of work used for validating
+engine consensus.Engine // Consensus engine used for validating
 }
 
 // NewBlockValidator returns a new block validator which is safe for re-use
-func NewBlockValidator(config *params.ChainConfig, blockchain *BlockChain, pow pow.PoW) *BlockValidator {
+func NewBlockValidator(config *params.ChainConfig, blockchain *BlockChain, engine consensus.Engine) *BlockValidator {
 validator := &BlockValidator{
 config: config,
-Pow: pow,
+engine: engine,
 bc: blockchain,
 }
 return validator
 }
 
-// ValidateBlock validates the given block's header and uncles and verifies the
-// the block header's transaction and uncle roots.
-//
-// ValidateBlock does not validate the header's pow. The pow work validated
-// separately so we can process them in parallel.
-//
-// ValidateBlock also validates and makes sure that any previous state (or present)
-// state that might or might not be present is checked to make sure that fast
-// sync has done it's job proper. This prevents the block validator from accepting
-// false positives where a header is present but the state is not.
-func (v *BlockValidator) ValidateBlock(block *types.Block) error {
+// ValidateBody validates the given block's uncles and verifies the the block
+// header's transaction and uncle roots. The headers are assumed to be already
+// validated at this point.
+func (v *BlockValidator) ValidateBody(block *types.Block) error {
+// Check whether the block's known, and if not, that it's linkable
 if v.bc.HasBlock(block.Hash()) {
 if _, err := state.New(block.Root(), v.bc.chainDb); err == nil {
 return &KnownBlockError{block.Number(), block.Hash()}
@@ -80,30 +64,17 @@ func (v *BlockValidator) ValidateBlock(block *types.Block) error {
 if _, err := state.New(parent.Root(), v.bc.chainDb); err != nil {
 return ParentError(block.ParentHash())
 }
+// Header validity is known at this point, check the uncles and transactions
 header := block.Header()
-// validate the block header
-if err := ValidateHeader(v.config, v.Pow, header, parent.Header(), false, false); err != nil {
+if err := v.engine.VerifyUncles(v.bc, block); err != nil {
 return err
 }
-// verify the uncles are correctly rewarded
-if err := v.VerifyUncles(block, parent); err != nil {
-return err
+if hash := types.CalcUncleHash(block.Uncles()); hash != header.UncleHash {
+return fmt.Errorf("uncle root hash mismatch: have %x, want %x", hash, header.UncleHash)
 }
-
-// Verify UncleHash before running other uncle validations
-unclesSha := types.CalcUncleHash(block.Uncles())
-if unclesSha != header.UncleHash {
-return fmt.Errorf("invalid uncles root hash (remote: %x local: %x)", header.UncleHash, unclesSha)
+if hash := types.DeriveSha(block.Transactions()); hash != header.TxHash {
+return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash)
 }
-
-// The transactions Trie's root (R = (Tr [[i, RLP(T1)], [i, RLP(T2)], ... [n, RLP(Tn)]]))
-// can be used by light clients to make sure they've received the correct Txs
-txSha := types.DeriveSha(block.Transactions())
-if txSha != header.TxHash {
-return fmt.Errorf("invalid transaction root hash (remote: %x local: %x)", header.TxHash, txSha)
-}
-
 return nil
 }
@@ -135,222 +106,6 @@ func (v *BlockValidator) ValidateState(block, parent *types.Block, statedb *stat
 return nil
 }
 
-// VerifyUncles verifies the given block's uncles and applies the Ethereum
-// consensus rules to the various block headers included; it will return an
-// error if any of the included uncle headers were invalid. It returns an error
-// if the validation failed.
-func (v *BlockValidator) VerifyUncles(block, parent *types.Block) error {
-// validate that there are at most 2 uncles included in this block
-if len(block.Uncles()) > 2 {
-return ValidationError("Block can only contain maximum 2 uncles (contained %v)", len(block.Uncles()))
-}
-
-uncles := set.New()
-ancestors := make(map[common.Hash]*types.Block)
-for _, ancestor := range v.bc.GetBlocksFromHash(block.ParentHash(), 7) {
-ancestors[ancestor.Hash()] = ancestor
-// Include ancestors uncles in the uncle set. Uncles must be unique.
-for _, uncle := range ancestor.Uncles() {
-uncles.Add(uncle.Hash())
-}
-}
-ancestors[block.Hash()] = block
-uncles.Add(block.Hash())
-
-for i, uncle := range block.Uncles() {
-hash := uncle.Hash()
-if uncles.Has(hash) {
-// Error not unique
-return UncleError("uncle[%d](%x) not unique", i, hash[:4])
-}
-uncles.Add(hash)
-
-if ancestors[hash] != nil {
-branch := fmt.Sprintf(" O - %x\n |\n", block.Hash())
-for h := range ancestors {
-branch += fmt.Sprintf(" O - %x\n |\n", h)
-}
-log.Warn(branch)
-return UncleError("uncle[%d](%x) is ancestor", i, hash[:4])
-}
-
-if ancestors[uncle.ParentHash] == nil || uncle.ParentHash == parent.Hash() {
-return UncleError("uncle[%d](%x)'s parent is not ancestor (%x)", i, hash[:4], uncle.ParentHash[0:4])
-}
-
-if err := ValidateHeader(v.config, v.Pow, uncle, ancestors[uncle.ParentHash].Header(), true, true); err != nil {
-return ValidationError(fmt.Sprintf("uncle[%d](%x) header invalid: %v", i, hash[:4], err))
-}
-}
-
-return nil
-}
-
-// ValidateHeader validates the given header and, depending on the pow arg,
-// checks the proof of work of the given header. Returns an error if the
-// validation failed.
-func (v *BlockValidator) ValidateHeader(header, parent *types.Header, checkPow bool) error {
-// Short circuit if the parent is missing.
-if parent == nil {
-return ParentError(header.ParentHash)
-}
-// Short circuit if the header's already known or its parent is missing
-if v.bc.HasHeader(header.Hash()) {
-return nil
-}
-return ValidateHeader(v.config, v.Pow, header, parent, checkPow, false)
-}
-
-// Validates a header. Returns an error if the header is invalid.
-//
-// See YP section 4.3.4. "Block Header Validity"
-func ValidateHeader(config *params.ChainConfig, pow pow.PoW, header *types.Header, parent *types.Header, checkPow, uncle bool) error {
-if uint64(len(header.Extra)) > params.MaximumExtraDataSize {
-return fmt.Errorf("Header extra data too long (%d)", len(header.Extra))
-}
-
-if uncle {
-if header.Time.Cmp(math.MaxBig256) == 1 {
-return BlockTSTooBigErr
-}
-} else {
-if header.Time.Cmp(big.NewInt(time.Now().Unix())) == 1 {
-return BlockFutureErr
-}
-}
-if header.Time.Cmp(parent.Time) != 1 {
-return BlockEqualTSErr
-}
-
-expd := CalcDifficulty(config, header.Time.Uint64(), parent.Time.Uint64(), parent.Number, parent.Difficulty)
-if expd.Cmp(header.Difficulty) != 0 {
-return fmt.Errorf("Difficulty check failed for header (remote: %v local: %v)", header.Difficulty, expd)
-}
-
-a := new(big.Int).Set(parent.GasLimit)
-a = a.Sub(a, header.GasLimit)
-a.Abs(a)
-b := new(big.Int).Set(parent.GasLimit)
-b = b.Div(b, params.GasLimitBoundDivisor)
-if !(a.Cmp(b) < 0) || (header.GasLimit.Cmp(params.MinGasLimit) == -1) {
-return fmt.Errorf("GasLimit check failed for header (remote: %v local_max: %v)", header.GasLimit, b)
-}
-
-num := new(big.Int).Set(parent.Number)
-num.Sub(header.Number, num)
-if num.Cmp(big.NewInt(1)) != 0 {
-return BlockNumberErr
-}
-
-if checkPow {
-// Verify the nonce of the header. Return an error if it's not valid
-if err := pow.Verify(types.NewBlockWithHeader(header)); err != nil {
-return &BlockNonceErr{header.Number, header.Hash(), header.Nonce.Uint64()}
-}
-}
-// If all checks passed, validate the extra-data field for hard forks
-if err := ValidateDAOHeaderExtraData(config, header); err != nil {
-return err
-}
-if !uncle && config.EIP150Block != nil && config.EIP150Block.Cmp(header.Number) == 0 {
-if config.EIP150Hash != (common.Hash{}) && config.EIP150Hash != header.Hash() {
-return ValidationError("Homestead gas reprice fork hash mismatch: have 0x%x, want 0x%x", header.Hash(), config.EIP150Hash)
-}
-}
-return nil
-}
-
-// CalcDifficulty is the difficulty adjustment algorithm. It returns
-// the difficulty that a new block should have when created at time
-// given the parent block's time and difficulty.
-func CalcDifficulty(config *params.ChainConfig, time, parentTime uint64, parentNumber, parentDiff *big.Int) *big.Int {
-if config.IsHomestead(new(big.Int).Add(parentNumber, common.Big1)) {
-return calcDifficultyHomestead(time, parentTime, parentNumber, parentDiff)
-} else {
-return calcDifficultyFrontier(time, parentTime, parentNumber, parentDiff)
-}
-}
-
-func calcDifficultyHomestead(time, parentTime uint64, parentNumber, parentDiff *big.Int) *big.Int {
-// https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2.mediawiki
-// algorithm:
-// diff = (parent_diff +
-// (parent_diff / 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99))
-// ) + 2^(periodCount - 2)
-
-bigTime := new(big.Int).SetUint64(time)
-bigParentTime := new(big.Int).SetUint64(parentTime)
-
-// holds intermediate values to make the algo easier to read & audit
-x := new(big.Int)
-y := new(big.Int)
-
-// 1 - (block_timestamp -parent_timestamp) // 10
-x.Sub(bigTime, bigParentTime)
-x.Div(x, big10)
-x.Sub(common.Big1, x)
-
-// max(1 - (block_timestamp - parent_timestamp) // 10, -99)))
-if x.Cmp(bigMinus99) < 0 {
-x.Set(bigMinus99)
-}
-
-// (parent_diff + parent_diff // 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99))
-y.Div(parentDiff, params.DifficultyBoundDivisor)
-x.Mul(y, x)
-x.Add(parentDiff, x)
-
-// minimum difficulty can ever be (before exponential factor)
-if x.Cmp(params.MinimumDifficulty) < 0 {
-x.Set(params.MinimumDifficulty)
-}
-
-// for the exponential factor
-periodCount := new(big.Int).Add(parentNumber, common.Big1)
-periodCount.Div(periodCount, ExpDiffPeriod)
-
-// the exponential factor, commonly referred to as "the bomb"
-// diff = diff + 2^(periodCount - 2)
-if periodCount.Cmp(common.Big1) > 0 {
-y.Sub(periodCount, common.Big2)
-y.Exp(common.Big2, y, nil)
-x.Add(x, y)
-}
-
-return x
-}
-
-func calcDifficultyFrontier(time, parentTime uint64, parentNumber, parentDiff *big.Int) *big.Int {
-diff := new(big.Int)
-adjust := new(big.Int).Div(parentDiff, params.DifficultyBoundDivisor)
-bigTime := new(big.Int)
-bigParentTime := new(big.Int)
-
-bigTime.SetUint64(time)
-bigParentTime.SetUint64(parentTime)
-
-if bigTime.Sub(bigTime, bigParentTime).Cmp(params.DurationLimit) < 0 {
-diff.Add(parentDiff, adjust)
-} else {
-diff.Sub(parentDiff, adjust)
-}
-if diff.Cmp(params.MinimumDifficulty) < 0 {
-diff.Set(params.MinimumDifficulty)
-}
-
-periodCount := new(big.Int).Add(parentNumber, common.Big1)
-periodCount.Div(periodCount, ExpDiffPeriod)
-if periodCount.Cmp(common.Big1) > 0 {
-// diff = diff + 2^(periodCount - 2)
-expDiff := periodCount.Sub(periodCount, common.Big2)
-expDiff.Exp(common.Big2, expDiff, nil)
-diff.Add(diff, expDiff)
-diff = math.BigMax(diff, params.MinimumDifficulty)
-}
-
-return diff
-}
-
 // CalcGasLimit computes the gas limit of the next block after parent.
 // The result may be modified by the caller.
 // This is miner strategy, not consensus protocol.
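The removed calcDifficultyHomestead above implements the EIP-2 formula parent_diff + parent_diff/2048 * max(1 - (timestamp - parent_timestamp)/10, -99), clamped to a minimum, plus the 2^(periodCount-2) bomb. The standalone sketch below re-derives that arithmetic with math/big; the 2048, 131072 and 100000 constants stand in for the DifficultyBoundDivisor, MinimumDifficulty and ExpDiffPeriod values referenced in the removed code and are assumptions of this sketch, not values to rely on:

package main

import (
    "fmt"
    "math/big"
)

// homesteadDifficulty mirrors the arithmetic of the removed function for
// illustration only.
func homesteadDifficulty(time, parentTime uint64, parentNumber, parentDiff *big.Int) *big.Int {
    var (
        boundDivisor = big.NewInt(2048)   // assumed DifficultyBoundDivisor
        minimumDiff  = big.NewInt(131072) // assumed MinimumDifficulty
        expPeriod    = big.NewInt(100000) // ExpDiffPeriod from the removed code
        big1         = big.NewInt(1)
        big2         = big.NewInt(2)
        big10        = big.NewInt(10)
        bigMinus99   = big.NewInt(-99)
    )
    // x = max(1 - (time - parentTime)/10, -99)
    x := new(big.Int).SetUint64(time)
    x.Sub(x, new(big.Int).SetUint64(parentTime))
    x.Div(x, big10)
    x.Sub(big1, x)
    if x.Cmp(bigMinus99) < 0 {
        x.Set(bigMinus99)
    }
    // x = parentDiff + parentDiff/2048 * x, clamped to the minimum
    y := new(big.Int).Div(parentDiff, boundDivisor)
    x.Mul(y, x)
    x.Add(parentDiff, x)
    if x.Cmp(minimumDiff) < 0 {
        x.Set(minimumDiff)
    }
    // add the exponential "bomb": 2^(periodCount - 2)
    period := new(big.Int).Add(parentNumber, big1)
    period.Div(period, expPeriod)
    if period.Cmp(big1) > 0 {
        bomb := new(big.Int).Sub(period, big2)
        bomb.Exp(big2, bomb, nil)
        x.Add(x, bomb)
    }
    return x
}

func main() {
    fmt.Println(homesteadDifficulty(1500000020, 1500000000, big.NewInt(1500000), big.NewInt(2048)))
}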
@@ -17,64 +17,179 @@
 package core
 
 import (
-"math/big"
+"runtime"
 "testing"
+"time"
 
-"github.com/ethereum/go-ethereum/common"
-"github.com/ethereum/go-ethereum/core/state"
+"github.com/ethereum/go-ethereum/consensus/ethash"
 "github.com/ethereum/go-ethereum/core/types"
+"github.com/ethereum/go-ethereum/core/vm"
 "github.com/ethereum/go-ethereum/ethdb"
+"github.com/ethereum/go-ethereum/event"
 "github.com/ethereum/go-ethereum/params"
-"github.com/ethereum/go-ethereum/pow"
 )
 
-func testGenesis(account common.Address, balance *big.Int) *Genesis {
-return &Genesis{
-Config: params.TestChainConfig,
-Alloc: GenesisAlloc{account: {Balance: balance}},
+// Tests that simple header verification works, for both good and bad blocks.
+func TestHeaderVerification(t *testing.T) {
+// Create a simple chain to verify
+var (
+testdb, _ = ethdb.NewMemDatabase()
+gspec = &Genesis{Config: params.TestChainConfig}
+genesis = gspec.MustCommit(testdb)
+blocks, _ = GenerateChain(params.TestChainConfig, genesis, testdb, 8, nil)
+)
+headers := make([]*types.Header, len(blocks))
+for i, block := range blocks {
+headers[i] = block.Header()
+}
+// Run the header checker for blocks one-by-one, checking for both valid and invalid nonces
+chain, _ := NewBlockChain(testdb, params.TestChainConfig, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
+
+for i := 0; i < len(blocks); i++ {
+for j, valid := range []bool{true, false} {
+var results <-chan error
+
+if valid {
+engine := ethash.NewFaker()
+_, results = engine.VerifyHeaders(chain, []*types.Header{headers[i]}, []bool{true})
+} else {
+engine := ethash.NewFakeFailer(headers[i].Number.Uint64())
+_, results = engine.VerifyHeaders(chain, []*types.Header{headers[i]}, []bool{true})
+}
+// Wait for the verification result
+select {
+case result := <-results:
+if (result == nil) != valid {
+t.Errorf("test %d.%d: validity mismatch: have %v, want %v", i, j, result, valid)
+}
+case <-time.After(time.Second):
+t.Fatalf("test %d.%d: verification timeout", i, j)
+}
+// Make sure no more data is returned
+select {
+case result := <-results:
+t.Fatalf("test %d.%d: unexpected result returned: %v", i, j, result)
+case <-time.After(25 * time.Millisecond):
+}
+}
+chain.InsertChain(blocks[i : i+1])
 }
 }
 
-func TestNumber(t *testing.T) {
-chain := newTestBlockChain()
-statedb, _ := state.New(chain.Genesis().Root(), chain.chainDb)
-header := makeHeader(chain.config, chain.Genesis(), statedb)
-header.Number = big.NewInt(3)
-err := ValidateHeader(chain.config, pow.FakePow{}, header, chain.Genesis().Header(), false, false)
-if err != BlockNumberErr {
-t.Errorf("expected block number error, got %q", err)
-}
+// Tests that concurrent header verification works, for both good and bad blocks.
+func TestHeaderConcurrentVerification2(t *testing.T) { testHeaderConcurrentVerification(t, 2) }
+func TestHeaderConcurrentVerification8(t *testing.T) { testHeaderConcurrentVerification(t, 8) }
+func TestHeaderConcurrentVerification32(t *testing.T) { testHeaderConcurrentVerification(t, 32) }
 
-header = makeHeader(chain.config, chain.Genesis(), statedb)
-err = ValidateHeader(chain.config, pow.FakePow{}, header, chain.Genesis().Header(), false, false)
-if err == BlockNumberErr {
-t.Errorf("didn't expect block number error")
+func testHeaderConcurrentVerification(t *testing.T, threads int) {
+// Create a simple chain to verify
+var (
+testdb, _ = ethdb.NewMemDatabase()
+gspec = &Genesis{Config: params.TestChainConfig}
+genesis = gspec.MustCommit(testdb)
+blocks, _ = GenerateChain(params.TestChainConfig, genesis, testdb, 8, nil)
+)
+headers := make([]*types.Header, len(blocks))
+seals := make([]bool, len(blocks))
+
+for i, block := range blocks {
+headers[i] = block.Header()
+seals[i] = true
+}
+// Set the number of threads to verify on
+old := runtime.GOMAXPROCS(threads)
+defer runtime.GOMAXPROCS(old)
+
+// Run the header checker for the entire block chain at once both for a valid and
+// also an invalid chain (enough if one arbitrary block is invalid).
+for i, valid := range []bool{true, false} {
+var results <-chan error
+
+if valid {
+chain, _ := NewBlockChain(testdb, params.TestChainConfig, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
+_, results = chain.engine.VerifyHeaders(chain, headers, seals)
+} else {
+chain, _ := NewBlockChain(testdb, params.TestChainConfig, ethash.NewFakeFailer(uint64(len(headers)-1)), new(event.TypeMux), vm.Config{})
+_, results = chain.engine.VerifyHeaders(chain, headers, seals)
+}
+// Wait for all the verification results
+checks := make(map[int]error)
+for j := 0; j < len(blocks); j++ {
+select {
+case result := <-results:
+checks[j] = result
+
+case <-time.After(time.Second):
+t.Fatalf("test %d.%d: verification timeout", i, j)
+}
+}
+// Check nonce check validity
+for j := 0; j < len(blocks); j++ {
+want := valid || (j < len(blocks)-2) // We chose the last-but-one nonce in the chain to fail
+if (checks[j] == nil) != want {
+t.Errorf("test %d.%d: validity mismatch: have %v, want %v", i, j, checks[j], want)
+}
+if !want {
+// A few blocks after the first error may pass verification due to concurrent
+// workers. We don't care about those in this test, just that the correct block
+// errors out.
+break
+}
+}
+// Make sure no more data is returned
+select {
+case result := <-results:
+t.Fatalf("test %d: unexpected result returned: %v", i, result)
+case <-time.After(25 * time.Millisecond):
+}
 }
 }
 
-func TestPutReceipt(t *testing.T) {
-db, _ := ethdb.NewMemDatabase()
+// Tests that aborting a header validation indeed prevents further checks from being
+// run, as well as checks that no left-over goroutines are leaked.
+func TestHeaderConcurrentAbortion2(t *testing.T) { testHeaderConcurrentAbortion(t, 2) }
+func TestHeaderConcurrentAbortion8(t *testing.T) { testHeaderConcurrentAbortion(t, 8) }
+func TestHeaderConcurrentAbortion32(t *testing.T) { testHeaderConcurrentAbortion(t, 32) }
 
-var addr common.Address
-addr[0] = 1
-var hash common.Hash
-hash[0] = 2
-
-receipt := new(types.Receipt)
-receipt.Logs = []*types.Log{{
-Address: addr,
-Topics: []common.Hash{hash},
-Data: []byte("hi"),
-BlockNumber: 42,
-TxHash: hash,
-TxIndex: 0,
-BlockHash: hash,
-Index: 0,
-}}
-
-WriteReceipts(db, types.Receipts{receipt})
-receipt = GetReceipt(db, common.Hash{})
-if receipt == nil {
-t.Error("expected to get 1 receipt, got none.")
+func testHeaderConcurrentAbortion(t *testing.T, threads int) {
+// Create a simple chain to verify
+var (
+testdb, _ = ethdb.NewMemDatabase()
+gspec = &Genesis{Config: params.TestChainConfig}
+genesis = gspec.MustCommit(testdb)
+blocks, _ = GenerateChain(params.TestChainConfig, genesis, testdb, 1024, nil)
+)
+headers := make([]*types.Header, len(blocks))
+seals := make([]bool, len(blocks))
+
+for i, block := range blocks {
+headers[i] = block.Header()
+seals[i] = true
+}
+// Set the number of threads to verify on
+old := runtime.GOMAXPROCS(threads)
+defer runtime.GOMAXPROCS(old)
+
+// Start the verifications and immediately abort
+chain, _ := NewBlockChain(testdb, params.TestChainConfig, ethash.NewFakeDelayer(time.Millisecond), new(event.TypeMux), vm.Config{})
+abort, results := chain.engine.VerifyHeaders(chain, headers, seals)
+close(abort)
+
+// Deplete the results channel
+verified := 0
+for depleted := false; !depleted; {
+select {
+case result := <-results:
+if result != nil {
+t.Errorf("header %d: validation failed: %v", verified, result)
+}
+verified++
+case <-time.After(50 * time.Millisecond):
+depleted = true
+}
+}
+// Check that abortion was honored by not processing too many POWs
+if verified > 2*threads {
+t.Errorf("verification count too large: have %d, want below %d", verified, 2*threads)
 }
 }
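The tests above all consume the same contract from VerifyHeaders: an abort channel plus a results channel delivering one error per requested header, guarded by timeouts so a stalled verifier fails the test instead of hanging it. A self-contained model of that consumption pattern (toy verifier, invented names):

package main

import (
    "errors"
    "fmt"
    "time"
)

// verifyAll mimics the VerifyHeaders contract: it returns an abort channel
// and a results channel yielding exactly one error (or nil) per item, in order.
func verifyAll(items []int, bad int) (chan struct{}, <-chan error) {
    abort := make(chan struct{})
    results := make(chan error, len(items))
    go func() {
        for i := range items {
            select {
            case <-abort:
                return // caller gave up, stop producing results
            default:
            }
            var err error
            if i == bad {
                err = errors.New("invalid item")
            }
            results <- err
        }
    }()
    return abort, results
}

func main() {
    abort, results := verifyAll(make([]int, 4), 2)
    defer close(abort)

    // Drain one result per item, guarding against a stalled verifier with a
    // timeout, exactly like the header verification tests do.
    for i := 0; i < 4; i++ {
        select {
        case err := <-results:
            fmt.Printf("item %d: %v\n", i, err)
        case <-time.After(time.Second):
            fmt.Printf("item %d: timeout\n", i)
        }
    }
}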
@@ -30,6 +30,7 @@ import (
 
 "github.com/ethereum/go-ethereum/common"
 "github.com/ethereum/go-ethereum/common/mclock"
+"github.com/ethereum/go-ethereum/consensus"
 "github.com/ethereum/go-ethereum/core/state"
 "github.com/ethereum/go-ethereum/core/types"
 "github.com/ethereum/go-ethereum/core/vm"
@@ -39,7 +40,6 @@ import (
 "github.com/ethereum/go-ethereum/log"
 "github.com/ethereum/go-ethereum/metrics"
 "github.com/ethereum/go-ethereum/params"
-"github.com/ethereum/go-ethereum/pow"
 "github.com/ethereum/go-ethereum/rlp"
 "github.com/ethereum/go-ethereum/trie"
 "github.com/hashicorp/golang-lru"
@@ -104,7 +104,7 @@ type BlockChain struct {
 procInterrupt int32 // interrupt signaler for block processing
 wg sync.WaitGroup // chain processing wait group for shutting down
 
-pow pow.PoW
+engine consensus.Engine
 processor Processor // block processor interface
 validator Validator // block and state validator interface
 vmConfig vm.Config
@@ -115,7 +115,7 @@ type BlockChain struct {
 // NewBlockChain returns a fully initialised block chain using information
 // available in the database. It initialiser the default Ethereum Validator and
 // Processor.
-func NewBlockChain(chainDb ethdb.Database, config *params.ChainConfig, pow pow.PoW, mux *event.TypeMux, vmConfig vm.Config) (*BlockChain, error) {
+func NewBlockChain(chainDb ethdb.Database, config *params.ChainConfig, engine consensus.Engine, mux *event.TypeMux, vmConfig vm.Config) (*BlockChain, error) {
 bodyCache, _ := lru.New(bodyCacheLimit)
 bodyRLPCache, _ := lru.New(bodyCacheLimit)
 blockCache, _ := lru.New(blockCacheLimit)
@@ -131,25 +131,22 @@ func NewBlockChain(chainDb ethdb.Database, config *params.ChainConfig, pow pow.P
 bodyRLPCache: bodyRLPCache,
 blockCache: blockCache,
 futureBlocks: futureBlocks,
-pow: pow,
+engine: engine,
 vmConfig: vmConfig,
 badBlocks: badBlocks,
 }
-bc.SetValidator(NewBlockValidator(config, bc, pow))
-bc.SetProcessor(NewStateProcessor(config, bc))
+bc.SetValidator(NewBlockValidator(config, bc, engine))
+bc.SetProcessor(NewStateProcessor(config, bc, engine))
 
-gv := func() HeaderValidator { return bc.Validator() }
 var err error
-bc.hc, err = NewHeaderChain(chainDb, config, gv, bc.getProcInterrupt)
+bc.hc, err = NewHeaderChain(chainDb, config, engine, bc.getProcInterrupt)
 if err != nil {
 return nil, err
 }
 
 bc.genesisBlock = bc.GetBlockByNumber(0)
 if bc.genesisBlock == nil {
 return nil, ErrNoGenesis
 }
 
 if err := bc.loadLastState(); err != nil {
 return nil, err
 }
@@ -233,9 +230,6 @@ func (self *BlockChain) loadLastState() error {
 log.Info("Loaded most recent local full block", "number", self.currentBlock.Number(), "hash", self.currentBlock.Hash(), "td", blockTd)
 log.Info("Loaded most recent local fast block", "number", self.currentFastBlock.Number(), "hash", self.currentFastBlock.Hash(), "td", fastTd)
 
-// Try to be smart and issue a pow verification for the head to pre-generate caches
-go self.pow.Verify(types.NewBlockWithHeader(currentHeader))
-
 return nil
 }
 
@@ -383,9 +377,6 @@ func (self *BlockChain) Processor() Processor {
 return self.processor
 }
 
-// AuxValidator returns the auxiliary validator (Proof of work atm)
-func (self *BlockChain) AuxValidator() pow.PoW { return self.pow }
-
 // State returns a new mutable state based on the current HEAD block.
 func (self *BlockChain) State() (*state.StateDB, error) {
 return self.StateAt(self.CurrentBlock().Root())
@@ -906,38 +897,38 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
 stats = insertStats{startTime: mclock.Now()}
 events = make([]interface{}, 0, len(chain))
 coalescedLogs []*types.Log
-nonceChecked = make([]bool, len(chain))
 )
-// Start the parallel nonce verifier.
-nonceAbort, nonceResults := verifyNoncesFromBlocks(self.pow, chain)
-defer close(nonceAbort)
-
+// Start the parallel header verifier
+headers := make([]*types.Header, len(chain))
+seals := make([]bool, len(chain))
+
 for i, block := range chain {
+headers[i] = block.Header()
+seals[i] = true
+}
+abort, results := self.engine.VerifyHeaders(self, headers, seals)
+defer close(abort)
+
+// Iterate over the blocks and insert when the verifier permits
+for i, block := range chain {
+// If the chain is terminating, stop processing blocks
 if atomic.LoadInt32(&self.procInterrupt) == 1 {
 log.Debug("Premature abort during blocks processing")
 break
 }
-bstart := time.Now()
-// Wait for block i's nonce to be verified before processing
-// its state transition.
-for !nonceChecked[i] {
-r := <-nonceResults
-nonceChecked[r.index] = true
-if !r.valid {
-invalid := chain[r.index]
-return r.index, &BlockNonceErr{Hash: invalid.Hash(), Number: invalid.Number(), Nonce: invalid.Nonce()}
-}
-}
-
+// If the header is a banned one, straight out abort
 if BadHashes[block.Hash()] {
 err := BadHashError(block.Hash())
 self.reportBlock(block, nil, err)
 return i, err
 }
-// Stage 1 validation of the block using the chain's validator
-// interface.
-err := self.Validator().ValidateBlock(block)
+// Wait for the block's verification to complete
+bstart := time.Now()
+
+err := <-results
+if err == nil {
+err = self.Validator().ValidateBody(block)
+}
 if err != nil {
 if IsKnownBlockErr(err) {
 stats.ignored++
@@ -952,7 +943,6 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
 if block.Time().Cmp(max) == 1 {
 return i, fmt.Errorf("%v: BlockFutureErr, %v > %v", BlockFutureErr, block.Time(), max)
 }
-
 self.futureBlocks.Add(block.Hash(), block)
 stats.queued++
 continue
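The InsertChain rewrite above is the core integration point of the new engine interface: verification for the whole batch is started once, and the insert loop then pairs each block with exactly one value read from the results channel before doing any state processing, closing abort on early exit. A reduced sketch of that control flow (toy types, not the go-ethereum API):

package main

import (
    "fmt"
)

// insertAll starts verification for the whole batch up front, then consumes
// exactly one result per block, in order, before processing it.
func insertAll(blocks []string, verify func([]string) (chan struct{}, <-chan error)) (int, error) {
    abort, results := verify(blocks)
    defer close(abort) // stop outstanding verification work on early return

    for i := range blocks {
        if err := <-results; err != nil {
            return i, err // the verifier rejected block i
        }
        // ... process block i (state transition, DB writes) ...
    }
    return len(blocks), nil
}

func main() {
    verify := func(blocks []string) (chan struct{}, <-chan error) {
        abort := make(chan struct{})
        results := make(chan error, len(blocks))
        for range blocks {
            results <- nil // every block passes in this toy verifier
        }
        return abort, results
    }
    n, err := insertAll([]string{"a", "b", "c"}, verify)
    fmt.Println(n, err)
}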
@ -24,6 +24,7 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||||
"github.com/ethereum/go-ethereum/core/state"
|
"github.com/ethereum/go-ethereum/core/state"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/core/vm"
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
|
@ -31,18 +32,21 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/event"
|
"github.com/ethereum/go-ethereum/event"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/pow"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// newTestBlockChain creates a blockchain without validation.
|
// newTestBlockChain creates a blockchain without validation.
|
||||||
func newTestBlockChain() *BlockChain {
|
func newTestBlockChain(fake bool) *BlockChain {
|
||||||
db, _ := ethdb.NewMemDatabase()
|
db, _ := ethdb.NewMemDatabase()
|
||||||
gspec := &Genesis{
|
gspec := &Genesis{
|
||||||
Config: params.TestChainConfig,
|
Config: params.TestChainConfig,
|
||||||
Difficulty: big.NewInt(1),
|
Difficulty: big.NewInt(1),
|
||||||
}
|
}
|
||||||
gspec.MustCommit(db)
|
gspec.MustCommit(db)
|
||||||
blockchain, err := NewBlockChain(db, gspec.Config, pow.FakePow{}, new(event.TypeMux), vm.Config{})
|
engine := ethash.NewFullFaker()
|
||||||
|
if !fake {
|
||||||
|
engine = ethash.NewTester()
|
||||||
|
}
|
||||||
|
blockchain, err := NewBlockChain(db, gspec.Config, engine, new(event.TypeMux), vm.Config{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
@ -117,7 +121,10 @@ func printChain(bc *BlockChain) {
|
||||||
func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
|
func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
|
||||||
for _, block := range chain {
|
for _, block := range chain {
|
||||||
// Try and process the block
|
// Try and process the block
|
||||||
err := blockchain.Validator().ValidateBlock(block)
|
err := blockchain.engine.VerifyHeader(blockchain, block.Header(), true)
|
||||||
|
if err == nil {
|
||||||
|
err = blockchain.validator.ValidateBody(block)
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if IsKnownBlockErr(err) {
|
if IsKnownBlockErr(err) {
|
||||||
continue
|
continue
|
||||||
|
@ -133,7 +140,7 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
|
||||||
blockchain.reportBlock(block, receipts, err)
|
blockchain.reportBlock(block, receipts, err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
err = blockchain.Validator().ValidateState(block, blockchain.GetBlockByHash(block.ParentHash()), statedb, receipts, usedGas)
|
err = blockchain.validator.ValidateState(block, blockchain.GetBlockByHash(block.ParentHash()), statedb, receipts, usedGas)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
blockchain.reportBlock(block, receipts, err)
|
blockchain.reportBlock(block, receipts, err)
|
||||||
return err
|
return err
|
||||||
|
@ -152,7 +159,7 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
|
||||||
func testHeaderChainImport(chain []*types.Header, blockchain *BlockChain) error {
|
func testHeaderChainImport(chain []*types.Header, blockchain *BlockChain) error {
|
||||||
for _, header := range chain {
|
for _, header := range chain {
|
||||||
// Try and validate the header
|
// Try and validate the header
|
||||||
if err := blockchain.Validator().ValidateHeader(header, blockchain.GetHeaderByHash(header.ParentHash), false); err != nil {
|
if err := blockchain.engine.VerifyHeader(blockchain, header, false); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// Manually insert the header into the database, but don't reorganise (allows subsequent testing)
|
// Manually insert the header into the database, but don't reorganise (allows subsequent testing)
|
||||||
|
@ -174,7 +181,7 @@ func insertChain(done chan bool, blockchain *BlockChain, chain types.Blocks, t *
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestLastBlock(t *testing.T) {
|
func TestLastBlock(t *testing.T) {
|
||||||
bchain := newTestBlockChain()
|
bchain := newTestBlockChain(false)
|
||||||
block := makeBlockChain(bchain.CurrentBlock(), 1, bchain.chainDb, 0)[0]
|
block := makeBlockChain(bchain.CurrentBlock(), 1, bchain.chainDb, 0)[0]
|
||||||
bchain.insert(block)
|
bchain.insert(block)
|
||||||
if block.Hash() != GetHeadBlockHash(bchain.chainDb) {
|
if block.Hash() != GetHeadBlockHash(bchain.chainDb) {
|
||||||
|
@ -318,8 +325,7 @@ func testBrokenChain(t *testing.T, full bool) {
|
||||||
|
|
||||||
type bproc struct{}
|
type bproc struct{}
|
||||||
|
|
||||||
func (bproc) ValidateBlock(*types.Block) error { return nil }
|
func (bproc) ValidateBody(*types.Block) error { return nil }
|
||||||
func (bproc) ValidateHeader(*types.Header, *types.Header, bool) error { return nil }
|
|
||||||
func (bproc) ValidateState(block, parent *types.Block, state *state.StateDB, receipts types.Receipts, usedGas *big.Int) error {
|
func (bproc) ValidateState(block, parent *types.Block, state *state.StateDB, receipts types.Receipts, usedGas *big.Int) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@@ -378,7 +384,7 @@ func testReorgShort(t *testing.T, full bool) {
 }

 func testReorg(t *testing.T, first, second []int, td int64, full bool) {
-	bc := newTestBlockChain()
+	bc := newTestBlockChain(true)

 	// Insert an easy and a difficult chain afterwards
 	if full {

@@ -422,7 +428,7 @@ func TestBadHeaderHashes(t *testing.T) { testBadHashes(t, false) }
 func TestBadBlockHashes(t *testing.T) { testBadHashes(t, true) }

 func testBadHashes(t *testing.T, full bool) {
-	bc := newTestBlockChain()
+	bc := newTestBlockChain(true)

 	// Create a chain, ban a hash and try to import
 	var err error

@@ -446,7 +452,7 @@ func TestReorgBadHeaderHashes(t *testing.T) { testReorgBadHashes(t, false) }
 func TestReorgBadBlockHashes(t *testing.T) { testReorgBadHashes(t, true) }

 func testReorgBadHashes(t *testing.T, full bool) {
-	bc := newTestBlockChain()
+	bc := newTestBlockChain(true)

 	// Create a chain, import and ban afterwards
 	headers := makeHeaderChainWithDiff(bc.genesisBlock, []int{1, 2, 3, 4}, 10)

@@ -473,7 +479,7 @@ func testReorgBadHashes(t *testing.T, full bool) {
 	}

 	// Create a new BlockChain and check that it rolled back the state.
-	ncm, err := NewBlockChain(bc.chainDb, bc.config, pow.FakePow{}, new(event.TypeMux), vm.Config{})
+	ncm, err := NewBlockChain(bc.chainDb, bc.config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
 	if err != nil {
 		t.Fatalf("failed to create new chain manager: %v", err)
 	}
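Every hunk above makes the same mechanical swap: the old pow.FakePow{} checker becomes a faked ethash engine handed straight to NewBlockChain. A minimal sketch of that test setup, assuming package core and the in-memory database helpers these tests already use; the helper name newFakerChain is illustrative and not part of the diff:

package core

import (
	"testing"

	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
)

// newFakerChain commits the given genesis into a fresh in-memory database and
// opens a BlockChain on top of it, using ethash.NewFaker() so that headers are
// accepted without doing any real proof-of-work.
func newFakerChain(t *testing.T, gspec *Genesis) *BlockChain {
	db, _ := ethdb.NewMemDatabase()
	gspec.MustCommit(db)

	chain, err := NewBlockChain(db, gspec.Config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
	if err != nil {
		t.Fatalf("failed to create test chain: %v", err)
	}
	return chain
}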
@@ -504,46 +510,34 @@ func testInsertNonceError(t *testing.T, full bool) {
 		}
 		// Create and insert a chain with a failing nonce
 		var (
 			failAt  int
 			failRes int
 			failNum uint64
-			failHash common.Hash
 		)
 		if full {
 			blocks := makeBlockChain(blockchain.CurrentBlock(), i, db, 0)

 			failAt = rand.Int() % len(blocks)
 			failNum = blocks[failAt].NumberU64()
-			failHash = blocks[failAt].Hash()

-			blockchain.pow = failPow{failNum}
-
+			blockchain.engine = ethash.NewFakeFailer(failNum)
 			failRes, err = blockchain.InsertChain(blocks)
 		} else {
 			headers := makeHeaderChain(blockchain.CurrentHeader(), i, db, 0)

 			failAt = rand.Int() % len(headers)
 			failNum = headers[failAt].Number.Uint64()
-			failHash = headers[failAt].Hash()

-			blockchain.pow = failPow{failNum}
-			blockchain.validator = NewBlockValidator(params.TestChainConfig, blockchain, failPow{failNum})
-
+			blockchain.engine = ethash.NewFakeFailer(failNum)
+			blockchain.hc.engine = blockchain.engine
 			failRes, err = blockchain.InsertHeaderChain(headers, 1)
 		}
 		// Check that the returned error indicates the nonce failure.
 		if failRes != failAt {
 			t.Errorf("test %d: failure index mismatch: have %d, want %d", i, failRes, failAt)
 		}
-		if !IsBlockNonceErr(err) {
+		if err != ethash.ErrInvalidPoW {
-			t.Fatalf("test %d: error mismatch: have %v, want nonce error %T", i, err, err)
+			t.Fatalf("test %d: error mismatch: have %v, want %v", i, err, ethash.ErrInvalidPoW)
-		}
-		nerr := err.(*BlockNonceErr)
-		if nerr.Number.Uint64() != failNum {
-			t.Errorf("test %d: number mismatch: have %v, want %v", i, nerr.Number, failNum)
-		}
-		if nerr.Hash != failHash {
-			t.Errorf("test %d: hash mismatch: have %x, want %x", i, nerr.Hash[:4], failHash[:4])
 		}
 		// Check that all no blocks after the failing block have been inserted.
 		for j := 0; j < i-failAt; j++ {
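The rewritten testInsertNonceError above no longer needs a hand-rolled failPow; it swaps the chain's engine for ethash.NewFakeFailer(failNum), which rejects exactly one block number with ethash.ErrInvalidPoW. A compressed sketch of that failure-injection pattern; checkFailingImport is a hypothetical helper and must live in package core because it touches the unexported engine field:

package core

import (
	"testing"

	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core/types"
)

// checkFailingImport points the chain at a fake engine that fails at the
// chosen block, then asserts the import stops at that index with the engine's
// invalid-PoW error, mirroring the assertions in the hunk above.
func checkFailingImport(t *testing.T, blockchain *BlockChain, blocks types.Blocks, failAt int) {
	blockchain.engine = ethash.NewFakeFailer(blocks[failAt].NumberU64())

	failRes, err := blockchain.InsertChain(blocks)
	if failRes != failAt {
		t.Errorf("failure index mismatch: have %d, want %d", failRes, failAt)
	}
	if err != ethash.ErrInvalidPoW {
		t.Fatalf("error mismatch: have %v, want %v", err, ethash.ErrInvalidPoW)
	}
}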
@@ -569,9 +563,12 @@ func TestFastVsFullChains(t *testing.T) {
 		key, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
 		address = crypto.PubkeyToAddress(key.PublicKey)
 		funds   = big.NewInt(1000000000)
-		gspec   = testGenesis(address, funds)
-		genesis = gspec.MustCommit(gendb)
-		signer  = types.NewEIP155Signer(gspec.Config.ChainId)
+		gspec   = &Genesis{
+			Config: params.TestChainConfig,
+			Alloc:  GenesisAlloc{address: {Balance: funds}},
+		}
+		genesis = gspec.MustCommit(gendb)
+		signer  = types.NewEIP155Signer(gspec.Config.ChainId)
 	)
 	blocks, receipts := GenerateChain(gspec.Config, genesis, gendb, 1024, func(i int, block *BlockGen) {
 		block.SetCoinbase(common.Address{0x00})
@@ -594,7 +591,7 @@ func TestFastVsFullChains(t *testing.T) {
 	// Import the chain as an archive node for the comparison baseline
 	archiveDb, _ := ethdb.NewMemDatabase()
 	gspec.MustCommit(archiveDb)
-	archive, _ := NewBlockChain(archiveDb, gspec.Config, pow.FakePow{}, new(event.TypeMux), vm.Config{})
+	archive, _ := NewBlockChain(archiveDb, gspec.Config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})

 	if n, err := archive.InsertChain(blocks); err != nil {
 		t.Fatalf("failed to process block %d: %v", n, err)

@@ -603,7 +600,7 @@ func TestFastVsFullChains(t *testing.T) {
 	// Fast import the chain as a non-archive node to test
 	fastDb, _ := ethdb.NewMemDatabase()
 	gspec.MustCommit(fastDb)
-	fast, _ := NewBlockChain(fastDb, gspec.Config, pow.FakePow{}, new(event.TypeMux), vm.Config{})
+	fast, _ := NewBlockChain(fastDb, gspec.Config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})

 	headers := make([]*types.Header, len(blocks))
 	for i, block := range blocks {
@ -680,8 +677,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
|
||||||
archiveDb, _ := ethdb.NewMemDatabase()
|
archiveDb, _ := ethdb.NewMemDatabase()
|
||||||
gspec.MustCommit(archiveDb)
|
gspec.MustCommit(archiveDb)
|
||||||
|
|
||||||
archive, _ := NewBlockChain(archiveDb, gspec.Config, pow.FakePow{}, new(event.TypeMux), vm.Config{})
|
archive, _ := NewBlockChain(archiveDb, gspec.Config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
|
||||||
|
|
||||||
if n, err := archive.InsertChain(blocks); err != nil {
|
if n, err := archive.InsertChain(blocks); err != nil {
|
||||||
t.Fatalf("failed to process block %d: %v", n, err)
|
t.Fatalf("failed to process block %d: %v", n, err)
|
||||||
}
|
}
|
||||||
|
@ -692,7 +688,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
|
||||||
// Import the chain as a non-archive node and ensure all pointers are updated
|
// Import the chain as a non-archive node and ensure all pointers are updated
|
||||||
fastDb, _ := ethdb.NewMemDatabase()
|
fastDb, _ := ethdb.NewMemDatabase()
|
||||||
gspec.MustCommit(fastDb)
|
gspec.MustCommit(fastDb)
|
||||||
fast, _ := NewBlockChain(fastDb, gspec.Config, pow.FakePow{}, new(event.TypeMux), vm.Config{})
|
fast, _ := NewBlockChain(fastDb, gspec.Config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
|
||||||
|
|
||||||
headers := make([]*types.Header, len(blocks))
|
headers := make([]*types.Header, len(blocks))
|
||||||
for i, block := range blocks {
|
for i, block := range blocks {
|
||||||
|
@ -711,8 +707,8 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
|
||||||
// Import the chain as a light node and ensure all pointers are updated
|
// Import the chain as a light node and ensure all pointers are updated
|
||||||
lightDb, _ := ethdb.NewMemDatabase()
|
lightDb, _ := ethdb.NewMemDatabase()
|
||||||
gspec.MustCommit(lightDb)
|
gspec.MustCommit(lightDb)
|
||||||
light, _ := NewBlockChain(lightDb, gspec.Config, pow.FakePow{}, new(event.TypeMux), vm.Config{})
|
|
||||||
|
|
||||||
|
light, _ := NewBlockChain(lightDb, gspec.Config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
|
||||||
if n, err := light.InsertHeaderChain(headers, 1); err != nil {
|
if n, err := light.InsertHeaderChain(headers, 1); err != nil {
|
||||||
t.Fatalf("failed to insert header %d: %v", n, err)
|
t.Fatalf("failed to insert header %d: %v", n, err)
|
||||||
}
|
}
|
||||||
|
@ -780,7 +776,7 @@ func TestChainTxReorgs(t *testing.T) {
|
||||||
})
|
})
|
||||||
// Import the chain. This runs all block validation rules.
|
// Import the chain. This runs all block validation rules.
|
||||||
evmux := &event.TypeMux{}
|
evmux := &event.TypeMux{}
|
||||||
blockchain, _ := NewBlockChain(db, gspec.Config, pow.FakePow{}, evmux, vm.Config{})
|
blockchain, _ := NewBlockChain(db, gspec.Config, ethash.NewFaker(), evmux, vm.Config{})
|
||||||
if i, err := blockchain.InsertChain(chain); err != nil {
|
if i, err := blockchain.InsertChain(chain); err != nil {
|
||||||
t.Fatalf("failed to insert original chain[%d]: %v", i, err)
|
t.Fatalf("failed to insert original chain[%d]: %v", i, err)
|
||||||
}
|
}
|
||||||
|
@ -851,7 +847,7 @@ func TestLogReorgs(t *testing.T) {
|
||||||
)
|
)
|
||||||
|
|
||||||
var evmux event.TypeMux
|
var evmux event.TypeMux
|
||||||
blockchain, _ := NewBlockChain(db, gspec.Config, pow.FakePow{}, &evmux, vm.Config{})
|
blockchain, _ := NewBlockChain(db, gspec.Config, ethash.NewFaker(), &evmux, vm.Config{})
|
||||||
|
|
||||||
subs := evmux.Subscribe(RemovedLogsEvent{})
|
subs := evmux.Subscribe(RemovedLogsEvent{})
|
||||||
chain, _ := GenerateChain(params.TestChainConfig, genesis, db, 2, func(i int, gen *BlockGen) {
|
chain, _ := GenerateChain(params.TestChainConfig, genesis, db, 2, func(i int, gen *BlockGen) {
|
||||||
|
@ -883,13 +879,16 @@ func TestReorgSideEvent(t *testing.T) {
|
||||||
db, _ = ethdb.NewMemDatabase()
|
db, _ = ethdb.NewMemDatabase()
|
||||||
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
||||||
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
|
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
|
||||||
gspec = testGenesis(addr1, big.NewInt(10000000000000))
|
gspec = &Genesis{
|
||||||
|
Config: params.TestChainConfig,
|
||||||
|
Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}},
|
||||||
|
}
|
||||||
genesis = gspec.MustCommit(db)
|
genesis = gspec.MustCommit(db)
|
||||||
signer = types.NewEIP155Signer(gspec.Config.ChainId)
|
signer = types.NewEIP155Signer(gspec.Config.ChainId)
|
||||||
)
|
)
|
||||||
|
|
||||||
evmux := &event.TypeMux{}
|
evmux := &event.TypeMux{}
|
||||||
blockchain, _ := NewBlockChain(db, gspec.Config, pow.FakePow{}, evmux, vm.Config{})
|
blockchain, _ := NewBlockChain(db, gspec.Config, ethash.NewFaker(), evmux, vm.Config{})
|
||||||
|
|
||||||
chain, _ := GenerateChain(gspec.Config, genesis, db, 3, func(i int, gen *BlockGen) {})
|
chain, _ := GenerateChain(gspec.Config, genesis, db, 3, func(i int, gen *BlockGen) {})
|
||||||
if _, err := blockchain.InsertChain(chain); err != nil {
|
if _, err := blockchain.InsertChain(chain); err != nil {
|
||||||
|
@ -959,7 +958,7 @@ done:
|
||||||
|
|
||||||
// Tests if the canonical block can be fetched from the database during chain insertion.
|
// Tests if the canonical block can be fetched from the database during chain insertion.
|
||||||
func TestCanonicalBlockRetrieval(t *testing.T) {
|
func TestCanonicalBlockRetrieval(t *testing.T) {
|
||||||
bc := newTestBlockChain()
|
bc := newTestBlockChain(false)
|
||||||
chain, _ := GenerateChain(bc.config, bc.genesisBlock, bc.chainDb, 10, func(i int, gen *BlockGen) {})
|
chain, _ := GenerateChain(bc.config, bc.genesisBlock, bc.chainDb, 10, func(i int, gen *BlockGen) {})
|
||||||
|
|
||||||
for i := range chain {
|
for i := range chain {
|
||||||
|
@ -1004,7 +1003,7 @@ func TestEIP155Transition(t *testing.T) {
|
||||||
mux event.TypeMux
|
mux event.TypeMux
|
||||||
)
|
)
|
||||||
|
|
||||||
blockchain, _ := NewBlockChain(db, gspec.Config, pow.FakePow{}, &mux, vm.Config{})
|
blockchain, _ := NewBlockChain(db, gspec.Config, ethash.NewFaker(), &mux, vm.Config{})
|
||||||
blocks, _ := GenerateChain(gspec.Config, genesis, db, 4, func(i int, block *BlockGen) {
|
blocks, _ := GenerateChain(gspec.Config, genesis, db, 4, func(i int, block *BlockGen) {
|
||||||
var (
|
var (
|
||||||
tx *types.Transaction
|
tx *types.Transaction
|
||||||
|
@ -1110,7 +1109,7 @@ func TestEIP161AccountRemoval(t *testing.T) {
|
||||||
}
|
}
|
||||||
genesis = gspec.MustCommit(db)
|
genesis = gspec.MustCommit(db)
|
||||||
mux event.TypeMux
|
mux event.TypeMux
|
||||||
blockchain, _ = NewBlockChain(db, gspec.Config, pow.FakePow{}, &mux, vm.Config{})
|
blockchain, _ = NewBlockChain(db, gspec.Config, ethash.NewFaker(), &mux, vm.Config{})
|
||||||
)
|
)
|
||||||
blocks, _ := GenerateChain(gspec.Config, genesis, db, 3, func(i int, block *BlockGen) {
|
blocks, _ := GenerateChain(gspec.Config, genesis, db, 3, func(i int, block *BlockGen) {
|
||||||
var (
|
var (
|
||||||
|
|
|
@@ -21,13 +21,14 @@ import (
 	"math/big"

 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/consensus/ethash"
+	"github.com/ethereum/go-ethereum/consensus/misc"
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/params"
-	"github.com/ethereum/go-ethereum/pow"
 )

 // So we can deterministically seed different blockchains

@@ -141,7 +142,7 @@ func (b *BlockGen) OffsetTime(seconds int64) {
 	if b.header.Time.Cmp(b.parent.Header().Time) <= 0 {
 		panic("block time out of range")
 	}
-	b.header.Difficulty = CalcDifficulty(b.config, b.header.Time.Uint64(), b.parent.Time().Uint64(), b.parent.Number(), b.parent.Difficulty())
+	b.header.Difficulty = ethash.CalcDifficulty(b.config, b.header.Time.Uint64(), b.parent.Time().Uint64(), b.parent.Number(), b.parent.Difficulty())
 }

 // GenerateChain creates a chain of n blocks. The first block's

@@ -173,13 +174,13 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, db ethdb.Dat
 			}
 		}
 		if config.DAOForkSupport && config.DAOForkBlock != nil && config.DAOForkBlock.Cmp(h.Number) == 0 {
-			ApplyDAOHardFork(statedb)
+			misc.ApplyDAOHardFork(statedb)
 		}
 		// Execute any user modifications to the block and finalize it
 		if gen != nil {
 			gen(i, b)
 		}
-		AccumulateRewards(statedb, h, b.uncles)
+		ethash.AccumulateRewards(statedb, h, b.uncles)
 		root, err := statedb.Commit(config.IsEIP158(h.Number))
 		if err != nil {
 			panic(fmt.Sprintf("state write error: %v", err))

@@ -212,7 +213,7 @@ func makeHeader(config *params.ChainConfig, parent *types.Block, state *state.St
 		Root:       state.IntermediateRoot(config.IsEIP158(parent.Number())),
 		ParentHash: parent.Hash(),
 		Coinbase:   parent.Coinbase(),
-		Difficulty: CalcDifficulty(config, time.Uint64(), new(big.Int).Sub(time, big.NewInt(10)).Uint64(), parent.Number(), parent.Difficulty()),
+		Difficulty: ethash.CalcDifficulty(config, time.Uint64(), new(big.Int).Sub(time, big.NewInt(10)).Uint64(), parent.Number(), parent.Difficulty()),
 		GasLimit:   CalcGasLimit(parent),
 		GasUsed:    new(big.Int),
 		Number:     new(big.Int).Add(parent.Number(), common.Big1),

@@ -229,7 +230,7 @@ func newCanonical(n int, full bool) (ethdb.Database, *BlockChain, error) {
 	db, _ := ethdb.NewMemDatabase()
 	genesis := gspec.MustCommit(db)

-	blockchain, _ := NewBlockChain(db, params.AllProtocolChanges, pow.FakePow{}, new(event.TypeMux), vm.Config{})
+	blockchain, _ := NewBlockChain(db, params.AllProtocolChanges, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
 	// Create and inject the requested chain
 	if n == 0 {
 		return db, blockchain, nil
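With the pow package gone from chain generation, the hunks above call the ethash helpers directly: misc.ApplyDAOHardFork for the fork transition, ethash.CalcDifficulty for child-header difficulty and ethash.AccumulateRewards for the static block subsidy. A condensed sketch of that per-block bookkeeping, using only calls that appear in the hunks; the wrapper function itself is illustrative:

package core

import (
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/params"
)

// finalizeGeneratedBlock condenses what GenerateChain/makeHeader now do per
// block: derive the ethash difficulty from the parent and credit the miner
// and uncle rewards without instantiating a consensus engine.
func finalizeGeneratedBlock(config *params.ChainConfig, parent *types.Block, header *types.Header, statedb *state.StateDB, uncles []*types.Header) {
	header.Difficulty = ethash.CalcDifficulty(config, header.Time.Uint64(), parent.Time().Uint64(), parent.Number(), parent.Difficulty())
	ethash.AccumulateRewards(statedb, header, uncles)
}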
@@ -20,13 +20,13 @@ import (
 	"fmt"
 	"math/big"

+	"github.com/ethereum/go-ethereum/consensus/ethash"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/params"
-	"github.com/ethereum/go-ethereum/pow"
 )

 func ExampleGenerateChain() {

@@ -81,7 +81,7 @@ func ExampleGenerateChain() {

 	// Import the chain. This runs all block validation rules.
 	evmux := &event.TypeMux{}
-	blockchain, _ := NewBlockChain(db, gspec.Config, pow.FakePow{}, evmux, vm.Config{})
+	blockchain, _ := NewBlockChain(db, gspec.Config, ethash.NewFaker(), evmux, vm.Config{})
 	if i, err := blockchain.InsertChain(chain); err != nil {
 		fmt.Printf("insert error (block %d): %v\n", chain[i].NumberU64(), err)
 		return
@ -1,87 +0,0 @@
|
||||||
// Copyright 2015 The go-ethereum Authors
|
|
||||||
// This file is part of the go-ethereum library.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Lesser General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Lesser General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package core
|
|
||||||
|
|
||||||
import (
|
|
||||||
"runtime"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
|
||||||
"github.com/ethereum/go-ethereum/pow"
|
|
||||||
)
|
|
||||||
|
|
||||||
// nonceCheckResult contains the result of a nonce verification.
|
|
||||||
type nonceCheckResult struct {
|
|
||||||
index int // Index of the item verified from an input array
|
|
||||||
valid bool // Result of the nonce verification
|
|
||||||
}
|
|
||||||
|
|
||||||
// verifyNoncesFromHeaders starts a concurrent header nonce verification,
|
|
||||||
// returning a quit channel to abort the operations and a results channel
|
|
||||||
// to retrieve the async verifications.
|
|
||||||
func verifyNoncesFromHeaders(checker pow.PoW, headers []*types.Header) (chan<- struct{}, <-chan nonceCheckResult) {
|
|
||||||
items := make([]pow.Block, len(headers))
|
|
||||||
for i, header := range headers {
|
|
||||||
items[i] = types.NewBlockWithHeader(header)
|
|
||||||
}
|
|
||||||
return verifyNonces(checker, items)
|
|
||||||
}
|
|
||||||
|
|
||||||
// verifyNoncesFromBlocks starts a concurrent block nonce verification,
|
|
||||||
// returning a quit channel to abort the operations and a results channel
|
|
||||||
// to retrieve the async verifications.
|
|
||||||
func verifyNoncesFromBlocks(checker pow.PoW, blocks []*types.Block) (chan<- struct{}, <-chan nonceCheckResult) {
|
|
||||||
items := make([]pow.Block, len(blocks))
|
|
||||||
for i, block := range blocks {
|
|
||||||
items[i] = block
|
|
||||||
}
|
|
||||||
return verifyNonces(checker, items)
|
|
||||||
}
|
|
||||||
|
|
||||||
// verifyNonces starts a concurrent nonce verification, returning a quit channel
|
|
||||||
// to abort the operations and a results channel to retrieve the async checks.
|
|
||||||
func verifyNonces(checker pow.PoW, items []pow.Block) (chan<- struct{}, <-chan nonceCheckResult) {
|
|
||||||
// Spawn as many workers as allowed threads
|
|
||||||
workers := runtime.GOMAXPROCS(0)
|
|
||||||
if len(items) < workers {
|
|
||||||
workers = len(items)
|
|
||||||
}
|
|
||||||
// Create a task channel and spawn the verifiers
|
|
||||||
tasks := make(chan int, workers)
|
|
||||||
results := make(chan nonceCheckResult, len(items)) // Buffered to make sure all workers stop
|
|
||||||
for i := 0; i < workers; i++ {
|
|
||||||
go func() {
|
|
||||||
for index := range tasks {
|
|
||||||
results <- nonceCheckResult{index: index, valid: checker.Verify(items[index]) == nil}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
// Feed item indices to the workers until done or aborted
|
|
||||||
abort := make(chan struct{})
|
|
||||||
go func() {
|
|
||||||
defer close(tasks)
|
|
||||||
|
|
||||||
for i := range items {
|
|
||||||
select {
|
|
||||||
case tasks <- i:
|
|
||||||
continue
|
|
||||||
case <-abort:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
return abort, results
|
|
||||||
}
|
|
|
@ -1,238 +0,0 @@
|
||||||
// Copyright 2015 The go-ethereum Authors
|
|
||||||
// This file is part of the go-ethereum library.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Lesser General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Lesser General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package core
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"math/big"
|
|
||||||
"runtime"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
|
||||||
"github.com/ethereum/go-ethereum/params"
|
|
||||||
"github.com/ethereum/go-ethereum/pow"
|
|
||||||
)
|
|
||||||
|
|
||||||
// failPow is a non-validating proof of work implementation, that returns true
|
|
||||||
// from Verify for all but one block.
|
|
||||||
type failPow struct {
|
|
||||||
failing uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pow failPow) Search(pow.Block, <-chan struct{}) (uint64, []byte) {
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
||||||
func (pow failPow) Verify(block pow.Block) error {
|
|
||||||
if block.NumberU64() == pow.failing {
|
|
||||||
return errors.New("failed")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
func (pow failPow) Hashrate() float64 { return 0 }
|
|
||||||
|
|
||||||
// delayedPow is a non-validating proof of work implementation, that returns true
|
|
||||||
// from Verify for all blocks, but delays them the configured amount of time.
|
|
||||||
type delayedPow struct {
|
|
||||||
delay time.Duration
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pow delayedPow) Search(pow.Block, <-chan struct{}) (uint64, []byte) {
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
||||||
func (pow delayedPow) Verify(block pow.Block) error { time.Sleep(pow.delay); return nil }
|
|
||||||
func (pow delayedPow) Hashrate() float64 { return 0 }
|
|
||||||
|
|
||||||
// Tests that simple POW verification works, for both good and bad blocks.
|
|
||||||
func TestPowVerification(t *testing.T) {
|
|
||||||
// Create a simple chain to verify
|
|
||||||
var (
|
|
||||||
testdb, _ = ethdb.NewMemDatabase()
|
|
||||||
genesis = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
|
|
||||||
blocks, _ = GenerateChain(params.TestChainConfig, genesis, testdb, 8, nil)
|
|
||||||
)
|
|
||||||
headers := make([]*types.Header, len(blocks))
|
|
||||||
for i, block := range blocks {
|
|
||||||
headers[i] = block.Header()
|
|
||||||
}
|
|
||||||
// Run the POW checker for blocks one-by-one, checking for both valid and invalid nonces
|
|
||||||
for i := 0; i < len(blocks); i++ {
|
|
||||||
for j, full := range []bool{true, false} {
|
|
||||||
for k, valid := range []bool{true, false} {
|
|
||||||
var results <-chan nonceCheckResult
|
|
||||||
|
|
||||||
switch {
|
|
||||||
case full && valid:
|
|
||||||
_, results = verifyNoncesFromBlocks(pow.FakePow{}, []*types.Block{blocks[i]})
|
|
||||||
case full && !valid:
|
|
||||||
_, results = verifyNoncesFromBlocks(failPow{blocks[i].NumberU64()}, []*types.Block{blocks[i]})
|
|
||||||
case !full && valid:
|
|
||||||
_, results = verifyNoncesFromHeaders(pow.FakePow{}, []*types.Header{headers[i]})
|
|
||||||
case !full && !valid:
|
|
||||||
_, results = verifyNoncesFromHeaders(failPow{headers[i].Number.Uint64()}, []*types.Header{headers[i]})
|
|
||||||
}
|
|
||||||
// Wait for the verification result
|
|
||||||
select {
|
|
||||||
case result := <-results:
|
|
||||||
if result.index != 0 {
|
|
||||||
t.Errorf("test %d.%d.%d: invalid index: have %d, want 0", i, j, k, result.index)
|
|
||||||
}
|
|
||||||
if result.valid != valid {
|
|
||||||
t.Errorf("test %d.%d.%d: validity mismatch: have %v, want %v", i, j, k, result.valid, valid)
|
|
||||||
}
|
|
||||||
case <-time.After(time.Second):
|
|
||||||
t.Fatalf("test %d.%d.%d: verification timeout", i, j, k)
|
|
||||||
}
|
|
||||||
// Make sure no more data is returned
|
|
||||||
select {
|
|
||||||
case result := <-results:
|
|
||||||
t.Fatalf("test %d.%d.%d: unexpected result returned: %v", i, j, k, result)
|
|
||||||
case <-time.After(25 * time.Millisecond):
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Tests that concurrent POW verification works, for both good and bad blocks.
|
|
||||||
func TestPowConcurrentVerification2(t *testing.T) { testPowConcurrentVerification(t, 2) }
|
|
||||||
func TestPowConcurrentVerification8(t *testing.T) { testPowConcurrentVerification(t, 8) }
|
|
||||||
func TestPowConcurrentVerification32(t *testing.T) { testPowConcurrentVerification(t, 32) }
|
|
||||||
|
|
||||||
func testPowConcurrentVerification(t *testing.T, threads int) {
|
|
||||||
// Create a simple chain to verify
|
|
||||||
var (
|
|
||||||
testdb, _ = ethdb.NewMemDatabase()
|
|
||||||
genesis = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
|
|
||||||
blocks, _ = GenerateChain(params.TestChainConfig, genesis, testdb, 8, nil)
|
|
||||||
)
|
|
||||||
headers := make([]*types.Header, len(blocks))
|
|
||||||
for i, block := range blocks {
|
|
||||||
headers[i] = block.Header()
|
|
||||||
}
|
|
||||||
// Set the number of threads to verify on
|
|
||||||
old := runtime.GOMAXPROCS(threads)
|
|
||||||
defer runtime.GOMAXPROCS(old)
|
|
||||||
|
|
||||||
// Run the POW checker for the entire block chain at once both for a valid and
|
|
||||||
// also an invalid chain (enough if one is invalid, last but one (arbitrary)).
|
|
||||||
for i, full := range []bool{true, false} {
|
|
||||||
for j, valid := range []bool{true, false} {
|
|
||||||
var results <-chan nonceCheckResult
|
|
||||||
|
|
||||||
switch {
|
|
||||||
case full && valid:
|
|
||||||
_, results = verifyNoncesFromBlocks(pow.FakePow{}, blocks)
|
|
||||||
case full && !valid:
|
|
||||||
_, results = verifyNoncesFromBlocks(failPow{uint64(len(blocks) - 1)}, blocks)
|
|
||||||
case !full && valid:
|
|
||||||
_, results = verifyNoncesFromHeaders(pow.FakePow{}, headers)
|
|
||||||
case !full && !valid:
|
|
||||||
_, results = verifyNoncesFromHeaders(failPow{uint64(len(headers) - 1)}, headers)
|
|
||||||
}
|
|
||||||
// Wait for all the verification results
|
|
||||||
checks := make(map[int]bool)
|
|
||||||
for k := 0; k < len(blocks); k++ {
|
|
||||||
select {
|
|
||||||
case result := <-results:
|
|
||||||
if _, ok := checks[result.index]; ok {
|
|
||||||
t.Fatalf("test %d.%d.%d: duplicate results for %d", i, j, k, result.index)
|
|
||||||
}
|
|
||||||
if result.index < 0 || result.index >= len(blocks) {
|
|
||||||
t.Fatalf("test %d.%d.%d: result %d out of bounds [%d, %d]", i, j, k, result.index, 0, len(blocks)-1)
|
|
||||||
}
|
|
||||||
checks[result.index] = result.valid
|
|
||||||
|
|
||||||
case <-time.After(time.Second):
|
|
||||||
t.Fatalf("test %d.%d.%d: verification timeout", i, j, k)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Check nonce check validity
|
|
||||||
for k := 0; k < len(blocks); k++ {
|
|
||||||
want := valid || (k != len(blocks)-2) // We chose the last but one nonce in the chain to fail
|
|
||||||
if checks[k] != want {
|
|
||||||
t.Errorf("test %d.%d.%d: validity mismatch: have %v, want %v", i, j, k, checks[k], want)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Make sure no more data is returned
|
|
||||||
select {
|
|
||||||
case result := <-results:
|
|
||||||
t.Fatalf("test %d.%d: unexpected result returned: %v", i, j, result)
|
|
||||||
case <-time.After(25 * time.Millisecond):
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Tests that aborting a POW validation indeed prevents further checks from being
|
|
||||||
// run, as well as checks that no left-over goroutines are leaked.
|
|
||||||
func TestPowConcurrentAbortion2(t *testing.T) { testPowConcurrentAbortion(t, 2) }
|
|
||||||
func TestPowConcurrentAbortion8(t *testing.T) { testPowConcurrentAbortion(t, 8) }
|
|
||||||
func TestPowConcurrentAbortion32(t *testing.T) { testPowConcurrentAbortion(t, 32) }
|
|
||||||
|
|
||||||
func testPowConcurrentAbortion(t *testing.T, threads int) {
|
|
||||||
// Create a simple chain to verify
|
|
||||||
var (
|
|
||||||
testdb, _ = ethdb.NewMemDatabase()
|
|
||||||
genesis = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
|
|
||||||
blocks, _ = GenerateChain(params.TestChainConfig, genesis, testdb, 1024, nil)
|
|
||||||
)
|
|
||||||
headers := make([]*types.Header, len(blocks))
|
|
||||||
for i, block := range blocks {
|
|
||||||
headers[i] = block.Header()
|
|
||||||
}
|
|
||||||
// Set the number of threads to verify on
|
|
||||||
old := runtime.GOMAXPROCS(threads)
|
|
||||||
defer runtime.GOMAXPROCS(old)
|
|
||||||
|
|
||||||
// Run the POW checker for the entire block chain at once
|
|
||||||
for i, full := range []bool{true, false} {
|
|
||||||
var abort chan<- struct{}
|
|
||||||
var results <-chan nonceCheckResult
|
|
||||||
|
|
||||||
// Start the verifications and immediately abort
|
|
||||||
if full {
|
|
||||||
abort, results = verifyNoncesFromBlocks(delayedPow{time.Millisecond}, blocks)
|
|
||||||
} else {
|
|
||||||
abort, results = verifyNoncesFromHeaders(delayedPow{time.Millisecond}, headers)
|
|
||||||
}
|
|
||||||
close(abort)
|
|
||||||
|
|
||||||
// Deplete the results channel
|
|
||||||
verified := make(map[int]struct{})
|
|
||||||
for depleted := false; !depleted; {
|
|
||||||
select {
|
|
||||||
case result := <-results:
|
|
||||||
verified[result.index] = struct{}{}
|
|
||||||
case <-time.After(50 * time.Millisecond):
|
|
||||||
depleted = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Check that abortion was honored by not processing too many POWs
|
|
||||||
if len(verified) > 2*threads {
|
|
||||||
t.Errorf("test %d: verification count too large: have %d, want below %d", i, len(verified), 2*threads)
|
|
||||||
}
|
|
||||||
// Check that there are no gaps in the results
|
|
||||||
for j := 0; j < len(verified); j++ {
|
|
||||||
if _, ok := verified[j]; !ok {
|
|
||||||
t.Errorf("test %d.%d: gap found in verification results", i, j)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -20,11 +20,11 @@ import (
|
||||||
"math/big"
|
"math/big"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||||
"github.com/ethereum/go-ethereum/core/vm"
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/event"
|
"github.com/ethereum/go-ethereum/event"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/pow"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Tests that DAO-fork enabled clients can properly filter out fork-commencing
|
// Tests that DAO-fork enabled clients can properly filter out fork-commencing
|
||||||
|
@ -42,12 +42,12 @@ func TestDAOForkRangeExtradata(t *testing.T) {
|
||||||
proDb, _ := ethdb.NewMemDatabase()
|
proDb, _ := ethdb.NewMemDatabase()
|
||||||
gspec.MustCommit(proDb)
|
gspec.MustCommit(proDb)
|
||||||
proConf := ¶ms.ChainConfig{HomesteadBlock: big.NewInt(0), DAOForkBlock: forkBlock, DAOForkSupport: true}
|
proConf := ¶ms.ChainConfig{HomesteadBlock: big.NewInt(0), DAOForkBlock: forkBlock, DAOForkSupport: true}
|
||||||
proBc, _ := NewBlockChain(proDb, proConf, new(pow.FakePow), new(event.TypeMux), vm.Config{})
|
proBc, _ := NewBlockChain(proDb, proConf, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
|
||||||
|
|
||||||
conDb, _ := ethdb.NewMemDatabase()
|
conDb, _ := ethdb.NewMemDatabase()
|
||||||
gspec.MustCommit(conDb)
|
gspec.MustCommit(conDb)
|
||||||
conConf := ¶ms.ChainConfig{HomesteadBlock: big.NewInt(0), DAOForkBlock: forkBlock, DAOForkSupport: false}
|
conConf := ¶ms.ChainConfig{HomesteadBlock: big.NewInt(0), DAOForkBlock: forkBlock, DAOForkSupport: false}
|
||||||
conBc, _ := NewBlockChain(conDb, conConf, new(pow.FakePow), new(event.TypeMux), vm.Config{})
|
conBc, _ := NewBlockChain(conDb, conConf, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
|
||||||
|
|
||||||
if _, err := proBc.InsertChain(prefix); err != nil {
|
if _, err := proBc.InsertChain(prefix); err != nil {
|
||||||
t.Fatalf("pro-fork: failed to import chain prefix: %v", err)
|
t.Fatalf("pro-fork: failed to import chain prefix: %v", err)
|
||||||
|
@ -60,7 +60,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
|
||||||
// Create a pro-fork block, and try to feed into the no-fork chain
|
// Create a pro-fork block, and try to feed into the no-fork chain
|
||||||
db, _ = ethdb.NewMemDatabase()
|
db, _ = ethdb.NewMemDatabase()
|
||||||
gspec.MustCommit(db)
|
gspec.MustCommit(db)
|
||||||
bc, _ := NewBlockChain(db, conConf, new(pow.FakePow), new(event.TypeMux), vm.Config{})
|
bc, _ := NewBlockChain(db, conConf, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
|
||||||
|
|
||||||
blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().NumberU64()+1))
|
blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().NumberU64()+1))
|
||||||
for j := 0; j < len(blocks)/2; j++ {
|
for j := 0; j < len(blocks)/2; j++ {
|
||||||
|
@ -81,7 +81,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
|
||||||
// Create a no-fork block, and try to feed into the pro-fork chain
|
// Create a no-fork block, and try to feed into the pro-fork chain
|
||||||
db, _ = ethdb.NewMemDatabase()
|
db, _ = ethdb.NewMemDatabase()
|
||||||
gspec.MustCommit(db)
|
gspec.MustCommit(db)
|
||||||
bc, _ = NewBlockChain(db, proConf, new(pow.FakePow), new(event.TypeMux), vm.Config{})
|
bc, _ = NewBlockChain(db, proConf, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
|
||||||
|
|
||||||
blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().NumberU64()+1))
|
blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().NumberU64()+1))
|
||||||
for j := 0; j < len(blocks)/2; j++ {
|
for j := 0; j < len(blocks)/2; j++ {
|
||||||
|
@ -103,7 +103,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
|
||||||
// Verify that contra-forkers accept pro-fork extra-datas after forking finishes
|
// Verify that contra-forkers accept pro-fork extra-datas after forking finishes
|
||||||
db, _ = ethdb.NewMemDatabase()
|
db, _ = ethdb.NewMemDatabase()
|
||||||
gspec.MustCommit(db)
|
gspec.MustCommit(db)
|
||||||
bc, _ := NewBlockChain(db, conConf, new(pow.FakePow), new(event.TypeMux), vm.Config{})
|
bc, _ := NewBlockChain(db, conConf, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
|
||||||
|
|
||||||
blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().NumberU64()+1))
|
blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().NumberU64()+1))
|
||||||
for j := 0; j < len(blocks)/2; j++ {
|
for j := 0; j < len(blocks)/2; j++ {
|
||||||
|
@ -119,7 +119,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
|
||||||
// Verify that pro-forkers accept contra-fork extra-datas after forking finishes
|
// Verify that pro-forkers accept contra-fork extra-datas after forking finishes
|
||||||
db, _ = ethdb.NewMemDatabase()
|
db, _ = ethdb.NewMemDatabase()
|
||||||
gspec.MustCommit(db)
|
gspec.MustCommit(db)
|
||||||
bc, _ = NewBlockChain(db, proConf, new(pow.FakePow), new(event.TypeMux), vm.Config{})
|
bc, _ = NewBlockChain(db, proConf, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
|
||||||
|
|
||||||
blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().NumberU64()+1))
|
blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().NumberU64()+1))
|
||||||
for j := 0; j < len(blocks)/2; j++ {
|
for j := 0; j < len(blocks)/2; j++ {
|
||||||
|
|
|
@ -18,14 +18,12 @@ package core
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"encoding/json"
|
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"math/big"
|
"math/big"
|
||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/common/math"
|
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/crypto/sha3"
|
"github.com/ethereum/go-ethereum/crypto/sha3"
|
||||||
|
@ -34,58 +32,6 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
)
|
)
|
||||||
|
|
||||||
type diffTest struct {
|
|
||||||
ParentTimestamp uint64
|
|
||||||
ParentDifficulty *big.Int
|
|
||||||
CurrentTimestamp uint64
|
|
||||||
CurrentBlocknumber *big.Int
|
|
||||||
CurrentDifficulty *big.Int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *diffTest) UnmarshalJSON(b []byte) (err error) {
|
|
||||||
var ext struct {
|
|
||||||
ParentTimestamp string
|
|
||||||
ParentDifficulty string
|
|
||||||
CurrentTimestamp string
|
|
||||||
CurrentBlocknumber string
|
|
||||||
CurrentDifficulty string
|
|
||||||
}
|
|
||||||
if err := json.Unmarshal(b, &ext); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
d.ParentTimestamp = math.MustParseUint64(ext.ParentTimestamp)
|
|
||||||
d.ParentDifficulty = math.MustParseBig256(ext.ParentDifficulty)
|
|
||||||
d.CurrentTimestamp = math.MustParseUint64(ext.CurrentTimestamp)
|
|
||||||
d.CurrentBlocknumber = math.MustParseBig256(ext.CurrentBlocknumber)
|
|
||||||
d.CurrentDifficulty = math.MustParseBig256(ext.CurrentDifficulty)
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCalcDifficulty(t *testing.T) {
|
|
||||||
file, err := os.Open("../tests/files/BasicTests/difficulty.json")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
|
|
||||||
tests := make(map[string]diffTest)
|
|
||||||
err = json.NewDecoder(file).Decode(&tests)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
config := ¶ms.ChainConfig{HomesteadBlock: big.NewInt(1150000)}
|
|
||||||
for name, test := range tests {
|
|
||||||
number := new(big.Int).Sub(test.CurrentBlocknumber, big.NewInt(1))
|
|
||||||
diff := CalcDifficulty(config, test.CurrentTimestamp, test.ParentTimestamp, number, test.ParentDifficulty)
|
|
||||||
if diff.Cmp(test.CurrentDifficulty) != 0 {
|
|
||||||
t.Error(name, "failed. Expected", test.CurrentDifficulty, "and calculated", diff)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Tests block header storage and retrieval operations.
|
// Tests block header storage and retrieval operations.
|
||||||
func TestHeaderStorage(t *testing.T) {
|
func TestHeaderStorage(t *testing.T) {
|
||||||
db, _ := ethdb.NewMemDatabase()
|
db, _ := ethdb.NewMemDatabase()
|
||||||
|
@ -562,7 +508,11 @@ func TestMipmapChain(t *testing.T) {
|
||||||
)
|
)
|
||||||
defer db.Close()
|
defer db.Close()
|
||||||
|
|
||||||
genesis := testGenesis(addr, big.NewInt(1000000)).MustCommit(db)
|
gspec := &Genesis{
|
||||||
|
Config: params.TestChainConfig,
|
||||||
|
Alloc: GenesisAlloc{addr: {Balance: big.NewInt(1000000)}},
|
||||||
|
}
|
||||||
|
genesis := gspec.MustCommit(db)
|
||||||
chain, receipts := GenerateChain(params.TestChainConfig, genesis, db, 1010, func(i int, gen *BlockGen) {
|
chain, receipts := GenerateChain(params.TestChainConfig, genesis, db, 1010, func(i int, gen *BlockGen) {
|
||||||
var receipts types.Receipts
|
var receipts types.Receipts
|
||||||
switch i {
|
switch i {
|
||||||
|
|
|
@ -23,11 +23,11 @@ import (
|
||||||
|
|
||||||
"github.com/davecgh/go-spew/spew"
|
"github.com/davecgh/go-spew/spew"
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||||
"github.com/ethereum/go-ethereum/core/vm"
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/event"
|
"github.com/ethereum/go-ethereum/event"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/pow"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestDefaultGenesisBlock(t *testing.T) {
|
func TestDefaultGenesisBlock(t *testing.T) {
|
||||||
|
@ -119,7 +119,7 @@ func TestSetupGenesis(t *testing.T) {
|
||||||
// Commit the 'old' genesis block with Homestead transition at #2.
|
// Commit the 'old' genesis block with Homestead transition at #2.
|
||||||
// Advance to block #4, past the homestead transition block of customg.
|
// Advance to block #4, past the homestead transition block of customg.
|
||||||
genesis := oldcustomg.MustCommit(db)
|
genesis := oldcustomg.MustCommit(db)
|
||||||
bc, _ := NewBlockChain(db, oldcustomg.Config, pow.FakePow{}, new(event.TypeMux), vm.Config{})
|
bc, _ := NewBlockChain(db, oldcustomg.Config, ethash.NewFullFaker(), new(event.TypeMux), vm.Config{})
|
||||||
bc.SetValidator(bproc{})
|
bc.SetValidator(bproc{})
|
||||||
bc.InsertChain(makeBlockChainWithDiff(genesis, []int{2, 3, 4, 5}, 0))
|
bc.InsertChain(makeBlockChainWithDiff(genesis, []int{2, 3, 4, 5}, 0))
|
||||||
bc.CurrentBlock()
|
bc.CurrentBlock()
|
||||||
|
|
|
@@ -18,21 +18,19 @@ package core

 import (
 	crand "crypto/rand"
+	"errors"
 	"fmt"
 	"math"
 	"math/big"
 	mrand "math/rand"
-	"runtime"
-	"sync"
-	"sync/atomic"
 	"time"

 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/consensus"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
-	"github.com/ethereum/go-ethereum/pow"
 	"github.com/hashicorp/golang-lru"
 )

@@ -62,18 +60,15 @@ type HeaderChain struct {

 	procInterrupt func() bool

 	rand *mrand.Rand
-	getValidator getHeaderValidatorFn
+	engine consensus.Engine
 }

-// getHeaderValidatorFn returns a HeaderValidator interface
-type getHeaderValidatorFn func() HeaderValidator
-
 // NewHeaderChain creates a new HeaderChain structure.
 //  getValidator should return the parent's validator
 //  procInterrupt points to the parent's interrupt semaphore
 //  wg points to the parent's shutdown wait group
-func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, getValidator getHeaderValidatorFn, procInterrupt func() bool) (*HeaderChain, error) {
+func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine consensus.Engine, procInterrupt func() bool) (*HeaderChain, error) {
 	headerCache, _ := lru.New(headerCacheLimit)
 	tdCache, _ := lru.New(tdCacheLimit)
 	numberCache, _ := lru.New(numberCacheLimit)

@@ -92,7 +87,7 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, getValid
 		numberCache:   numberCache,
 		procInterrupt: procInterrupt,
 		rand:          mrand.New(mrand.NewSource(seed.Int64())),
-		getValidator:  getValidator,
+		engine:        engine,
 	}

 	hc.genesisHeader = hc.GetHeaderByNumber(0)
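HeaderChain now keeps a consensus.Engine where it previously kept a validator callback. Only a small slice of that engine is exercised in this file; the interface below is an approximation reconstructed from the call sites in these hunks (VerifyHeader and VerifyHeaders), not the authoritative definition in the consensus package, which carries more methods:

package core

import (
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/types"
)

// headerVerifier captures the subset of the engine the header chain relies on.
type headerVerifier interface {
	// VerifyHeader checks a single header against its parent; the seal flag
	// controls whether the proof-of-work (or equivalent) is verified too.
	VerifyHeader(chain consensus.ChainReader, header *types.Header, seal bool) error

	// VerifyHeaders checks a batch of headers concurrently. The seals slice
	// marks which entries need seal verification; the returned channels let
	// the caller abort the work and drain one error per header, in order.
	VerifyHeaders(chain consensus.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error)
}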
@ -228,78 +223,34 @@ func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Generate the list of headers that should be POW verified
|
// Generate the list of seal verification requests, and start the parallel verifier
|
||||||
verify := make([]bool, len(chain))
|
seals := make([]bool, len(chain))
|
||||||
for i := 0; i < len(verify)/checkFreq; i++ {
|
for i := 0; i < len(seals)/checkFreq; i++ {
|
||||||
index := i*checkFreq + hc.rand.Intn(checkFreq)
|
index := i*checkFreq + hc.rand.Intn(checkFreq)
|
||||||
if index >= len(verify) {
|
if index >= len(seals) {
|
||||||
index = len(verify) - 1
|
index = len(seals) - 1
|
||||||
}
|
}
|
||||||
verify[index] = true
|
seals[index] = true
|
||||||
}
|
}
|
||||||
verify[len(verify)-1] = true // Last should always be verified to avoid junk
|
seals[len(seals)-1] = true // Last should always be verified to avoid junk
|
||||||
|
|
||||||
// Create the header verification task queue and worker functions
|
abort, results := hc.engine.VerifyHeaders(hc, chain, seals)
|
||||||
tasks := make(chan int, len(chain))
|
defer close(abort)
|
||||||
for i := 0; i < len(chain); i++ {
|
|
||||||
tasks <- i
|
|
||||||
}
|
|
||||||
close(tasks)
|
|
||||||
|
|
||||||
errs, failed := make([]error, len(tasks)), int32(0)
|
// Iterate over the headers and ensure they all check out
|
||||||
process := func(worker int) {
|
for i, header := range chain {
|
||||||
for index := range tasks {
|
// If the chain is terminating, stop processing blocks
|
||||||
header, hash := chain[index], chain[index].Hash()
|
if hc.procInterrupt() {
|
||||||
|
log.Debug("Premature abort during headers verification")
|
||||||
// Short circuit insertion if shutting down or processing failed
|
return 0, errors.New("aborted")
|
||||||
if hc.procInterrupt() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if atomic.LoadInt32(&failed) > 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Short circuit if the header is bad or already known
|
|
||||||
if BadHashes[hash] {
|
|
||||||
errs[index] = BadHashError(hash)
|
|
||||||
atomic.AddInt32(&failed, 1)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if hc.HasHeader(hash) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// Verify that the header honors the chain parameters
|
|
||||||
checkPow := verify[index]
|
|
||||||
|
|
||||||
var err error
|
|
||||||
if index == 0 {
|
|
||||||
err = hc.getValidator().ValidateHeader(header, hc.GetHeader(header.ParentHash, header.Number.Uint64()-1), checkPow)
|
|
||||||
} else {
|
|
||||||
err = hc.getValidator().ValidateHeader(header, chain[index-1], checkPow)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
errs[index] = err
|
|
||||||
atomic.AddInt32(&failed, 1)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
// If the header is a banned one, straight out abort
|
||||||
// Start as many worker threads as goroutines allowed
|
if BadHashes[header.Hash()] {
|
||||||
pending := new(sync.WaitGroup)
|
return i, BadHashError(header.Hash())
|
||||||
for i := 0; i < runtime.GOMAXPROCS(0); i++ {
|
}
|
||||||
pending.Add(1)
|
// Otherwise wait for headers checks and ensure they pass
|
||||||
go func(id int) {
|
if err := <-results; err != nil {
|
||||||
defer pending.Done()
|
return i, err
|
||||||
process(id)
|
|
||||||
}(i)
|
|
||||||
}
|
|
||||||
pending.Wait()
|
|
||||||
|
|
||||||
// If anything failed, report
|
|
||||||
if failed > 0 {
|
|
||||||
for i, err := range errs {
|
|
||||||
if err != nil {
|
|
||||||
return i, err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -313,13 +264,11 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, writeHeader WhCa
 	for i, header := range chain {
 		// Short circuit insertion if shutting down
 		if hc.procInterrupt() {
-			log.Debug("Premature abort during headers processing")
-			break
+			log.Debug("Premature abort during headers import")
+			return i, errors.New("aborted")
 		}
-		hash := header.Hash()
-
 		// If the header's already known, skip it, otherwise store
-		if hc.HasHeader(hash) {
+		if hc.GetHeader(header.Hash(), header.Number.Uint64()) != nil {
 			stats.ignored++
 			continue
 		}

@@ -490,35 +439,11 @@ func (hc *HeaderChain) SetGenesis(head *types.Header) {
 	hc.genesisHeader = head
 }

-// headerValidator is responsible for validating block headers
-//
-// headerValidator implements HeaderValidator.
-type headerValidator struct {
-	config *params.ChainConfig
-	hc     *HeaderChain // Canonical header chain
-	Pow    pow.PoW      // Proof of work used for validating
-}
-
-// NewBlockValidator returns a new block validator which is safe for re-use
-func NewHeaderValidator(config *params.ChainConfig, chain *HeaderChain, pow pow.PoW) HeaderValidator {
-	return &headerValidator{
-		config: config,
-		Pow:    pow,
-		hc:     chain,
-	}
-}
-
-// ValidateHeader validates the given header and, depending on the pow arg,
-// checks the proof of work of the given header. Returns an error if the
-// validation failed.
-func (v *headerValidator) ValidateHeader(header, parent *types.Header, checkPow bool) error {
-	// Short circuit if the parent is missing.
-	if parent == nil {
-		return ParentError(header.ParentHash)
-	}
-	// Short circuit if the header's already known or its parent missing
-	if v.hc.HasHeader(header.Hash()) {
-		return nil
-	}
-	return ValidateHeader(v.config, v.Pow, header, parent, checkPow, false)
+// Config retrieves the header chain's chain configuration.
+func (hc *HeaderChain) Config() *params.ChainConfig { return hc.config }
+
+// GetBlock implements consensus.ChainReader, and returns nil for every input as
+// a header chain does not have blocks available for retrieval.
+func (hc *HeaderChain) GetBlock(hash common.Hash, number uint64) *types.Block {
+	return nil
 }
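The large ValidateHeaderChain hunk above replaces the in-package worker pool with a single call into the engine's batch verifier. A condensed sketch of the new control flow, assembled from the added lines of that hunk; the wrapper name and the final return value are simplifications:

package core

import (
	"errors"

	"github.com/ethereum/go-ethereum/core/types"
)

// drainHeaderVerification asks the engine to verify the whole batch, then
// consumes one result per header, bailing out on shutdown, on a banned hash,
// or on the first verification error.
func drainHeaderVerification(hc *HeaderChain, chain []*types.Header, seals []bool) (int, error) {
	abort, results := hc.engine.VerifyHeaders(hc, chain, seals)
	defer close(abort)

	for i, header := range chain {
		if hc.procInterrupt() {
			return 0, errors.New("aborted")
		}
		if BadHashes[header.Hash()] {
			return i, BadHashError(header.Hash())
		}
		if err := <-results; err != nil {
			return i, err
		}
	}
	return 0, nil
}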
@ -19,6 +19,8 @@ package core
|
||||||
import (
|
import (
|
||||||
"math/big"
|
"math/big"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/consensus"
|
||||||
|
"github.com/ethereum/go-ethereum/consensus/misc"
|
||||||
"github.com/ethereum/go-ethereum/core/state"
|
"github.com/ethereum/go-ethereum/core/state"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/core/vm"
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
|
@@ -26,25 +28,22 @@ import (
 	"github.com/ethereum/go-ethereum/params"
 )
 
-var (
-	big8  = big.NewInt(8)
-	big32 = big.NewInt(32)
-)
-
 // StateProcessor is a basic Processor, which takes care of transitioning
 // state from one point to another.
 //
 // StateProcessor implements Processor.
 type StateProcessor struct {
-	config *params.ChainConfig
-	bc     *BlockChain
+	config *params.ChainConfig // Chain configuration options
+	bc     *BlockChain         // Canonical block chain
+	engine consensus.Engine    // Consensus engine used for block rewards
 }
 
 // NewStateProcessor initialises a new StateProcessor.
-func NewStateProcessor(config *params.ChainConfig, bc *BlockChain) *StateProcessor {
+func NewStateProcessor(config *params.ChainConfig, bc *BlockChain, engine consensus.Engine) *StateProcessor {
 	return &StateProcessor{
 		config: config,
 		bc:     bc,
+		engine: engine,
 	}
 }
@@ -59,18 +58,16 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
 	var (
 		receipts     types.Receipts
 		totalUsedGas = big.NewInt(0)
-		err          error
 		header       = block.Header()
 		allLogs      []*types.Log
 		gp           = new(GasPool).AddGas(block.GasLimit())
 	)
 	// Mutate the the block and state according to any hard-fork specs
 	if p.config.DAOForkSupport && p.config.DAOForkBlock != nil && p.config.DAOForkBlock.Cmp(block.Number()) == 0 {
-		ApplyDAOHardFork(statedb)
+		misc.ApplyDAOHardFork(statedb)
 	}
 	// Iterate over and process the individual transactions
 	for i, tx := range block.Transactions() {
-		//fmt.Println("tx:", i)
 		statedb.StartRecord(tx.Hash(), block.Hash(), i)
 		receipt, _, err := ApplyTransaction(p.config, p.bc, gp, statedb, header, tx, totalUsedGas, cfg)
 		if err != nil {
@@ -79,9 +76,10 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
 		receipts = append(receipts, receipt)
 		allLogs = append(allLogs, receipt.Logs...)
 	}
-	AccumulateRewards(statedb, header, block.Uncles())
+	// Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
+	p.engine.Finalize(p.bc, header, statedb, block.Transactions(), block.Uncles(), receipts)
 
-	return receipts, allLogs, totalUsedGas, err
+	return receipts, allLogs, totalUsedGas, nil
 }
 
 // ApplyTransaction attempts to apply a transaction to the given state database
@@ -122,23 +120,3 @@ func ApplyTransaction(config *params.ChainConfig, bc *BlockChain, gp *GasPool, s
 
 	return receipt, gas, err
 }
-
-// AccumulateRewards credits the coinbase of the given block with the
-// mining reward. The total reward consists of the static block reward
-// and rewards for included uncles. The coinbase of each uncle block is
-// also rewarded.
-func AccumulateRewards(statedb *state.StateDB, header *types.Header, uncles []*types.Header) {
-	reward := new(big.Int).Set(BlockReward)
-	r := new(big.Int)
-	for _, uncle := range uncles {
-		r.Add(uncle.Number, big8)
-		r.Sub(r, header.Number)
-		r.Mul(r, BlockReward)
-		r.Div(r, big8)
-		statedb.AddBalance(uncle.Coinbase, r)
-
-		r.Div(BlockReward, big32)
-		reward.Add(reward, r)
-	}
-	statedb.AddBalance(header.Coinbase, reward)
-}
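The hunks above move the block-reward logic out of the state processor and behind engine.Finalize. Below is a minimal, self-contained Go sketch of that shape; the types and the engine interface are toy stand-ins rather than the real go-ethereum signatures, but the uncle-reward arithmetic mirrors the removed AccumulateRewards.

package main

import (
	"fmt"
	"math/big"
)

type header struct {
	Number   *big.Int
	Coinbase string
}

// state is a toy balance map standing in for the real StateDB.
type state map[string]*big.Int

func (s state) addBalance(addr string, amount *big.Int) {
	if s[addr] == nil {
		s[addr] = new(big.Int)
	}
	s[addr].Add(s[addr], amount)
}

// engine is the slice of a consensus engine the processor needs here.
type engine interface {
	Finalize(h *header, st state, uncles []*header)
}

// ethashLike reproduces the reward rule from the removed AccumulateRewards:
// a static block reward, an inclusion bonus per uncle, and per-uncle rewards.
type ethashLike struct{ blockReward *big.Int }

func (e ethashLike) Finalize(h *header, st state, uncles []*header) {
	reward := new(big.Int).Set(e.blockReward)
	r := new(big.Int)
	for _, uncle := range uncles {
		r.Add(uncle.Number, big.NewInt(8))
		r.Sub(r, h.Number)
		r.Mul(r, e.blockReward)
		r.Div(r, big.NewInt(8))
		st.addBalance(uncle.Coinbase, r)

		r.Div(e.blockReward, big.NewInt(32))
		reward.Add(reward, r)
	}
	st.addBalance(h.Coinbase, reward)
}

func main() {
	st := state{}
	eng := ethashLike{blockReward: big.NewInt(5e18)}
	h := &header{Number: big.NewInt(100), Coinbase: "miner"}
	uncles := []*header{{Number: big.NewInt(99), Coinbase: "uncle"}}
	// The processor's closing step: no reward logic inline, just Finalize.
	eng.Finalize(h, st, uncles)
	fmt.Println(st["miner"], st["uncle"])
}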
@@ -24,31 +24,17 @@ import (
 	"github.com/ethereum/go-ethereum/core/vm"
 )
 
-// Validator is an interface which defines the standard for block validation.
+// Validator is an interface which defines the standard for block validation. It
+// is only responsible for validating block contents, as the header validation is
+// done by the specific consensus engines.
 //
-// The validator is responsible for validating incoming block or, if desired,
-// validates headers for fast validation.
-//
-// ValidateBlock validates the given block and should return an error if it
-// failed to do so and should be used for "full" validation.
-//
-// ValidateHeader validates the given header and parent and returns an error
-// if it failed to do so.
-//
-// ValidateState validates the given statedb and optionally the receipts and
-// gas used. The implementer should decide what to do with the given input.
 type Validator interface {
-	HeaderValidator
-	ValidateBlock(block *types.Block) error
-	ValidateState(block, parent *types.Block, state *state.StateDB, receipts types.Receipts, usedGas *big.Int) error
-}
+	// ValidateBody validates the given block's content.
+	ValidateBody(block *types.Block) error
 
-// HeaderValidator is an interface for validating headers only
-//
-// ValidateHeader validates the given header and parent and returns an error
-// if it failed to do so.
-type HeaderValidator interface {
-	ValidateHeader(header, parent *types.Header, checkPow bool) error
+	// ValidateState validates the given statedb and optionally the receipts and
+	// gas used.
+	ValidateState(block, parent *types.Block, state *state.StateDB, receipts types.Receipts, usedGas *big.Int) error
 }
 
 // Processor is an interface for processing blocks using a given initial state.
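To illustrate the split the new Validator interface encodes, here is a hedged sketch with local toy types (not the real go-ethereum definitions): the consensus engine owns header verification, while the core validator only checks block contents and post-state.

package main

import (
	"errors"
	"fmt"
)

type Header struct{ Number uint64 }

type Block struct {
	Header  Header
	TxCount int
}

// Engine owns the header rules (PoW-specific in this commit).
type Engine interface {
	VerifyHeader(h *Header, seal bool) error
}

// Validator mirrors ValidateBody/ValidateState: contents and state only.
type Validator interface {
	ValidateBody(b *Block) error
	ValidateState(b *Block, gasUsed uint64) error
}

type fakeEngine struct{}

// A faker accepts every header, in the spirit of ethash.NewFaker().
func (fakeEngine) VerifyHeader(h *Header, seal bool) error { return nil }

type bodyValidator struct{}

func (bodyValidator) ValidateBody(b *Block) error {
	if b.TxCount < 0 {
		return errors.New("negative tx count")
	}
	return nil
}
func (bodyValidator) ValidateState(b *Block, gasUsed uint64) error { return nil }

// importBlock shows the new order: engine first, then body, then state.
func importBlock(e Engine, v Validator, b *Block) error {
	if err := e.VerifyHeader(&b.Header, true); err != nil {
		return err
	}
	if err := v.ValidateBody(b); err != nil {
		return err
	}
	return v.ValidateState(b, 0)
}

func main() {
	fmt.Println(importBlock(fakeEngine{}, bodyValidator{}, &Block{Header: Header{Number: 1}, TxCount: 2}))
}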
@@ -348,12 +348,11 @@ func CalcUncleHash(uncles []*Header) common.Hash {
 	return rlpHash(uncles)
 }
 
-// WithMiningResult returns a new block with the data from b
-// where nonce and mix digest are set to the provided values.
-func (b *Block) WithMiningResult(nonce BlockNonce, mixDigest common.Hash) *Block {
-	cpy := *b.header
-	cpy.Nonce = nonce
-	cpy.MixDigest = mixDigest
+// WithSeal returns a new block with the data from b but the header replaced with
+// the sealed one.
+func (b *Block) WithSeal(header *Header) *Block {
+	cpy := *header
 	return &Block{
 		header:       &cpy,
 		transactions: b.transactions,

93	eth/api.go
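A small illustrative sketch of the WithSeal flow (toy types, assumed shapes): a sealer now returns a completed header and the block is rebuilt around it, instead of patching nonce and mix-digest onto the old header.

package main

import "fmt"

type Header struct {
	Number uint64
	Nonce  uint64
}

type Block struct {
	header *Header
	txs    []string
}

// WithSeal mirrors the diff: copy the sealed header, keep the body.
func (b *Block) WithSeal(header *Header) *Block {
	cpy := *header
	return &Block{header: &cpy, txs: b.txs}
}

// seal stands in for an engine's Seal step: it fills in the proof fields.
func seal(h *Header) *Header {
	sealed := *h
	sealed.Nonce = 42 // pretend we found a valid nonce
	return &sealed
}

func main() {
	work := &Block{header: &Header{Number: 1}, txs: []string{"tx1"}}
	sealedBlock := work.WithSeal(seal(work.header))
	fmt.Println(sealedBlock.header.Nonce, len(sealedBlock.txs))
}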
@ -26,7 +26,6 @@ import (
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"math/big"
|
"math/big"
|
||||||
"os"
|
"os"
|
||||||
"runtime"
|
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
@ -37,6 +36,7 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/core/vm"
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
"github.com/ethereum/go-ethereum/internal/ethapi"
|
"github.com/ethereum/go-ethereum/internal/ethapi"
|
||||||
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/miner"
|
"github.com/ethereum/go-ethereum/miner"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
|
@ -56,18 +56,18 @@ func NewPublicEthereumAPI(e *Ethereum) *PublicEthereumAPI {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Etherbase is the address that mining rewards will be send to
|
// Etherbase is the address that mining rewards will be send to
|
||||||
func (s *PublicEthereumAPI) Etherbase() (common.Address, error) {
|
func (api *PublicEthereumAPI) Etherbase() (common.Address, error) {
|
||||||
return s.e.Etherbase()
|
return api.e.Etherbase()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Coinbase is the address that mining rewards will be send to (alias for Etherbase)
|
// Coinbase is the address that mining rewards will be send to (alias for Etherbase)
|
||||||
func (s *PublicEthereumAPI) Coinbase() (common.Address, error) {
|
func (api *PublicEthereumAPI) Coinbase() (common.Address, error) {
|
||||||
return s.Etherbase()
|
return api.Etherbase()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Hashrate returns the POW hashrate
|
// Hashrate returns the POW hashrate
|
||||||
func (s *PublicEthereumAPI) Hashrate() hexutil.Uint64 {
|
func (api *PublicEthereumAPI) Hashrate() hexutil.Uint64 {
|
||||||
return hexutil.Uint64(s.e.Miner().HashRate())
|
return hexutil.Uint64(api.e.Miner().HashRate())
|
||||||
}
|
}
|
||||||
|
|
||||||
// PublicMinerAPI provides an API to control the miner.
|
// PublicMinerAPI provides an API to control the miner.
|
||||||
|
@ -79,34 +79,34 @@ type PublicMinerAPI struct {
|
||||||
|
|
||||||
// NewPublicMinerAPI create a new PublicMinerAPI instance.
|
// NewPublicMinerAPI create a new PublicMinerAPI instance.
|
||||||
func NewPublicMinerAPI(e *Ethereum) *PublicMinerAPI {
|
func NewPublicMinerAPI(e *Ethereum) *PublicMinerAPI {
|
||||||
agent := miner.NewRemoteAgent(e.Pow())
|
agent := miner.NewRemoteAgent(e.BlockChain(), e.Engine())
|
||||||
e.Miner().Register(agent)
|
e.Miner().Register(agent)
|
||||||
|
|
||||||
return &PublicMinerAPI{e, agent}
|
return &PublicMinerAPI{e, agent}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Mining returns an indication if this node is currently mining.
|
// Mining returns an indication if this node is currently mining.
|
||||||
func (s *PublicMinerAPI) Mining() bool {
|
func (api *PublicMinerAPI) Mining() bool {
|
||||||
return s.e.IsMining()
|
return api.e.IsMining()
|
||||||
}
|
}
|
||||||
|
|
||||||
// SubmitWork can be used by external miner to submit their POW solution. It returns an indication if the work was
|
// SubmitWork can be used by external miner to submit their POW solution. It returns an indication if the work was
|
||||||
// accepted. Note, this is not an indication if the provided work was valid!
|
// accepted. Note, this is not an indication if the provided work was valid!
|
||||||
func (s *PublicMinerAPI) SubmitWork(nonce types.BlockNonce, solution, digest common.Hash) bool {
|
func (api *PublicMinerAPI) SubmitWork(nonce types.BlockNonce, solution, digest common.Hash) bool {
|
||||||
return s.agent.SubmitWork(nonce, digest, solution)
|
return api.agent.SubmitWork(nonce, digest, solution)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetWork returns a work package for external miner. The work package consists of 3 strings
|
// GetWork returns a work package for external miner. The work package consists of 3 strings
|
||||||
// result[0], 32 bytes hex encoded current block header pow-hash
|
// result[0], 32 bytes hex encoded current block header pow-hash
|
||||||
// result[1], 32 bytes hex encoded seed hash used for DAG
|
// result[1], 32 bytes hex encoded seed hash used for DAG
|
||||||
// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
|
// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
|
||||||
func (s *PublicMinerAPI) GetWork() ([3]string, error) {
|
func (api *PublicMinerAPI) GetWork() ([3]string, error) {
|
||||||
if !s.e.IsMining() {
|
if !api.e.IsMining() {
|
||||||
if err := s.e.StartMining(0); err != nil {
|
if err := api.e.StartMining(); err != nil {
|
||||||
return [3]string{}, err
|
return [3]string{}, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
work, err := s.agent.GetWork()
|
work, err := api.agent.GetWork()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return work, fmt.Errorf("mining not ready: %v", err)
|
return work, fmt.Errorf("mining not ready: %v", err)
|
||||||
}
|
}
|
||||||
|
@ -116,8 +116,8 @@ func (s *PublicMinerAPI) GetWork() ([3]string, error) {
|
||||||
// SubmitHashrate can be used for remote miners to submit their hash rate. This enables the node to report the combined
|
// SubmitHashrate can be used for remote miners to submit their hash rate. This enables the node to report the combined
|
||||||
// hash rate of all miners which submit work through this node. It accepts the miner hash rate and an identifier which
|
// hash rate of all miners which submit work through this node. It accepts the miner hash rate and an identifier which
|
||||||
// must be unique between nodes.
|
// must be unique between nodes.
|
||||||
func (s *PublicMinerAPI) SubmitHashrate(hashrate hexutil.Uint64, id common.Hash) bool {
|
func (api *PublicMinerAPI) SubmitHashrate(hashrate hexutil.Uint64, id common.Hash) bool {
|
||||||
s.agent.SubmitHashrate(id, uint64(hashrate))
|
api.agent.SubmitHashrate(id, uint64(hashrate))
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -132,47 +132,59 @@ func NewPrivateMinerAPI(e *Ethereum) *PrivateMinerAPI {
 	return &PrivateMinerAPI{e: e}
 }
 
-// Start the miner with the given number of threads. If threads is nil the number of
-// workers started is equal to the number of logical CPU's that are usable by this process.
-func (s *PrivateMinerAPI) Start(threads *int) (bool, error) {
-	var err error
-	if threads == nil {
-		err = s.e.StartMining(runtime.NumCPU())
-	} else {
-		err = s.e.StartMining(*threads)
-	}
-	return err == nil, err
-}
+// Start the miner with the given number of threads. If threads is nil the number
+// of workers started is equal to the number of logical CPUs that are usable by
+// this process. If mining is already running, this method adjust the number of
+// threads allowed to use.
+func (api *PrivateMinerAPI) Start(threads *int) error {
+	// Set the number of threads if the seal engine supports it
+	if threads != nil {
+		type threaded interface {
+			SetThreads(threads int)
+		}
+		if th, ok := api.e.engine.(threaded); ok {
+			log.Info("Updated mining threads", "threads", *threads)
+			th.SetThreads(*threads)
+		} else {
+			log.Warn("Current seal engine isn't threaded")
+		}
+	}
+	// Start the miner and return
+	if !api.e.IsMining() {
+		return api.e.StartMining()
+	}
+	return nil
+}
 
 // Stop the miner
-func (s *PrivateMinerAPI) Stop() bool {
-	s.e.StopMining()
+func (api *PrivateMinerAPI) Stop() bool {
+	api.e.StopMining()
 	return true
 }
 
 // SetExtra sets the extra data string that is included when this miner mines a block.
-func (s *PrivateMinerAPI) SetExtra(extra string) (bool, error) {
-	if err := s.e.Miner().SetExtra([]byte(extra)); err != nil {
+func (api *PrivateMinerAPI) SetExtra(extra string) (bool, error) {
+	if err := api.e.Miner().SetExtra([]byte(extra)); err != nil {
 		return false, err
 	}
 	return true, nil
 }
 
 // SetGasPrice sets the minimum accepted gas price for the miner.
-func (s *PrivateMinerAPI) SetGasPrice(gasPrice hexutil.Big) bool {
-	s.e.Miner().SetGasPrice((*big.Int)(&gasPrice))
+func (api *PrivateMinerAPI) SetGasPrice(gasPrice hexutil.Big) bool {
+	api.e.Miner().SetGasPrice((*big.Int)(&gasPrice))
 	return true
 }
 
 // SetEtherbase sets the etherbase of the miner
-func (s *PrivateMinerAPI) SetEtherbase(etherbase common.Address) bool {
-	s.e.SetEtherbase(etherbase)
+func (api *PrivateMinerAPI) SetEtherbase(etherbase common.Address) bool {
+	api.e.SetEtherbase(etherbase)
 	return true
 }
 
 // GetHashrate returns the current hashrate of the miner.
-func (s *PrivateMinerAPI) GetHashrate() uint64 {
-	return uint64(s.e.miner.HashRate())
+func (api *PrivateMinerAPI) GetHashrate() uint64 {
+	return uint64(api.e.miner.HashRate())
 }
 
 // PrivateAdminAPI is the collection of Etheruem full node-related APIs
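The rewritten Start probes the engine for an optional SetThreads capability instead of passing a thread count into StartMining. A minimal sketch of that type-assertion pattern, with illustrative stand-in types:

package main

import "fmt"

type engine interface {
	Seal(blockNumber uint64) error
}

// threaded is the optional capability, matching the interface declared inline
// in PrivateMinerAPI.Start.
type threaded interface {
	SetThreads(threads int)
}

type ethashLike struct{ threads int }

func (e *ethashLike) Seal(uint64) error      { return nil }
func (e *ethashLike) SetThreads(threads int) { e.threads = threads }

type singleThreaded struct{} // a seal engine without SetThreads

func (singleThreaded) Seal(uint64) error { return nil }

func setMiningThreads(e engine, threads int) {
	if th, ok := e.(threaded); ok {
		th.SetThreads(threads)
		fmt.Println("updated mining threads:", threads)
	} else {
		fmt.Println("current seal engine isn't threaded")
	}
}

func main() {
	setMiningThreads(&ethashLike{}, 4)
	setMiningThreads(singleThreaded{}, 4)
}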
@@ -321,7 +333,7 @@ type TraceArgs struct {
 	Timeout *string
 }
 
-// TraceBlock processes the given block's RLP but does not import the block in to
+// TraceBlock processes the given block'api RLP but does not import the block in to
 // the chain.
 func (api *PrivateDebugAPI) TraceBlock(blockRlp []byte, config *vm.LogConfig) BlockTraceResult {
 	var block types.Block
@@ -338,7 +350,7 @@ func (api *PrivateDebugAPI) TraceBlock(blockRlp []byte, config *vm.LogConfig) Bl
 	}
 }
 
-// TraceBlockFromFile loads the block's RLP from the given file name and attempts to
+// TraceBlockFromFile loads the block'api RLP from the given file name and attempts to
 // process it but does not import the block in to the chain.
 func (api *PrivateDebugAPI) TraceBlockFromFile(file string, config *vm.LogConfig) BlockTraceResult {
 	blockRlp, err := ioutil.ReadFile(file)
@@ -395,8 +407,7 @@ func (api *PrivateDebugAPI) traceBlock(block *types.Block, logConfig *vm.LogConf
 		Debug:  true,
 		Tracer: structLogger,
 	}
-	if err := core.ValidateHeader(api.config, blockchain.AuxValidator(), block.Header(), blockchain.GetHeader(block.ParentHash(), block.NumberU64()-1), true, false); err != nil {
+	if err := api.eth.engine.VerifyHeader(blockchain, block.Header(), true); err != nil {
 		return false, structLogger.StructLogs(), err
 	}
 	statedb, err := blockchain.StateAt(blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1).Root())
@@ -22,10 +22,11 @@ import (
 	"math/big"
 	"regexp"
 	"sync"
-	"time"
 
 	"github.com/ethereum/go-ethereum/accounts"
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/consensus"
+	"github.com/ethereum/go-ethereum/consensus/ethash"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
@@ -40,18 +41,9 @@ import (
 	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/params"
-	"github.com/ethereum/go-ethereum/pow"
 	"github.com/ethereum/go-ethereum/rpc"
 )
 
-const (
-	epochLength    = 30000
-	ethashRevision = 23
-
-	autoDAGcheckInterval = 10 * time.Hour
-	autoDAGepochHeight   = epochLength / 2
-)
-
 var (
 	datadirInUseErrnos = map[uint]bool{11: true, 32: true, 35: true}
 	portInUseErrRE     = regexp.MustCompile("address already in use")
@@ -124,7 +116,7 @@ type Ethereum struct {
 	chainDb ethdb.Database // Block chain database
 
 	eventMux       *event.TypeMux
-	pow            pow.PoW
+	engine         consensus.Engine
 	accountManager *accounts.Manager
 
 	ApiBackend *EthApiBackend
@@ -163,7 +155,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
 		chainConfig:    chainConfig,
 		eventMux:       ctx.EventMux,
 		accountManager: ctx.AccountManager,
-		pow:            CreatePoW(ctx, config),
+		engine:         CreateConsensusEngine(ctx, config, chainConfig, chainDb),
 		shutdownChan:   make(chan bool),
 		stopDbUpgrade:  stopDbUpgrade,
 		netVersionId:   config.NetworkId,
@@ -186,7 +178,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
 	}
 
 	vmConfig := vm.Config{EnablePreimageRecording: config.EnablePreimageRecording}
-	eth.blockchain, err = core.NewBlockChain(chainDb, eth.chainConfig, eth.pow, eth.eventMux, vmConfig)
+	eth.blockchain, err = core.NewBlockChain(chainDb, eth.chainConfig, eth.engine, eth.eventMux, vmConfig)
 	if err != nil {
 		return nil, err
 	}
@@ -211,10 +203,11 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
 		}
 	}
 
-	if eth.protocolManager, err = NewProtocolManager(eth.chainConfig, config.FastSync, config.NetworkId, maxPeers, eth.eventMux, eth.txPool, eth.pow, eth.blockchain, chainDb); err != nil {
+	if eth.protocolManager, err = NewProtocolManager(eth.chainConfig, config.FastSync, config.NetworkId, maxPeers, eth.eventMux, eth.txPool, eth.engine, eth.blockchain, chainDb); err != nil {
 		return nil, err
 	}
-	eth.miner = miner.New(eth, eth.chainConfig, eth.EventMux(), eth.pow)
+
+	eth.miner = miner.New(eth, eth.chainConfig, eth.EventMux(), eth.engine)
 	eth.miner.SetGasPrice(config.GasPrice)
 	eth.miner.SetExtra(config.ExtraData)
 
@@ -241,20 +234,20 @@ func CreateDB(ctx *node.ServiceContext, config *Config, name string) (ethdb.Data
 	return db, err
 }
 
-// CreatePoW creates the required type of PoW instance for an Ethereum service
-func CreatePoW(ctx *node.ServiceContext, config *Config) pow.PoW {
+// CreateConsensusEngine creates the required type of consensus engine instance for an Ethereum service
+func CreateConsensusEngine(ctx *node.ServiceContext, config *Config, chainConfig *params.ChainConfig, db ethdb.Database) consensus.Engine {
 	switch {
 	case config.PowFake:
 		log.Warn("Ethash used in fake mode")
-		return pow.FakePow{}
+		return ethash.NewFaker()
 	case config.PowTest:
 		log.Warn("Ethash used in test mode")
-		return pow.NewTestEthash()
+		return ethash.NewTester()
 	case config.PowShared:
 		log.Warn("Ethash used in shared mode")
-		return pow.NewSharedEthash()
+		return ethash.NewShared()
 	default:
-		return pow.NewFullEthash(ctx.ResolvePath(config.EthashCacheDir), config.EthashCachesInMem, config.EthashCachesOnDisk,
+		return ethash.New(ctx.ResolvePath(config.EthashCacheDir), config.EthashCachesInMem, config.EthashCachesOnDisk,
 			config.EthashDatasetDir, config.EthashDatasetsInMem, config.EthashDatasetsOnDisk)
 	}
 }
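A hedged sketch of the selection logic above: the engine flavour is chosen from configuration. The constructor names in the diff (ethash.NewFaker, NewTester, NewShared, New) come from the hunk itself; the surrounding types below are toy ones so the snippet runs on its own.

package main

import "fmt"

type Engine interface{ Name() string }

type namedEngine string

func (n namedEngine) Name() string { return string(n) }

type Config struct {
	PowFake, PowTest, PowShared bool
}

func createConsensusEngine(cfg Config) Engine {
	switch {
	case cfg.PowFake:
		return namedEngine("ethash faker") // accepts all blocks, for tests
	case cfg.PowTest:
		return namedEngine("ethash tester") // small datasets, quick verification
	case cfg.PowShared:
		return namedEngine("shared ethash") // one instance shared per process
	default:
		return namedEngine("full ethash") // real caches and DAGs on disk
	}
}

func main() {
	fmt.Println(createConsensusEngine(Config{PowFake: true}).Name())
	fmt.Println(createConsensusEngine(Config{}).Name())
}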
@@ -262,7 +255,13 @@ func CreatePoW(ctx *node.ServiceContext, config *Config) pow.PoW {
 // APIs returns the collection of RPC services the ethereum package offers.
 // NOTE, some of these services probably need to be moved to somewhere else.
 func (s *Ethereum) APIs() []rpc.API {
-	return append(ethapi.GetAPIs(s.ApiBackend, s.solcPath), []rpc.API{
+	apis := ethapi.GetAPIs(s.ApiBackend, s.solcPath)
+
+	// Append any APIs exposed explicitly by the consensus engine
+	apis = append(apis, s.engine.APIs(s.BlockChain())...)
+
+	// Append all the local APIs and return
+	return append(apis, []rpc.API{
 		{
 			Namespace: "eth",
 			Version:   "1.0",
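Sketch of the APIs() change with toy types: the node's RPC list becomes the generic APIs plus whatever the consensus engine chooses to expose, so an engine can ship its own namespace without the eth package knowing about it. The "ethash" namespace below is purely illustrative.

package main

import "fmt"

type API struct {
	Namespace string
	Version   string
}

type engine interface {
	APIs() []API
}

type ethashLike struct{}

// An engine may publish its own RPC surface, e.g. a hypothetical "ethash" namespace.
func (ethashLike) APIs() []API { return []API{{Namespace: "ethash", Version: "1.0"}} }

func nodeAPIs(e engine) []API {
	apis := []API{{Namespace: "eth", Version: "1.0"}} // the generic APIs
	apis = append(apis, e.APIs()...)                  // engine-specific extras
	return apis
}

func main() {
	for _, api := range nodeAPIs(ethashLike{}) {
		fmt.Println(api.Namespace, api.Version)
	}
}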
@@ -332,13 +331,13 @@ func (self *Ethereum) SetEtherbase(etherbase common.Address) {
 	self.miner.SetEtherbase(etherbase)
 }
 
-func (s *Ethereum) StartMining(threads int) error {
+func (s *Ethereum) StartMining() error {
 	eb, err := s.Etherbase()
 	if err != nil {
 		log.Error("Cannot start mining without etherbase", "err", err)
 		return fmt.Errorf("etherbase missing: %v", err)
 	}
-	go s.miner.Start(eb, threads)
+	go s.miner.Start(eb)
 	return nil
 }
 
@@ -350,7 +349,7 @@ func (s *Ethereum) AccountManager() *accounts.Manager { return s.accountManager
 func (s *Ethereum) BlockChain() *core.BlockChain { return s.blockchain }
 func (s *Ethereum) TxPool() *core.TxPool { return s.txPool }
 func (s *Ethereum) EventMux() *event.TypeMux { return s.eventMux }
-func (s *Ethereum) Pow() pow.PoW { return s.pow }
+func (s *Ethereum) Engine() consensus.Engine { return s.engine }
 func (s *Ethereum) ChainDb() ethdb.Database { return s.chainDb }
 func (s *Ethereum) IsListening() bool { return true } // Always listening
 func (s *Ethereum) EthVersion() int { return int(s.protocolManager.SubProtocols[0].Version) }
@@ -52,8 +52,8 @@ type headerRequesterFn func(common.Hash) error
 // bodyRequesterFn is a callback type for sending a body retrieval request.
 type bodyRequesterFn func([]common.Hash) error
 
-// blockValidatorFn is a callback type to verify a block's header for fast propagation.
-type blockValidatorFn func(block *types.Block, parent *types.Block) error
+// headerVerifierFn is a callback type to verify a block's header for fast propagation.
+type headerVerifierFn func(header *types.Header) error
 
 // blockBroadcasterFn is a callback type for broadcasting a block to connected peers.
 type blockBroadcasterFn func(block *types.Block, propagate bool)
@@ -129,7 +129,7 @@ type Fetcher struct {
 
 	// Callbacks
 	getBlock       blockRetrievalFn   // Retrieves a block from the local chain
-	validateBlock  blockValidatorFn   // Checks if a block's headers have a valid proof of work
+	verifyHeader   headerVerifierFn   // Checks if a block's headers have a valid proof of work
 	broadcastBlock blockBroadcasterFn // Broadcasts a block to connected peers
 	chainHeight    chainHeightFn      // Retrieves the current chain's height
 	insertChain    chainInsertFn      // Injects a batch of blocks into the chain
@@ -144,7 +144,7 @@ type Fetcher struct {
 }
 
 // New creates a block fetcher to retrieve blocks based on hash announcements.
-func New(getBlock blockRetrievalFn, validateBlock blockValidatorFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertChain chainInsertFn, dropPeer peerDropFn) *Fetcher {
+func New(getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertChain chainInsertFn, dropPeer peerDropFn) *Fetcher {
 	return &Fetcher{
 		notify: make(chan *announce),
 		inject: make(chan *inject),
@@ -162,7 +162,7 @@ func New(getBlock blockRetrievalFn, validateBlock blockValidatorFn, broadcastBlo
 		queues:         make(map[string]int),
 		queued:         make(map[common.Hash]*inject),
 		getBlock:       getBlock,
-		validateBlock:  validateBlock,
+		verifyHeader:   verifyHeader,
 		broadcastBlock: broadcastBlock,
 		chainHeight:    chainHeight,
 		insertChain:    insertChain,
@@ -648,7 +648,7 @@ func (f *Fetcher) insert(peer string, block *types.Block) {
 		return
 	}
 	// Quickly validate the header and propagate the block if it passes
-	switch err := f.validateBlock(block, parent); err {
+	switch err := f.verifyHeader(block.Header()); err {
 	case nil:
 		// All ok, quickly propagate to our peers
 		propBroadcastOutTimer.UpdateSince(block.ReceivedAt)
@@ -91,7 +91,7 @@ func newTester() *fetcherTester {
 		blocks: map[common.Hash]*types.Block{genesis.Hash(): genesis},
 		drops:  make(map[string]bool),
 	}
-	tester.fetcher = New(tester.getBlock, tester.verifyBlock, tester.broadcastBlock, tester.chainHeight, tester.insertChain, tester.dropPeer)
+	tester.fetcher = New(tester.getBlock, tester.verifyHeader, tester.broadcastBlock, tester.chainHeight, tester.insertChain, tester.dropPeer)
 	tester.fetcher.Start()
 
 	return tester
@@ -105,8 +105,8 @@ func (f *fetcherTester) getBlock(hash common.Hash) *types.Block {
 	return f.blocks[hash]
 }
 
-// verifyBlock is a nop placeholder for the block header verification.
-func (f *fetcherTester) verifyBlock(block *types.Block, parent *types.Block) error {
+// verifyHeader is a nop placeholder for the block header verification.
+func (f *fetcherTester) verifyHeader(header *types.Header) error {
 	return nil
 }
 
@@ -27,6 +27,8 @@ import (
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/consensus"
+	"github.com/ethereum/go-ethereum/consensus/misc"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/eth/downloader"
@@ -37,7 +39,6 @@ import (
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/p2p/discover"
 	"github.com/ethereum/go-ethereum/params"
-	"github.com/ethereum/go-ethereum/pow"
 	"github.com/ethereum/go-ethereum/rlp"
 )
 
@@ -97,7 +98,7 @@ type ProtocolManager struct {
 
 // NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
 // with the ethereum network.
-func NewProtocolManager(config *params.ChainConfig, fastSync bool, networkId int, maxPeers int, mux *event.TypeMux, txpool txPool, pow pow.PoW, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) {
+func NewProtocolManager(config *params.ChainConfig, fastSync bool, networkId int, maxPeers int, mux *event.TypeMux, txpool txPool, engine consensus.Engine, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) {
 	// Create the protocol manager with the base fields
 	manager := &ProtocolManager{
 		networkId: networkId,
@@ -165,8 +166,8 @@ func NewProtocolManager(config *params.ChainConfig, fastSync bool, networkId int
 		blockchain.GetTdByHash, blockchain.InsertHeaderChain, manager.insertChain, blockchain.InsertReceiptChain, blockchain.Rollback,
 		manager.removePeer)
 
-	validator := func(block *types.Block, parent *types.Block) error {
-		return core.ValidateHeader(config, pow, block.Header(), parent.Header(), true, false)
+	validator := func(header *types.Header) error {
+		return engine.VerifyHeader(blockchain, header, true)
 	}
 	heighter := func() uint64 {
 		return blockchain.CurrentBlock().NumberU64()
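A small sketch (illustrative types) of the adaptation in this hunk: the fetcher consumes a func(*types.Header) error, and the protocol manager now builds it by closing over the consensus engine rather than calling core.ValidateHeader with a PoW instance.

package main

import "fmt"

type Header struct{ Number uint64 }

type Engine interface {
	VerifyHeader(h *Header, seal bool) error
}

type fakeEngine struct{}

func (fakeEngine) VerifyHeader(h *Header, seal bool) error { return nil }

// headerVerifierFn is the callback shape the fetcher consumes.
type headerVerifierFn func(*Header) error

// newValidator builds the closure the protocol manager hands to the fetcher.
func newValidator(engine Engine) headerVerifierFn {
	return func(header *Header) error {
		return engine.VerifyHeader(header, true) // always check the seal here
	}
}

func main() {
	validator := newValidator(fakeEngine{})
	fmt.Println(validator(&Header{Number: 1}))
}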
@@ -448,7 +449,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			p.forkDrop = nil
 
 			// Validate the header and either drop the peer or continue
-			if err := core.ValidateDAOHeaderExtraData(pm.chainconfig, headers[0]); err != nil {
+			if err := misc.VerifyDAOHeaderExtraData(pm.chainconfig, headers[0]); err != nil {
 				p.Log().Debug("Verified to be on the other side of the DAO fork, dropping")
 				return err
 			}
@ -24,6 +24,7 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||||
"github.com/ethereum/go-ethereum/core"
|
"github.com/ethereum/go-ethereum/core"
|
||||||
"github.com/ethereum/go-ethereum/core/state"
|
"github.com/ethereum/go-ethereum/core/state"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
|
@ -34,7 +35,6 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/event"
|
"github.com/ethereum/go-ethereum/event"
|
||||||
"github.com/ethereum/go-ethereum/p2p"
|
"github.com/ethereum/go-ethereum/p2p"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/pow"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var bigTxGas = new(big.Int).SetUint64(params.TxGas)
|
var bigTxGas = new(big.Int).SetUint64(params.TxGas)
|
||||||
|
@ -469,7 +469,7 @@ func testDAOChallenge(t *testing.T, localForked, remoteForked bool, timeout bool
|
||||||
// Create a DAO aware protocol manager
|
// Create a DAO aware protocol manager
|
||||||
var (
|
var (
|
||||||
evmux = new(event.TypeMux)
|
evmux = new(event.TypeMux)
|
||||||
pow = new(pow.FakePow)
|
pow = ethash.NewFaker()
|
||||||
db, _ = ethdb.NewMemDatabase()
|
db, _ = ethdb.NewMemDatabase()
|
||||||
config = ¶ms.ChainConfig{DAOForkBlock: big.NewInt(1), DAOForkSupport: localForked}
|
config = ¶ms.ChainConfig{DAOForkBlock: big.NewInt(1), DAOForkSupport: localForked}
|
||||||
gspec = &core.Genesis{Config: config}
|
gspec = &core.Genesis{Config: config}
|
||||||
|
|
|
@ -28,6 +28,7 @@ import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||||
"github.com/ethereum/go-ethereum/core"
|
"github.com/ethereum/go-ethereum/core"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/core/vm"
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
|
@ -37,7 +38,6 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/p2p"
|
"github.com/ethereum/go-ethereum/p2p"
|
||||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/pow"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -50,22 +50,22 @@ var (
|
||||||
// channels for different events.
|
// channels for different events.
|
||||||
func newTestProtocolManager(fastSync bool, blocks int, generator func(int, *core.BlockGen), newtx chan<- []*types.Transaction) (*ProtocolManager, error) {
|
func newTestProtocolManager(fastSync bool, blocks int, generator func(int, *core.BlockGen), newtx chan<- []*types.Transaction) (*ProtocolManager, error) {
|
||||||
var (
|
var (
|
||||||
evmux = new(event.TypeMux)
|
evmux = new(event.TypeMux)
|
||||||
pow = new(pow.FakePow)
|
engine = ethash.NewFaker()
|
||||||
db, _ = ethdb.NewMemDatabase()
|
db, _ = ethdb.NewMemDatabase()
|
||||||
gspec = &core.Genesis{
|
gspec = &core.Genesis{
|
||||||
Config: params.TestChainConfig,
|
Config: params.TestChainConfig,
|
||||||
Alloc: core.GenesisAlloc{testBank: {Balance: big.NewInt(1000000)}},
|
Alloc: core.GenesisAlloc{testBank: {Balance: big.NewInt(1000000)}},
|
||||||
}
|
}
|
||||||
genesis = gspec.MustCommit(db)
|
genesis = gspec.MustCommit(db)
|
||||||
blockchain, _ = core.NewBlockChain(db, gspec.Config, pow, evmux, vm.Config{})
|
blockchain, _ = core.NewBlockChain(db, gspec.Config, engine, evmux, vm.Config{})
|
||||||
)
|
)
|
||||||
chain, _ := core.GenerateChain(gspec.Config, genesis, db, blocks, generator)
|
chain, _ := core.GenerateChain(gspec.Config, genesis, db, blocks, generator)
|
||||||
if _, err := blockchain.InsertChain(chain); err != nil {
|
if _, err := blockchain.InsertChain(chain); err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
pm, err := NewProtocolManager(gspec.Config, fastSync, NetworkId, 1000, evmux, &testTxPool{added: newtx}, pow, blockchain, db)
|
pm, err := NewProtocolManager(gspec.Config, fastSync, NetworkId, 1000, evmux, &testTxPool{added: newtx}, engine, blockchain, db)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
|
@ -31,6 +31,7 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||||
"github.com/ethereum/go-ethereum/common/math"
|
"github.com/ethereum/go-ethereum/common/math"
|
||||||
|
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||||
"github.com/ethereum/go-ethereum/core"
|
"github.com/ethereum/go-ethereum/core"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/core/vm"
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
|
@ -39,7 +40,6 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/p2p"
|
"github.com/ethereum/go-ethereum/p2p"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/pow"
|
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"github.com/ethereum/go-ethereum/rpc"
|
"github.com/ethereum/go-ethereum/rpc"
|
||||||
"github.com/syndtr/goleveldb/leveldb"
|
"github.com/syndtr/goleveldb/leveldb"
|
||||||
|
@ -1378,7 +1378,7 @@ func (api *PublicDebugAPI) SeedHash(ctx context.Context, number uint64) (string,
|
||||||
if block == nil {
|
if block == nil {
|
||||||
return "", fmt.Errorf("block #%d not found", number)
|
return "", fmt.Errorf("block #%d not found", number)
|
||||||
}
|
}
|
||||||
return fmt.Sprintf("0x%x", pow.EthashSeedHash(number)), nil
|
return fmt.Sprintf("0x%x", ethash.SeedHash(number)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// PrivateDebugAPI is the collection of Etheruem APIs exposed over the private
|
// PrivateDebugAPI is the collection of Etheruem APIs exposed over the private
|
||||||
|
|
|
@ -25,6 +25,7 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/common/compiler"
|
"github.com/ethereum/go-ethereum/common/compiler"
|
||||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||||
|
"github.com/ethereum/go-ethereum/consensus"
|
||||||
"github.com/ethereum/go-ethereum/core"
|
"github.com/ethereum/go-ethereum/core"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/eth"
|
"github.com/ethereum/go-ethereum/eth"
|
||||||
|
@ -39,7 +40,6 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/node"
|
"github.com/ethereum/go-ethereum/node"
|
||||||
"github.com/ethereum/go-ethereum/p2p"
|
"github.com/ethereum/go-ethereum/p2p"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/pow"
|
|
||||||
rpc "github.com/ethereum/go-ethereum/rpc"
|
rpc "github.com/ethereum/go-ethereum/rpc"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -59,7 +59,7 @@ type LightEthereum struct {
|
||||||
ApiBackend *LesApiBackend
|
ApiBackend *LesApiBackend
|
||||||
|
|
||||||
eventMux *event.TypeMux
|
eventMux *event.TypeMux
|
||||||
pow pow.PoW
|
engine consensus.Engine
|
||||||
accountManager *accounts.Manager
|
accountManager *accounts.Manager
|
||||||
solcPath string
|
solcPath string
|
||||||
solc *compiler.Solidity
|
solc *compiler.Solidity
|
||||||
|
@ -88,14 +88,12 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
|
||||||
chainConfig: chainConfig,
|
chainConfig: chainConfig,
|
||||||
eventMux: ctx.EventMux,
|
eventMux: ctx.EventMux,
|
||||||
accountManager: ctx.AccountManager,
|
accountManager: ctx.AccountManager,
|
||||||
pow: eth.CreatePoW(ctx, config),
|
engine: eth.CreateConsensusEngine(ctx, config, chainConfig, chainDb),
|
||||||
shutdownChan: make(chan bool),
|
shutdownChan: make(chan bool),
|
||||||
netVersionId: config.NetworkId,
|
netVersionId: config.NetworkId,
|
||||||
solcPath: config.SolcPath,
|
solcPath: config.SolcPath,
|
||||||
}
|
}
|
||||||
|
if eth.blockchain, err = light.NewLightChain(odr, eth.chainConfig, eth.engine, eth.eventMux); err != nil {
|
||||||
eth.blockchain, err = light.NewLightChain(odr, eth.chainConfig, eth.pow, eth.eventMux)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
// Rewind the chain in case of an incompatible config upgrade.
|
// Rewind the chain in case of an incompatible config upgrade.
|
||||||
|
@ -106,7 +104,7 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
eth.txPool = light.NewTxPool(eth.chainConfig, eth.eventMux, eth.blockchain, eth.relay)
|
eth.txPool = light.NewTxPool(eth.chainConfig, eth.eventMux, eth.blockchain, eth.relay)
|
||||||
if eth.protocolManager, err = NewProtocolManager(eth.chainConfig, config.LightMode, config.NetworkId, eth.eventMux, eth.pow, eth.blockchain, nil, chainDb, odr, relay); err != nil {
|
if eth.protocolManager, err = NewProtocolManager(eth.chainConfig, config.LightMode, config.NetworkId, eth.eventMux, eth.engine, eth.blockchain, nil, chainDb, odr, relay); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
relay.ps = eth.protocolManager.peers
|
relay.ps = eth.protocolManager.peers
|
||||||
|
|
|
@ -27,6 +27,7 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/consensus"
|
||||||
"github.com/ethereum/go-ethereum/core"
|
"github.com/ethereum/go-ethereum/core"
|
||||||
"github.com/ethereum/go-ethereum/core/state"
|
"github.com/ethereum/go-ethereum/core/state"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
|
@ -39,7 +40,6 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||||
"github.com/ethereum/go-ethereum/p2p/discv5"
|
"github.com/ethereum/go-ethereum/p2p/discv5"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/pow"
|
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"github.com/ethereum/go-ethereum/trie"
|
"github.com/ethereum/go-ethereum/trie"
|
||||||
)
|
)
|
||||||
|
@ -128,7 +128,7 @@ type ProtocolManager struct {
|
||||||
|
|
||||||
// NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
|
// NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
|
||||||
// with the ethereum network.
|
// with the ethereum network.
|
||||||
func NewProtocolManager(chainConfig *params.ChainConfig, lightSync bool, networkId int, mux *event.TypeMux, pow pow.PoW, blockchain BlockChain, txpool txPool, chainDb ethdb.Database, odr *LesOdr, txrelay *LesTxRelay) (*ProtocolManager, error) {
|
func NewProtocolManager(chainConfig *params.ChainConfig, lightSync bool, networkId int, mux *event.TypeMux, engine consensus.Engine, blockchain BlockChain, txpool txPool, chainDb ethdb.Database, odr *LesOdr, txrelay *LesTxRelay) (*ProtocolManager, error) {
|
||||||
// Create the protocol manager with the base fields
|
// Create the protocol manager with the base fields
|
||||||
manager := &ProtocolManager{
|
manager := &ProtocolManager{
|
||||||
lightSync: lightSync,
|
lightSync: lightSync,
|
||||||
|
|
|
@ -28,6 +28,7 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||||
"github.com/ethereum/go-ethereum/core"
|
"github.com/ethereum/go-ethereum/core"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/core/vm"
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
|
@ -39,7 +40,6 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/p2p"
|
"github.com/ethereum/go-ethereum/p2p"
|
||||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/pow"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -134,10 +134,10 @@ func testRCL() RequestCostList {
|
||||||
// channels for different events.
|
// channels for different events.
|
||||||
func newTestProtocolManager(lightSync bool, blocks int, generator func(int, *core.BlockGen)) (*ProtocolManager, ethdb.Database, *LesOdr, error) {
|
func newTestProtocolManager(lightSync bool, blocks int, generator func(int, *core.BlockGen)) (*ProtocolManager, ethdb.Database, *LesOdr, error) {
|
||||||
var (
|
var (
|
||||||
evmux = new(event.TypeMux)
|
evmux = new(event.TypeMux)
|
||||||
pow = new(pow.FakePow)
|
engine = ethash.NewFaker()
|
||||||
db, _ = ethdb.NewMemDatabase()
|
db, _ = ethdb.NewMemDatabase()
|
||||||
gspec = core.Genesis{
|
gspec = core.Genesis{
|
||||||
Config: params.TestChainConfig,
|
Config: params.TestChainConfig,
|
||||||
Alloc: core.GenesisAlloc{testBankAddress: {Balance: testBankFunds}},
|
Alloc: core.GenesisAlloc{testBankAddress: {Balance: testBankFunds}},
|
||||||
}
|
}
|
||||||
|
@ -148,9 +148,9 @@ func newTestProtocolManager(lightSync bool, blocks int, generator func(int, *cor
|
||||||
|
|
||||||
if lightSync {
|
if lightSync {
|
||||||
odr = NewLesOdr(db)
|
odr = NewLesOdr(db)
|
||||||
chain, _ = light.NewLightChain(odr, gspec.Config, pow, evmux)
|
chain, _ = light.NewLightChain(odr, gspec.Config, engine, evmux)
|
||||||
} else {
|
} else {
|
||||||
blockchain, _ := core.NewBlockChain(db, gspec.Config, pow, evmux, vm.Config{})
|
blockchain, _ := core.NewBlockChain(db, gspec.Config, engine, evmux, vm.Config{})
|
||||||
gchain, _ := core.GenerateChain(gspec.Config, genesis, db, blocks, generator)
|
gchain, _ := core.GenerateChain(gspec.Config, genesis, db, blocks, generator)
|
||||||
if _, err := blockchain.InsertChain(gchain); err != nil {
|
if _, err := blockchain.InsertChain(gchain); err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
|
@ -158,7 +158,7 @@ func newTestProtocolManager(lightSync bool, blocks int, generator func(int, *cor
|
||||||
chain = blockchain
|
chain = blockchain
|
||||||
}
|
}
|
||||||
|
|
||||||
pm, err := NewProtocolManager(gspec.Config, lightSync, NetworkId, evmux, pow, chain, nil, db, odr, nil)
|
pm, err := NewProtocolManager(gspec.Config, lightSync, NetworkId, evmux, engine, chain, nil, db, odr, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, nil, err
|
return nil, nil, nil, err
|
||||||
}
|
}
|
||||||
|
|
|
@ -45,7 +45,7 @@ type LesServer struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) {
|
func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) {
|
||||||
pm, err := NewProtocolManager(eth.BlockChain().Config(), false, config.NetworkId, eth.EventMux(), eth.Pow(), eth.BlockChain(), eth.TxPool(), eth.ChainDb(), nil, nil)
|
pm, err := NewProtocolManager(eth.BlockChain().Config(), false, config.NetworkId, eth.EventMux(), eth.Engine(), eth.BlockChain(), eth.TxPool(), eth.ChainDb(), nil, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
|
@@ -24,13 +24,13 @@ import (
 	"time"

 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/consensus"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
-	"github.com/ethereum/go-ethereum/pow"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/hashicorp/golang-lru"
 )

@@ -64,14 +64,13 @@ type LightChain struct {
 	procInterrupt int32 // interrupt signaler for block processing
 	wg            sync.WaitGroup

-	pow       pow.PoW
-	validator core.HeaderValidator
+	engine consensus.Engine
 }

 // NewLightChain returns a fully initialised light chain using information
 // available in the database. It initialises the default Ethereum header
 // validator.
-func NewLightChain(odr OdrBackend, config *params.ChainConfig, pow pow.PoW, mux *event.TypeMux) (*LightChain, error) {
+func NewLightChain(odr OdrBackend, config *params.ChainConfig, engine consensus.Engine, mux *event.TypeMux) (*LightChain, error) {
 	bodyCache, _ := lru.New(bodyCacheLimit)
 	bodyRLPCache, _ := lru.New(bodyCacheLimit)
 	blockCache, _ := lru.New(blockCacheLimit)

@@ -84,21 +83,17 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, pow pow.PoW, mux
 		bodyCache:    bodyCache,
 		bodyRLPCache: bodyRLPCache,
 		blockCache:   blockCache,
-		pow:          pow,
+		engine:       engine,
 	}

 	var err error
-	bc.hc, err = core.NewHeaderChain(odr.Database(), config, bc.Validator, bc.getProcInterrupt)
-	bc.SetValidator(core.NewHeaderValidator(config, bc.hc, pow))
+	bc.hc, err = core.NewHeaderChain(odr.Database(), config, bc.engine, bc.getProcInterrupt)
 	if err != nil {
 		return nil, err
 	}

 	bc.genesisBlock, _ = bc.GetBlockByNumber(NoOdr, 0)
 	if bc.genesisBlock == nil {
 		return nil, core.ErrNoGenesis
 	}

 	if bc.genesisBlock.Hash() == params.MainNetGenesisHash {
 		// add trusted CHT
 		WriteTrustedCht(bc.chainDb, TrustedCht{Number: 805, Root: common.HexToHash("85e4286fe0a730390245c49de8476977afdae0eb5530b277f62a52b12313d50f")})

@@ -145,9 +140,6 @@ func (self *LightChain) loadLastState() error {
 	headerTd := self.GetTd(header.Hash(), header.Number.Uint64())
 	log.Info("Loaded most recent local header", "number", header.Number, "hash", header.Hash(), "td", headerTd)

-	// Try to be smart and issue a pow verification for the head to pre-generate caches
-	go self.pow.Verify(types.NewBlockWithHeader(header))
-
 	return nil
 }

@@ -188,20 +180,6 @@ func (self *LightChain) Status() (td *big.Int, currentBlock common.Hash, genesis
 	return self.GetTd(hash, header.Number.Uint64()), hash, self.genesisBlock.Hash()
 }

-// SetValidator sets the validator which is used to validate incoming headers.
-func (self *LightChain) SetValidator(validator core.HeaderValidator) {
-	self.procmu.Lock()
-	defer self.procmu.Unlock()
-	self.validator = validator
-}
-
-// Validator returns the current header validator.
-func (self *LightChain) Validator() core.HeaderValidator {
-	self.procmu.RLock()
-	defer self.procmu.RUnlock()
-	return self.validator
-}
-
 // State returns a new mutable state based on the current HEAD block.
 func (self *LightChain) State() *LightState {
 	return NewLightState(StateTrieID(self.hc.CurrentHeader()), self.odr)
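
Aside (not part of the diff): with this change a chain no longer needs a separately configured header validator; whoever constructs it just hands over a consensus.Engine and the chain asks that engine to verify headers. A minimal sketch of the new wiring, reusing only constructors that appear in this commit (ethdb.NewMemDatabase, gspec.MustCommit, ethash.NewFaker, core.NewBlockChain):

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/consensus/ethash"
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/vm"
    "github.com/ethereum/go-ethereum/ethdb"
    "github.com/ethereum/go-ethereum/event"
    "github.com/ethereum/go-ethereum/params"
)

func main() {
    // In-memory database seeded with a test genesis, as the tests in this commit do.
    db, _ := ethdb.NewMemDatabase()
    gspec := core.Genesis{Config: params.TestChainConfig}
    genesis := gspec.MustCommit(db)

    // ethash.NewFaker() is the engine-flavoured stand-in for the old pow.FakePow.
    chain, err := core.NewBlockChain(db, gspec.Config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
    if err != nil {
        panic(err)
    }
    fmt.Println("genesis:", genesis.Hash(), "head:", chain.CurrentBlock().Number())
}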
@@ -23,12 +23,12 @@ import (
 	"testing"

 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/consensus/ethash"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/params"
-	"github.com/ethereum/go-ethereum/pow"
 )

 // So we can deterministically seed different blockchains

@@ -49,18 +49,15 @@ func makeHeaderChain(parent *types.Header, n int, db ethdb.Database, seed int) [
 	return headers
 }

-func testChainConfig() *params.ChainConfig {
-	return &params.ChainConfig{HomesteadBlock: big.NewInt(0)}
-}
-
 // newCanonical creates a chain database, and injects a deterministic canonical
 // chain. Depending on the full flag, if creates either a full block chain or a
 // header only chain.
 func newCanonical(n int) (ethdb.Database, *LightChain, error) {
 	db, _ := ethdb.NewMemDatabase()
-	gspec := core.Genesis{Config: testChainConfig()}
+	gspec := core.Genesis{Config: params.TestChainConfig}
 	genesis := gspec.MustCommit(db)
-	blockchain, _ := NewLightChain(&dummyOdr{db: db}, gspec.Config, pow.FakePow{}, new(event.TypeMux))
+	blockchain, _ := NewLightChain(&dummyOdr{db: db}, gspec.Config, ethash.NewFaker(), new(event.TypeMux))

 	// Create and inject the requested chain
 	if n == 0 {
 		return db, blockchain, nil

@@ -76,14 +73,13 @@ func newTestLightChain() *LightChain {
 	db, _ := ethdb.NewMemDatabase()
 	gspec := &core.Genesis{
 		Difficulty: big.NewInt(1),
-		Config:     testChainConfig(),
+		Config:     params.TestChainConfig,
 	}
 	gspec.MustCommit(db)
-	lc, err := NewLightChain(&dummyOdr{db: db}, gspec.Config, pow.NewTestEthash(), new(event.TypeMux))
+	lc, err := NewLightChain(&dummyOdr{db: db}, gspec.Config, ethash.NewFullFaker(), new(event.TypeMux))
 	if err != nil {
 		panic(err)
 	}
-	lc.SetValidator(bproc{})
 	return lc
 }

@@ -130,17 +126,17 @@ func printChain(bc *LightChain) {

 // testHeaderChainImport tries to process a chain of header, writing them into
 // the database if successful.
-func testHeaderChainImport(chain []*types.Header, LightChain *LightChain) error {
+func testHeaderChainImport(chain []*types.Header, lightchain *LightChain) error {
 	for _, header := range chain {
 		// Try and validate the header
-		if err := LightChain.Validator().ValidateHeader(header, LightChain.GetHeaderByHash(header.ParentHash), false); err != nil {
+		if err := lightchain.engine.VerifyHeader(lightchain.hc, header, true); err != nil {
 			return err
 		}
 		// Manually insert the header into the database, but don't reorganize (allows subsequent testing)
-		LightChain.mu.Lock()
-		core.WriteTd(LightChain.chainDb, header.Hash(), header.Number.Uint64(), new(big.Int).Add(header.Difficulty, LightChain.GetTdByHash(header.ParentHash)))
-		core.WriteHeader(LightChain.chainDb, header)
-		LightChain.mu.Unlock()
+		lightchain.mu.Lock()
+		core.WriteTd(lightchain.chainDb, header.Hash(), header.Number.Uint64(), new(big.Int).Add(header.Difficulty, lightchain.GetTdByHash(header.ParentHash)))
+		core.WriteHeader(lightchain.chainDb, header)
+		lightchain.mu.Unlock()
 	}
 	return nil
 }

@@ -257,10 +253,6 @@ func TestBrokenHeaderChain(t *testing.T) {
 	}
 }

-type bproc struct{}
-
-func (bproc) ValidateHeader(*types.Header, *types.Header, bool) error { return nil }
-
 func makeHeaderChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Header {
 	var chain []*types.Header
 	for i, difficulty := range d {

@@ -359,7 +351,7 @@ func TestReorgBadHeaderHashes(t *testing.T) {
 	defer func() { delete(core.BadHashes, headers[3].Hash()) }()

 	// Create a new LightChain and check that it rolled back the state.
-	ncm, err := NewLightChain(&dummyOdr{db: bc.chainDb}, testChainConfig(), pow.FakePow{}, new(event.TypeMux))
+	ncm, err := NewLightChain(&dummyOdr{db: bc.chainDb}, params.TestChainConfig, ethash.NewFaker(), new(event.TypeMux))
 	if err != nil {
 		t.Fatalf("failed to create new chain manager: %v", err)
 	}
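
Aside (not part of the diff): the test above shows the new header-validation call shape, `engine.VerifyHeader(chainReader, header, seal)`. A small hedged sketch of the same pattern applied to a slice of headers, assuming only the types and signature visible in this diff:

package example

import (
    "github.com/ethereum/go-ethereum/consensus"
    "github.com/ethereum/go-ethereum/core/types"
)

// verifyAll asks the engine to validate each header against the chain,
// including its seal (the final boolean), mirroring testHeaderChainImport.
func verifyAll(chain consensus.ChainReader, engine consensus.Engine, headers []*types.Header) error {
    for _, header := range headers {
        if err := engine.VerifyHeader(chain, header, true); err != nil {
            return err
        }
    }
    return nil
}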
@@ -26,6 +26,7 @@ import (

 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/math"
+	"github.com/ethereum/go-ethereum/consensus/ethash"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"

@@ -34,7 +35,6 @@ import (
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/params"
-	"github.com/ethereum/go-ethereum/pow"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie"
 )

@@ -248,7 +248,6 @@ func testChainGen(i int, block *core.BlockGen) {
 func testChainOdr(t *testing.T, protocol int, expFail uint64, fn odrTestFn) {
 	var (
 		evmux  = new(event.TypeMux)
-		pow    = new(pow.FakePow)
 		sdb, _ = ethdb.NewMemDatabase()
 		ldb, _ = ethdb.NewMemDatabase()
 		gspec  = core.Genesis{Alloc: core.GenesisAlloc{testBankAddress: {Balance: testBankFunds}}}

@@ -256,16 +255,14 @@ func testChainOdr(t *testing.T, protocol int, expFail uint64, fn odrTestFn) {
 	)
 	gspec.MustCommit(ldb)
 	// Assemble the test environment
-	blockchain, _ := core.NewBlockChain(sdb, testChainConfig(), pow, evmux, vm.Config{})
-	chainConfig := &params.ChainConfig{HomesteadBlock: new(big.Int)}
-	gchain, _ := core.GenerateChain(chainConfig, genesis, sdb, 4, testChainGen)
+	blockchain, _ := core.NewBlockChain(sdb, params.TestChainConfig, ethash.NewFullFaker(), evmux, vm.Config{})
+	gchain, _ := core.GenerateChain(params.TestChainConfig, genesis, sdb, 4, testChainGen)
 	if _, err := blockchain.InsertChain(gchain); err != nil {
 		panic(err)
 	}

 	odr := &testOdr{sdb: sdb, ldb: ldb}
-	lightchain, _ := NewLightChain(odr, testChainConfig(), pow, evmux)
-	lightchain.SetValidator(bproc{})
+	lightchain, _ := NewLightChain(odr, params.TestChainConfig, ethash.NewFullFaker(), evmux)
 	headers := make([]*types.Header, len(gchain))
 	for i, block := range gchain {
 		headers[i] = block.Header()
@@ -24,13 +24,13 @@ import (
 	"time"

 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/consensus/ethash"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/params"
-	"github.com/ethereum/go-ethereum/pow"
 )

 type testTxRelay struct {

@@ -83,7 +83,6 @@ func TestTxPool(t *testing.T) {

 	var (
 		evmux  = new(event.TypeMux)
-		pow    = new(pow.FakePow)
 		sdb, _ = ethdb.NewMemDatabase()
 		ldb, _ = ethdb.NewMemDatabase()
 		gspec  = core.Genesis{Alloc: core.GenesisAlloc{testBankAddress: {Balance: testBankFunds}}}

@@ -91,9 +90,8 @@ func TestTxPool(t *testing.T) {
 	)
 	gspec.MustCommit(ldb)
 	// Assemble the test environment
-	blockchain, _ := core.NewBlockChain(sdb, testChainConfig(), pow, evmux, vm.Config{})
-	chainConfig := &params.ChainConfig{HomesteadBlock: new(big.Int)}
-	gchain, _ := core.GenerateChain(chainConfig, genesis, sdb, poolTestBlocks, txPoolTestChainGen)
+	blockchain, _ := core.NewBlockChain(sdb, params.TestChainConfig, ethash.NewFullFaker(), evmux, vm.Config{})
+	gchain, _ := core.GenerateChain(params.TestChainConfig, genesis, sdb, poolTestBlocks, txPoolTestChainGen)
 	if _, err := blockchain.InsertChain(gchain); err != nil {
 		panic(err)
 	}

@@ -104,10 +102,9 @@ func TestTxPool(t *testing.T) {
 		discard: make(chan int, 1),
 		mined:   make(chan int, 1),
 	}
-	lightchain, _ := NewLightChain(odr, testChainConfig(), pow, evmux)
-	lightchain.SetValidator(bproc{})
+	lightchain, _ := NewLightChain(odr, params.TestChainConfig, ethash.NewFullFaker(), evmux)
 	txPermanent = 50
-	pool := NewTxPool(testChainConfig(), evmux, lightchain, relay)
+	pool := NewTxPool(params.TestChainConfig, evmux, lightchain, relay)
 	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
 	defer cancel()
@@ -133,10 +133,10 @@ func TerminalFormat(usecolor bool) Format {
 			}
 		}
 		// try to justify the log output for short messages
-		if len(r.Ctx) > 0 && len(r.Msg) < termMsgJust {
-			b.Write(bytes.Repeat([]byte{' '}, termMsgJust-len(r.Msg)))
+		length := utf8.RuneCountInString(r.Msg)
+		if len(r.Ctx) > 0 && length < termMsgJust {
+			b.Write(bytes.Repeat([]byte{' '}, termMsgJust-length))
 		}

 		// print the keys logfmt style
 		logfmt(b, r.Ctx, color, true)
 		return b.Bytes()
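
Aside (not part of the diff): the padding fix above matters because len() counts bytes, not characters, so multi-byte UTF-8 messages (such as the 🔨 and 🔗 log markers introduced elsewhere in this commit) were over-padded. A standalone illustration:

package main

import (
    "fmt"
    "unicode/utf8"
)

func main() {
    msg := "🔨 mined potential block"
    fmt.Println(len(msg))                    // 26: byte length, 🔨 is a 4-byte rune
    fmt.Println(utf8.RuneCountInString(msg)) // 23: what the terminal actually displays
}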
@@ -17,56 +17,49 @@
 package miner

 import (
-	"fmt"
 	"sync"
 	"sync/atomic"

-	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/consensus"
 	"github.com/ethereum/go-ethereum/log"
-	"github.com/ethereum/go-ethereum/pow"
 )

 type CpuAgent struct {
 	mu sync.Mutex

 	workCh        chan *Work
-	quit          chan struct{}
+	stop          chan struct{}
 	quitCurrentOp chan struct{}
 	returnCh      chan<- *Result

-	index int
-	pow   pow.PoW
+	chain  consensus.ChainReader
+	engine consensus.Engine

 	isMining int32 // isMining indicates whether the agent is currently mining
 }

-func NewCpuAgent(index int, pow pow.PoW) *CpuAgent {
+func NewCpuAgent(chain consensus.ChainReader, engine consensus.Engine) *CpuAgent {
 	miner := &CpuAgent{
-		pow:    pow,
-		index:  index,
-		quit:   make(chan struct{}),
+		chain:  chain,
+		engine: engine,
+		stop:   make(chan struct{}, 1),
 		workCh: make(chan *Work, 1),
 	}

 	return miner
 }

 func (self *CpuAgent) Work() chan<- *Work            { return self.workCh }
-func (self *CpuAgent) Pow() pow.PoW                  { return self.pow }
 func (self *CpuAgent) SetReturnCh(ch chan<- *Result) { self.returnCh = ch }

 func (self *CpuAgent) Stop() {
-	close(self.quit)
+	self.stop <- struct{}{}
 }

 func (self *CpuAgent) Start() {
 	if !atomic.CompareAndSwapInt32(&self.isMining, 0, 1) {
 		return // agent already started
 	}

 	go self.update()
 }

@@ -82,7 +75,7 @@ out:
 			self.quitCurrentOp = make(chan struct{})
 			go self.mine(work, self.quitCurrentOp)
 			self.mu.Unlock()
-		case <-self.quit:
+		case <-self.stop:
 			self.mu.Lock()
 			if self.quitCurrentOp != nil {
 				close(self.quitCurrentOp)

@@ -99,27 +92,27 @@ done:
 		select {
 		case <-self.workCh:
 		default:
-			close(self.workCh)
 			break done
 		}
 	}

 	atomic.StoreInt32(&self.isMining, 0)
 }

 func (self *CpuAgent) mine(work *Work, stop <-chan struct{}) {
-	log.Debug(fmt.Sprintf("(re)started agent[%d]. mining...\n", self.index))
-	// Mine
-	nonce, mixDigest := self.pow.Search(work.Block, stop)
-	if nonce != 0 {
-		block := work.Block.WithMiningResult(types.EncodeNonce(nonce), common.BytesToHash(mixDigest))
-		self.returnCh <- &Result{work, block}
+	if result, err := self.engine.Seal(self.chain, work.Block, stop); result != nil {
+		log.Info("Successfully sealed new block", "number", result.Number(), "hash", result.Hash())
+		self.returnCh <- &Result{work, result}
 	} else {
+		if err != nil {
+			log.Warn("Block sealing failed", "err", err)
+		}
 		self.returnCh <- nil
 	}
 }

 func (self *CpuAgent) GetHashRate() int64 {
-	return int64(self.pow.Hashrate())
+	if pow, ok := self.engine.(consensus.PoW); ok {
+		return int64(pow.Hashrate())
+	}
+	return 0
 }
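
Aside (not part of the diff): the CPU agent no longer searches for a nonce itself; it hands the whole block to engine.Seal and only decides what to do with the result. A hedged sketch of that pattern as a free-standing helper, assuming only the Seal signature shown above:

package example

import (
    "github.com/ethereum/go-ethereum/consensus"
    "github.com/ethereum/go-ethereum/core/types"
)

// sealAsync mirrors the CpuAgent flow: Seal runs until it finds a valid seal or the
// stop channel is closed; the sealed block (nil on abort or error) arrives on result.
func sealAsync(chain consensus.ChainReader, engine consensus.Engine, block *types.Block) (result chan *types.Block, stop chan struct{}) {
    result = make(chan *types.Block, 1)
    stop = make(chan struct{})
    go func() {
        sealed, err := engine.Seal(chain, block, stop)
        if err != nil || sealed == nil {
            result <- nil
            return
        }
        result <- sealed
    }()
    return result, stop
}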
@@ -24,6 +24,7 @@ import (

 	"github.com/ethereum/go-ethereum/accounts"
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/consensus"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"

@@ -32,7 +33,6 @@ import (
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
-	"github.com/ethereum/go-ethereum/pow"
 )

 // Backend wraps all methods required for mining.

@@ -49,24 +49,24 @@ type Miner struct {

 	worker *worker

-	threads  int
 	coinbase common.Address
 	mining   int32
 	eth      Backend
-	pow      pow.PoW
+	engine   consensus.Engine

 	canStart    int32 // can start indicates whether we can start the mining operation
 	shouldStart int32 // should start indicates whether we should start after sync
 }

-func New(eth Backend, config *params.ChainConfig, mux *event.TypeMux, pow pow.PoW) *Miner {
+func New(eth Backend, config *params.ChainConfig, mux *event.TypeMux, engine consensus.Engine) *Miner {
 	miner := &Miner{
 		eth:      eth,
 		mux:      mux,
-		pow:      pow,
-		worker:   newWorker(config, common.Address{}, eth, mux),
+		engine:   engine,
+		worker:   newWorker(config, engine, common.Address{}, eth, mux),
 		canStart: 1,
 	}
+	miner.Register(NewCpuAgent(eth.BlockChain(), engine))
 	go miner.update()

 	return miner

@@ -86,7 +86,7 @@ out:
 			if self.Mining() {
 				self.Stop()
 				atomic.StoreInt32(&self.shouldStart, 1)
-				log.Info(fmt.Sprint("Mining operation aborted due to sync operation"))
+				log.Info("Mining aborted due to sync")
 			}
 		case downloader.DoneEvent, downloader.FailedEvent:
 			shouldStart := atomic.LoadInt32(&self.shouldStart) == 1

@@ -94,7 +94,7 @@ out:
 			atomic.StoreInt32(&self.canStart, 1)
 			atomic.StoreInt32(&self.shouldStart, 0)
 			if shouldStart {
-				self.Start(self.coinbase, self.threads)
+				self.Start(self.coinbase)
 			}
 			// unsubscribe. we're only interested in this event once
 			events.Unsubscribe()

@@ -116,23 +116,18 @@ func (m *Miner) SetGasPrice(price *big.Int) {
 	m.worker.setGasPrice(price)
 }

-func (self *Miner) Start(coinbase common.Address, threads int) {
+func (self *Miner) Start(coinbase common.Address) {
 	atomic.StoreInt32(&self.shouldStart, 1)
 	self.worker.setEtherbase(coinbase)
 	self.coinbase = coinbase
-	self.threads = threads

 	if atomic.LoadInt32(&self.canStart) == 0 {
-		log.Info(fmt.Sprint("Can not start mining operation due to network sync (starts when finished)"))
+		log.Info("Network syncing, will start miner afterwards")
 		return
 	}
 	atomic.StoreInt32(&self.mining, 1)

-	for i := 0; i < threads; i++ {
-		self.worker.register(NewCpuAgent(i, self.pow))
-	}
-
-	log.Info(fmt.Sprintf("Starting mining operation (CPU=%d TOT=%d)\n", threads, len(self.worker.agents)))
+	log.Info("Starting mining operation")
 	self.worker.start()
 	self.worker.commitNewWork()
 }

@@ -159,7 +154,9 @@ func (self *Miner) Mining() bool {
 }

 func (self *Miner) HashRate() (tot int64) {
-	tot += int64(self.pow.Hashrate())
+	if pow, ok := self.engine.(consensus.PoW); ok {
+		tot += int64(pow.Hashrate())
+	}
 	// do we care this might race? is it worth we're rewriting some
 	// aspects of the worker/locking up agents so we can get an accurate
 	// hashrate?
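
Aside (not part of the diff): hashrate only makes sense for proof-of-work engines, so the code above type-asserts the generic engine to the consensus.PoW sub-interface and falls back to zero otherwise. The same guard as a tiny helper, using only what the diff shows:

package example

import "github.com/ethereum/go-ethereum/consensus"

// hashrate reports the engine's hashrate, or 0 for engines that are not proof-of-work.
func hashrate(engine consensus.Engine) int64 {
    if pow, ok := engine.(consensus.PoW); ok {
        return int64(pow.Hashrate())
    }
    return 0
}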
@@ -18,16 +18,16 @@ package miner

 import (
 	"errors"
-	"fmt"
 	"math/big"
 	"sync"
 	"sync/atomic"
 	"time"

 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/consensus"
+	"github.com/ethereum/go-ethereum/consensus/ethash"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/log"
-	"github.com/ethereum/go-ethereum/pow"
 )

 type hashrate struct {

@@ -42,7 +42,8 @@ type RemoteAgent struct {
 	workCh   chan *Work
 	returnCh chan<- *Result

-	pow         pow.PoW
+	chain       consensus.ChainReader
+	engine      consensus.Engine
 	currentWork *Work
 	work        map[common.Hash]*Work

@@ -52,9 +53,10 @@ type RemoteAgent struct {
 	running int32 // running indicates whether the agent is active. Call atomically
 }

-func NewRemoteAgent(pow pow.PoW) *RemoteAgent {
+func NewRemoteAgent(chain consensus.ChainReader, engine consensus.Engine) *RemoteAgent {
 	return &RemoteAgent{
-		pow:      pow,
+		chain:    chain,
+		engine:   engine,
 		work:     make(map[common.Hash]*Work),
 		hashrate: make(map[common.Hash]hashrate),
 	}

@@ -114,7 +116,7 @@ func (a *RemoteAgent) GetWork() ([3]string, error) {
 		block := a.currentWork.Block

 		res[0] = block.HashNoNonce().Hex()
-		seedHash := pow.EthashSeedHash(block.NumberU64())
+		seedHash := ethash.SeedHash(block.NumberU64())
 		res[1] = common.BytesToHash(seedHash).Hex()
 		// Calculate the "target" to be returned to the external miner
 		n := big.NewInt(1)

@@ -129,8 +131,8 @@ func (a *RemoteAgent) GetWork() ([3]string, error) {
 	return res, errors.New("No work available yet, don't panic.")
 }

-// SubmitWork tries to inject a PoW solution tinto the remote agent, returning
-// whether the solution was acceted or not (not can be both a bad PoW as well as
+// SubmitWork tries to inject a pow solution into the remote agent, returning
+// whether the solution was accepted or not (not can be both a bad pow as well as
 // any other error, like no work pending).
 func (a *RemoteAgent) SubmitWork(nonce types.BlockNonce, mixDigest, hash common.Hash) bool {
 	a.mu.Lock()

@@ -139,15 +141,20 @@ func (a *RemoteAgent) SubmitWork(nonce types.BlockNonce, mixDigest, hash common.
 	// Make sure the work submitted is present
 	work := a.work[hash]
 	if work == nil {
-		log.Info(fmt.Sprintf("Work was submitted for %x but no pending work found", hash))
+		log.Info("Work submitted but none pending", "hash", hash)
 		return false
 	}
-	// Make sure the PoW solutions is indeed valid
-	block := work.Block.WithMiningResult(nonce, mixDigest)
-	if err := a.pow.Verify(block); err != nil {
-		log.Warn(fmt.Sprintf("Invalid PoW submitted for %x: %v", hash, err))
+	// Make sure the Engine solutions is indeed valid
+	result := work.Block.Header()
+	result.Nonce = nonce
+	result.MixDigest = mixDigest
+
+	if err := a.engine.VerifySeal(a.chain, result); err != nil {
+		log.Warn("Invalid proof-of-work submitted", "hash", hash, "err", err)
 		return false
 	}
+	block := work.Block.WithSeal(result)

 	// Solutions seems to be valid, return to the miner and notify acceptance
 	a.returnCh <- &Result{work, block}
 	delete(a.work, hash)
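
Aside (not part of the diff): the external-miner flow above becomes "copy the pending header, graft in the submitted nonce and mix digest, let the engine verify the seal, then rebuild the block with WithSeal". A hedged sketch of that sequence, assuming only the VerifySeal and WithSeal signatures visible in this diff:

package example

import (
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/consensus"
    "github.com/ethereum/go-ethereum/core/types"
)

// acceptSolution returns the sealed block if the submitted solution is valid, nil otherwise.
func acceptSolution(chain consensus.ChainReader, engine consensus.Engine, pending *types.Block,
    nonce types.BlockNonce, mixDigest common.Hash) *types.Block {

    header := pending.Header() // a copy, so the pending block itself is left untouched
    header.Nonce = nonce
    header.MixDigest = mixDigest

    if err := engine.VerifySeal(chain, header); err != nil {
        return nil
    }
    return pending.WithSeal(header)
}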
@@ -18,7 +18,6 @@ package miner

 import (
 	"container/ring"
-	"fmt"
 	"sync"

 	"github.com/ethereum/go-ethereum/common"

@@ -80,7 +79,7 @@ func (set *unconfirmedBlocks) Insert(index uint64, hash common.Hash) {
 		set.blocks.Move(-1).Link(item)
 	}
 	// Display a log for the user to notify of a new mined block unconfirmed
-	log.Info(fmt.Sprintf("🔨 mined potential block #%d [%x…], waiting for %d blocks to confirm", index, hash.Bytes()[:4], set.depth))
+	log.Info("🔨 mined potential block", "number", index, "hash", hash)
 }

 // Shift drops all unconfirmed blocks from the set which exceed the unconfirmed sets depth

@@ -100,11 +99,11 @@ func (set *unconfirmedBlocks) Shift(height uint64) {
 		header := set.chain.GetHeaderByNumber(next.index)
 		switch {
 		case header == nil:
-			log.Warn(fmt.Sprintf("failed to retrieve header of mined block #%d [%x…]", next.index, next.hash.Bytes()[:4]))
+			log.Warn("Failed to retrieve header of mined block", "number", next.index, "hash", next.hash)
 		case header.Hash() == next.hash:
-			log.Info(fmt.Sprintf("🔗 mined block #%d [%x…] reached canonical chain", next.index, next.hash.Bytes()[:4]))
+			log.Info("🔗 block reached canonical chain", "number", next.index, "hash", next.hash)
 		default:
-			log.Info(fmt.Sprintf("⑂ mined block #%d [%x…] became a side fork", next.index, next.hash.Bytes()[:4]))
+			log.Info("⑂ block became a side fork", "number", next.index, "hash", next.hash)
 		}
 		// Drop the block out of the ring
 		if set.blocks.Value == set.blocks.Next().Value {
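
Aside (not part of the diff): alongside the consensus refactor, this commit keeps converting fmt.Sprintf-style log messages into the key-value form of the log package, as seen above. The call shape is a message followed by alternating key/value pairs; a standalone example:

package main

import "github.com/ethereum/go-ethereum/log"

func main() {
    number := uint64(1337000)
    hash := "0x85e4286f…" // any stringer or value works as a context value
    log.Info("🔗 block reached canonical chain", "number", number, "hash", hash)
    log.Warn("Failed to retrieve header of mined block", "number", number, "err", "not found")
}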
@@ -26,6 +26,8 @@ import (

 	"github.com/ethereum/go-ethereum/accounts"
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/consensus"
+	"github.com/ethereum/go-ethereum/consensus/misc"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"

@@ -34,7 +36,6 @@ import (
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
-	"github.com/ethereum/go-ethereum/pow"
 	"gopkg.in/fatih/set.v0"
 )

@@ -84,6 +85,7 @@ type Result struct {
 // worker is the main object which takes care of applying messages to the new state
 type worker struct {
 	config *params.ChainConfig
+	engine consensus.Engine

 	mu sync.Mutex

@@ -94,7 +96,6 @@ type worker struct {

 	agents map[Agent]struct{}
 	recv   chan *Result
-	pow    pow.PoW

 	eth   Backend
 	chain *core.BlockChain

@@ -123,9 +124,10 @@ type worker struct {
 	fullValidation bool
 }

-func newWorker(config *params.ChainConfig, coinbase common.Address, eth Backend, mux *event.TypeMux) *worker {
+func newWorker(config *params.ChainConfig, engine consensus.Engine, coinbase common.Address, eth Backend, mux *event.TypeMux) *worker {
 	worker := &worker{
 		config:  config,
+		engine:  engine,
 		eth:     eth,
 		mux:     mux,
 		chainDb: eth.ChainDb(),

@@ -209,16 +211,10 @@ func (self *worker) stop() {
 	self.mu.Lock()
 	defer self.mu.Unlock()
 	if atomic.LoadInt32(&self.mining) == 1 {
-		// Stop all agents.
 		for agent := range self.agents {
 			agent.Stop()
-			// Remove CPU agents.
-			if _, ok := agent.(*CpuAgent); ok {
-				delete(self.agents, agent)
-			}
 		}
 	}

 	atomic.StoreInt32(&self.mining, 0)
 	atomic.StoreInt32(&self.atWork, 0)
 }

@@ -277,7 +273,7 @@ func (self *worker) wait() {

 			if self.fullValidation {
 				if _, err := self.chain.InsertChain(types.Blocks{block}); err != nil {
-					log.Error(fmt.Sprint("mining err", err))
+					log.Error("Mined invalid block", "err", err)
 					continue
 				}
 				go self.mux.Post(core.NewMinedBlockEvent{Block: block})

@@ -285,19 +281,16 @@ func (self *worker) wait() {
 				work.state.Commit(self.config.IsEIP158(block.Number()))
 				parent := self.chain.GetBlock(block.ParentHash(), block.NumberU64()-1)
 				if parent == nil {
-					log.Error(fmt.Sprint("Invalid block found during mining"))
+					log.Error("Invalid block found during mining")
 					continue
 				}
-
-				auxValidator := self.eth.BlockChain().AuxValidator()
-				if err := core.ValidateHeader(self.config, auxValidator, block.Header(), parent.Header(), true, false); err != nil && err != core.BlockFutureErr {
-					log.Error(fmt.Sprint("Invalid header on mined block:", err))
+				if err := self.engine.VerifyHeader(self.chain, block.Header(), false); err != nil {
+					log.Error("Invalid header on mined block", "err", err)
 					continue
 				}

 				stat, err := self.chain.WriteBlock(block)
 				if err != nil {
-					log.Error(fmt.Sprint("error writing block to chain", err))
+					log.Error("Failed writing block to chain", "err", err)
 					continue
 				}

@@ -333,7 +326,7 @@ func (self *worker) wait() {
 						self.mux.Post(logs)
 					}
 					if err := core.WriteBlockReceipts(self.chainDb, block.Hash(), block.NumberU64(), receipts); err != nil {
-						log.Warn(fmt.Sprint("error writing block receipts:", err))
+						log.Warn("Failed writing block receipts", "err", err)
 					}
 				}(block, work.state.Logs(), work.receipts)
 			}
@@ -424,9 +417,9 @@ func (self *worker) commitNewWork() {
 		tstamp = parent.Time().Int64() + 1
 	}
 	// this will ensure we're not going off too far in the future
-	if now := time.Now().Unix(); tstamp > now+4 {
+	if now := time.Now().Unix(); tstamp > now+1 {
 		wait := time.Duration(tstamp-now) * time.Second
-		log.Info(fmt.Sprint("We are too far in the future. Waiting for", wait))
+		log.Info("Mining too far in the future", "wait", common.PrettyDuration(wait))
 		time.Sleep(wait)
 	}

@@ -434,13 +427,19 @@ func (self *worker) commitNewWork() {
 	header := &types.Header{
 		ParentHash: parent.Hash(),
 		Number:     num.Add(num, common.Big1),
-		Difficulty: core.CalcDifficulty(self.config, uint64(tstamp), parent.Time().Uint64(), parent.Number(), parent.Difficulty()),
 		GasLimit:   core.CalcGasLimit(parent),
 		GasUsed:    new(big.Int),
-		Coinbase:   self.coinbase,
 		Extra:      self.extra,
 		Time:       big.NewInt(tstamp),
 	}
+	// Only set the coinbase if we are mining (avoid spurious block rewards)
+	if atomic.LoadInt32(&self.mining) == 1 {
+		header.Coinbase = self.coinbase
+	}
+	if err := self.engine.Prepare(self.chain, header); err != nil {
+		log.Error("Failed to prepare header for mining", "err", err)
+		return
+	}
 	// If we are care about TheDAO hard-fork check whether to override the extra-data or not
 	if daoBlock := self.config.DAOForkBlock; daoBlock != nil {
 		// Check whether the block is among the fork extra-override range

@@ -457,21 +456,19 @@ func (self *worker) commitNewWork() {
 	// Could potentially happen if starting to mine in an odd state.
 	err := self.makeCurrent(parent, header)
 	if err != nil {
-		log.Info(fmt.Sprint("Could not create new env for mining, retrying on next block."))
+		log.Error("Failed to create mining context", "err", err)
 		return
 	}
 	// Create the current work task and check any fork transitions needed
 	work := self.current
 	if self.config.DAOForkSupport && self.config.DAOForkBlock != nil && self.config.DAOForkBlock.Cmp(header.Number) == 0 {
-		core.ApplyDAOHardFork(work.state)
+		misc.ApplyDAOHardFork(work.state)
 	}

 	pending, err := self.eth.TxPool().Pending()
 	if err != nil {
-		log.Error(fmt.Sprintf("Could not fetch pending transactions: %v", err))
+		log.Error("Failed to fetch pending transactions", "err", err)
 		return
 	}

 	txs := types.NewTransactionsByPriceAndNonce(pending)
 	work.commitTransactions(self.mux, txs, self.gasPrice, self.chain)

@@ -488,31 +485,26 @@ func (self *worker) commitNewWork() {
 			break
 		}
 		if err := self.commitUncle(work, uncle.Header()); err != nil {
-			log.Trace(fmt.Sprintf("Bad uncle found and will be removed (%x)\n", hash[:4]))
+			log.Trace("Bad uncle found and will be removed", "hash", hash)
 			log.Trace(fmt.Sprint(uncle))

 			badUncles = append(badUncles, hash)
 		} else {
-			log.Debug(fmt.Sprintf("committing %x as uncle\n", hash[:4]))
+			log.Debug("Committing new uncle to block", "hash", hash)
 			uncles = append(uncles, uncle.Header())
 		}
 	}
 	for _, hash := range badUncles {
 		delete(self.possibleUncles, hash)
 	}
-	if atomic.LoadInt32(&self.mining) == 1 {
-		// commit state root after all state transitions.
-		core.AccumulateRewards(work.state, header, uncles)
-		header.Root = work.state.IntermediateRoot(self.config.IsEIP158(header.Number))
+	// Create the new block to seal with the consensus engine
+	if work.Block, err = self.engine.Finalize(self.chain, header, work.state, work.txs, uncles, work.receipts); err != nil {
+		log.Error("Failed to finalize block for sealing", "err", err)
+		return
 	}
-
-	// create the new block whose nonce will be mined.
-	work.Block = types.NewBlock(header, work.txs, uncles, work.receipts)

 	// We only care about logging if we're actually mining.
 	if atomic.LoadInt32(&self.mining) == 1 {
-		log.Info(fmt.Sprintf("commit new work on block %v with %d txs & %d uncles. Took %v\n", work.Block.Number(), work.tcount, len(uncles), time.Since(tstart)))
+		log.Info("Commit new mining work", "number", work.Block.Number(), "txs", work.tcount, "uncles", len(uncles), "elapsed", common.PrettyDuration(time.Since(tstart)))
 		self.unconfirmed.Shift(work.Block.NumberU64() - 1)
 	}
 	self.push(work)
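
Aside (not part of the diff): commitNewWork now follows an engine-driven lifecycle: Prepare fills in engine-specific header fields (difficulty under ethash), transactions are applied, and Finalize applies the consensus state changes (such as block rewards) and assembles the block to be sealed. A hedged end-to-end sketch using only the Prepare/Finalize/Seal signatures that appear in this diff; transaction execution is elided:

package example

import (
    "github.com/ethereum/go-ethereum/consensus"
    "github.com/ethereum/go-ethereum/core/state"
    "github.com/ethereum/go-ethereum/core/types"
)

func assembleAndSeal(chain consensus.ChainReader, engine consensus.Engine, header *types.Header,
    statedb *state.StateDB, txs []*types.Transaction, uncles []*types.Header,
    receipts []*types.Receipt, stop <-chan struct{}) (*types.Block, error) {

    // Let the engine set its own header fields before any work is done on the block.
    if err := engine.Prepare(chain, header); err != nil {
        return nil, err
    }
    // ... apply txs against statedb here, collecting receipts ...

    // Finalize accumulates rewards/state changes and returns the block ready for sealing.
    block, err := engine.Finalize(chain, header, statedb, txs, uncles, receipts)
    if err != nil {
        return nil, err
    }
    // Seal searches for a valid seal (a PoW nonce under ethash) until stop is closed.
    return engine.Seal(chain, block, stop)
}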
@@ -521,13 +513,13 @@ func (self *worker) commitNewWork() {
 func (self *worker) commitUncle(work *Work, uncle *types.Header) error {
 	hash := uncle.Hash()
 	if work.uncles.Has(hash) {
-		return core.UncleError("Uncle not unique")
+		return core.UncleError("uncle not unique")
 	}
 	if !work.ancestors.Has(uncle.ParentHash) {
-		return core.UncleError(fmt.Sprintf("Uncle's parent unknown (%x)", uncle.ParentHash[0:4]))
+		return core.UncleError(fmt.Sprintf("uncle's parent unknown (%x)", uncle.ParentHash[0:4]))
 	}
 	if work.family.Has(hash) {
-		return core.UncleError(fmt.Sprintf("Uncle already in family (%x)", hash))
+		return core.UncleError(fmt.Sprintf("uncle already in family (%x)", hash))
 	}
 	work.uncles.Add(uncle.Hash())
 	return nil

@@ -552,7 +544,7 @@ func (env *Work) commitTransactions(mux *event.TypeMux, txs *types.TransactionsB
 		// Check whether the tx is replay protected. If we're not in the EIP155 hf
 		// phase, start ignoring the sender until we do.
 		if tx.Protected() && !env.config.IsEIP155(env.header.Number) {
-			log.Trace(fmt.Sprintf("Transaction (%x) is replay protected, but we haven't yet hardforked. Transaction will be ignored until we hardfork.\n", tx.Hash()))
+			log.Trace("Ignoring reply protected transaction", "hash", tx.Hash(), "eip155", env.config.EIP155Block)

 			txs.Pop()
 			continue

@@ -561,7 +553,7 @@ func (env *Work) commitTransactions(mux *event.TypeMux, txs *types.TransactionsB
 		// Ignore any transactions (and accounts subsequently) with low gas limits
 		if tx.GasPrice().Cmp(gasPrice) < 0 && !env.ownedAccounts.Has(from) {
 			// Pop the current low-priced transaction without shifting in the next from the account
-			log.Info(fmt.Sprintf("Transaction (%x) below gas price (tx=%dwei ask=%dwei). All sequential txs from this address(%x) will be ignored\n", tx.Hash().Bytes()[:4], tx.GasPrice(), gasPrice, from[:4]))
+			log.Warn("Transaction below gas price", "sender", from, "hash", tx.Hash(), "have", tx.GasPrice(), "want", gasPrice)

 			env.lowGasTxs = append(env.lowGasTxs, tx)
 			txs.Pop()

@@ -575,12 +567,12 @@ func (env *Work) commitTransactions(mux *event.TypeMux, txs *types.TransactionsB
 		switch {
 		case core.IsGasLimitErr(err):
 			// Pop the current out-of-gas transaction without shifting in the next from the account
-			log.Trace(fmt.Sprintf("Gas limit reached for (%x) in this block. Continue to try smaller txs\n", from[:4]))
+			log.Trace("Gas limit exceeded for current block", "sender", from)
 			txs.Pop()

 		case err != nil:
 			// Pop the current failed transaction without shifting in the next from the account
-			log.Trace(fmt.Sprintf("Transaction (%x) failed, will be removed: %v\n", tx.Hash().Bytes()[:4], err))
+			log.Trace("Transaction failed, will be removed", "hash", tx.Hash(), "err", err)
 			env.failedTxs = append(env.failedTxs, tx)
 			txs.Pop()
@@ -38,6 +38,6 @@
 	TestNetSpuriousDragon = big.NewInt(10)
 	MainNetSpuriousDragon = big.NewInt(2675000)

-	TestNetChainID = big.NewInt(3) // Test net default chain ID
-	MainNetChainID = big.NewInt(1) // main net default chain ID
+	TestNetChainID = big.NewInt(3) // Testnet default chain ID
+	MainNetChainID = big.NewInt(1) // Mainnet default chain ID
 )
pow/pow.go | 58 lines deleted
@@ -1,58 +0,0 @@
-// Copyright 2014 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package pow
-
-import (
-	"math/big"
-
-	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core/types"
-)
-
-type Block interface {
-	Difficulty() *big.Int
-	HashNoNonce() common.Hash
-	Nonce() uint64
-	MixDigest() common.Hash
-	NumberU64() uint64
-}
-
-type ChainManager interface {
-	GetBlockByNumber(uint64) *types.Block
-	CurrentBlock() *types.Block
-}
-
-type PoW interface {
-	Verify(block Block) error
-	Search(block Block, stop <-chan struct{}) (uint64, []byte)
-	Hashrate() float64
-}
-
-// FakePow is a non-validating proof of work implementation.
-// It returns true from Verify for any block.
-type FakePow struct{}
-
-// Verify implements PoW, returning a success for an input.
-func (pow FakePow) Verify(block Block) error { return nil }
-
-// Search implements PoW, returning the nonce 0 for any call.
-func (pow FakePow) Search(block Block, stop <-chan struct{}) (uint64, []byte) {
-	return 0, nil
-}
-
-// Hashrate implements PoW, returning 0.
-func (pow FakePow) Hashrate() float64 { return 0 }
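
Aside (not part of the diff): with the standalone pow package deleted, its test doubles map onto the ethash constructors used throughout this commit. The exact semantics of each helper live in consensus/ethash; the mapping and the comments below are inferred only from the call-site replacements visible in this diff:

package example

import "github.com/ethereum/go-ethereum/consensus/ethash"

func testEngines() {
    _ = ethash.NewFaker()     // used where pow.FakePow{} was: an engine that accepts any seal
    _ = ethash.NewFullFaker() // used where pow.NewTestEthash()/pow.FakePow were in light tests: an even more permissive faker
    _ = ethash.NewShared()    // used where pow.NewSharedEthash() was: a real ethash instance shared between callers
}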
@@ -27,6 +27,7 @@ import (
 	"strings"

 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/consensus/ethash"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"

@@ -35,7 +36,6 @@ import (
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
-	"github.com/ethereum/go-ethereum/pow"
 	"github.com/ethereum/go-ethereum/rlp"
 )

@@ -172,7 +172,7 @@ func runBlockTest(homesteadBlock, daoForkBlock, gasPriceFork *big.Int, test *Blo
 	core.WriteHeadBlockHash(db, test.Genesis.Hash())
 	evmux := new(event.TypeMux)
 	config := &params.ChainConfig{HomesteadBlock: homesteadBlock, DAOForkBlock: daoForkBlock, DAOForkSupport: true, EIP150Block: gasPriceFork}
-	chain, err := core.NewBlockChain(db, config, pow.NewSharedEthash(), evmux, vm.Config{})
+	chain, err := core.NewBlockChain(db, config, ethash.NewShared(), evmux, vm.Config{})
 	if err != nil {
 		return err
 	}
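
Aside (not part of the diff): the block tests keep a single shared ethash engine rather than building one per test, which (as the name NewShared suggests, and as the old NewSharedEthash did) avoids redoing per-engine setup for every chain. A hedged sketch of that reuse pattern, built only from constructors appearing in this commit:

package example

import (
    "github.com/ethereum/go-ethereum/consensus/ethash"
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/vm"
    "github.com/ethereum/go-ethereum/ethdb"
    "github.com/ethereum/go-ethereum/event"
    "github.com/ethereum/go-ethereum/params"
)

// one engine instance reused by every chain the test binary creates
var sharedEngine = ethash.NewShared()

func newTestChain(config *params.ChainConfig) (*core.BlockChain, error) {
    db, _ := ethdb.NewMemDatabase()
    gspec := core.Genesis{Config: config}
    gspec.MustCommit(db)
    return core.NewBlockChain(db, config, sharedEngine, new(event.TypeMux), vm.Config{})
}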
@@ -18,6 +18,7 @@ package trie

 import (
 	"bytes"

 	"github.com/ethereum/go-ethereum/common"
 )