mirror of https://github.com/status-im/op-geth.git

commit d8328a96b4
Merge pull request #17347 from karalabe/miner-notify

cmd, consensus/ethash, eth: miner push notifications
@@ -98,8 +98,9 @@ var (
 		utils.MaxPendingPeersFlag,
 		utils.EtherbaseFlag,
 		utils.GasPriceFlag,
-		utils.MinerThreadsFlag,
 		utils.MiningEnabledFlag,
+		utils.MinerThreadsFlag,
+		utils.MinerNotifyFlag,
 		utils.TargetGasLimitFlag,
 		utils.NATFlag,
 		utils.NoDiscoverFlag,
@@ -185,6 +185,7 @@ var AppHelpFlagGroups = []flagGroup{
 		Flags: []cli.Flag{
 			utils.MiningEnabledFlag,
 			utils.MinerThreadsFlag,
+			utils.MinerNotifyFlag,
 			utils.EtherbaseFlag,
 			utils.TargetGasLimitFlag,
 			utils.GasPriceFlag,
@@ -24,7 +24,6 @@ import (
 	"math/big"
 	"os"
 	"path/filepath"
-	"runtime"
 	"strconv"
 	"strings"
 	"time"
@@ -318,9 +317,13 @@ var (
 		Usage: "Enable mining",
 	}
 	MinerThreadsFlag = cli.IntFlag{
-		Name:  "minerthreads",
+		Name:  "miner.threads",
 		Usage: "Number of CPU threads to use for mining",
-		Value: runtime.NumCPU(),
+		Value: 0,
+	}
+	MinerNotifyFlag = cli.StringFlag{
+		Name:  "miner.notify",
+		Usage: "Comma separated HTTP URL list to notify of new work packages",
 	}
 	TargetGasLimitFlag = cli.Uint64Flag{
 		Name: "targetgaslimit",
@@ -1093,6 +1096,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
 	if ctx.GlobalIsSet(MinerThreadsFlag.Name) {
 		cfg.MinerThreads = ctx.GlobalInt(MinerThreadsFlag.Name)
 	}
+	if ctx.GlobalIsSet(MinerNotifyFlag.Name) {
+		cfg.MinerNotify = strings.Split(ctx.GlobalString(MinerNotifyFlag.Name), ",")
+	}
 	if ctx.GlobalIsSet(DocRootFlag.Name) {
 		cfg.DocRoot = ctx.GlobalString(DocRootFlag.Name)
 	}
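Note: the comma separated flag value is split verbatim into the MinerNotify slice, with no trimming or URL validation at this point. A minimal standalone sketch of that parsing, with purely illustrative endpoint URLs:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical --miner.notify value; both URLs are placeholders.
	flagValue := "http://127.0.0.1:8550,http://192.168.1.5:8550"

	// Mirrors the strings.Split call in SetEthConfig above: each entry
	// becomes one notification target.
	notify := strings.Split(flagValue, ",")
	fmt.Println(notify)
}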
@@ -1293,7 +1299,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
 				DatasetDir:     stack.ResolvePath(eth.DefaultConfig.Ethash.DatasetDir),
 				DatasetsInMem:  eth.DefaultConfig.Ethash.DatasetsInMem,
 				DatasetsOnDisk: eth.DefaultConfig.Ethash.DatasetsOnDisk,
-			})
+			}, nil)
 		}
 	}
 	if gcmode := ctx.GlobalString(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" {
@@ -729,7 +729,7 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) {

 		go func(idx int) {
 			defer pend.Done()
-			ethash := New(Config{cachedir, 0, 1, "", 0, 0, ModeNormal})
+			ethash := New(Config{cachedir, 0, 1, "", 0, 0, ModeNormal}, nil)
 			defer ethash.Close()
 			if err := ethash.VerifySeal(nil, block.Header()); err != nil {
 				t.Errorf("proc %d: block verification failed: %v", idx, err)
@@ -493,7 +493,7 @@ func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Head
 	if !bytes.Equal(header.MixDigest[:], digest) {
 		return errInvalidMixDigest
 	}
-	target := new(big.Int).Div(maxUint256, header.Difficulty)
+	target := new(big.Int).Div(two256, header.Difficulty)
 	if new(big.Int).SetBytes(result).Cmp(target) > 0 {
 		return errInvalidPoW
 	}
@@ -45,11 +45,11 @@ import (
 var ErrInvalidDumpMagic = errors.New("invalid dump magic")

 var (
-	// maxUint256 is a big integer representing 2^256-1
-	maxUint256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
+	// two256 is a big integer representing 2^256
+	two256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))

 	// sharedEthash is a full instance that can be shared between multiple users.
-	sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal})
+	sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal}, nil)

 	// algorithmRevision is the data structure version used for file naming.
 	algorithmRevision = 23
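Note: two256 is the proof-of-work boundary base; the seal check divides it by the header difficulty and requires the hash result to stay at or below the quotient. A minimal, self-contained sketch of that arithmetic, using a made-up difficulty value:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// two256 = 2^256, matching the renamed constant above.
	two256 := new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))

	// Illustrative difficulty; mainnet headers carry far larger values.
	difficulty := big.NewInt(100)

	// The boundary ("target") a sealing result must not exceed.
	target := new(big.Int).Div(two256, difficulty)
	fmt.Printf("target: 0x%064x\n", target)
}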
@@ -447,8 +447,10 @@ type Ethash struct {
 	exitCh chan chan error // Notification channel to exiting backend threads
 }

-// New creates a full sized ethash PoW scheme and starts a background thread for remote mining.
-func New(config Config) *Ethash {
+// New creates a full sized ethash PoW scheme and starts a background thread for
+// remote mining, also optionally notifying a batch of remote services of new work
+// packages.
+func New(config Config, notify []string) *Ethash {
 	if config.CachesInMem <= 0 {
 		log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem)
 		config.CachesInMem = 1
@@ -473,13 +475,13 @@ func New(config Config) *Ethash {
 		submitRateCh: make(chan *hashrate),
 		exitCh:       make(chan chan error),
 	}
-	go ethash.remote()
+	go ethash.remote(notify)
 	return ethash
 }

 // NewTester creates a small sized ethash PoW scheme useful only for testing
 // purposes.
-func NewTester() *Ethash {
+func NewTester(notify []string) *Ethash {
 	ethash := &Ethash{
 		config: Config{PowMode: ModeTest},
 		caches: newlru("cache", 1, newCache),
@@ -494,7 +496,7 @@ func NewTester() *Ethash {
 		submitRateCh: make(chan *hashrate),
 		exitCh:       make(chan chan error),
 	}
-	go ethash.remote()
+	go ethash.remote(notify)
 	return ethash
 }

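Note: with the new constructor signature, callers hand the notification endpoints straight to ethash.New. A hedged sketch of a call site, assuming the go-ethereum import path of this era; the endpoint URL and config values are illustrative:

package main

import (
	"github.com/ethereum/go-ethereum/consensus/ethash"
)

func main() {
	// Illustrative endpoint; pass nil to disable push notifications.
	notify := []string{"http://127.0.0.1:8550"}

	// The second argument is the notify parameter introduced in this diff.
	engine := ethash.New(ethash.Config{
		CachesInMem:  2,
		CachesOnDisk: 3,
		PowMode:      ethash.ModeNormal,
	}, notify)
	defer engine.Close()
}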
@@ -32,17 +32,18 @@ import (

 // Tests that ethash works correctly in test mode.
 func TestTestMode(t *testing.T) {
-	head := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)}
+	header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)}

-	ethash := NewTester()
+	ethash := NewTester(nil)
 	defer ethash.Close()
-	block, err := ethash.Seal(nil, types.NewBlockWithHeader(head), nil)
+
+	block, err := ethash.Seal(nil, types.NewBlockWithHeader(header), nil)
 	if err != nil {
 		t.Fatalf("failed to seal block: %v", err)
 	}
-	head.Nonce = types.EncodeNonce(block.Nonce())
-	head.MixDigest = block.MixDigest()
-	if err := ethash.VerifySeal(nil, head); err != nil {
+	header.Nonce = types.EncodeNonce(block.Nonce())
+	header.MixDigest = block.MixDigest()
+	if err := ethash.VerifySeal(nil, header); err != nil {
 		t.Fatalf("unexpected verification error: %v", err)
 	}
 }
@@ -55,7 +56,7 @@ func TestCacheFileEvict(t *testing.T) {
 		t.Fatal(err)
 	}
 	defer os.RemoveAll(tmpdir)
-	e := New(Config{CachesInMem: 3, CachesOnDisk: 10, CacheDir: tmpdir, PowMode: ModeTest})
+	e := New(Config{CachesInMem: 3, CachesOnDisk: 10, CacheDir: tmpdir, PowMode: ModeTest}, nil)
 	defer e.Close()

 	workers := 8
@@ -78,21 +79,21 @@ func verifyTest(wg *sync.WaitGroup, e *Ethash, workerIndex, epochs int) {
 		if block < 0 {
 			block = 0
 		}
-		head := &types.Header{Number: big.NewInt(block), Difficulty: big.NewInt(100)}
-		e.VerifySeal(nil, head)
+		header := &types.Header{Number: big.NewInt(block), Difficulty: big.NewInt(100)}
+		e.VerifySeal(nil, header)
 	}
 }

 func TestRemoteSealer(t *testing.T) {
-	ethash := NewTester()
+	ethash := NewTester(nil)
 	defer ethash.Close()

 	api := &API{ethash}
 	if _, err := api.GetWork(); err != errNoMiningWork {
 		t.Error("expect to return an error indicate there is no mining work")
 	}
-
-	head := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)}
-	block := types.NewBlockWithHeader(head)
+	header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)}
+	block := types.NewBlockWithHeader(header)

 	// Push new work.
 	ethash.Seal(nil, block, nil)
@@ -108,16 +109,14 @@ func TestRemoteSealer(t *testing.T) {
 	if res := api.SubmitWork(types.BlockNonce{}, block.HashNoNonce(), common.Hash{}); res {
 		t.Error("expect to return false when submit a fake solution")
 	}
-
 	// Push new block with same block number to replace the original one.
-	head = &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(1000)}
-	block = types.NewBlockWithHeader(head)
+	header = &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(1000)}
+	block = types.NewBlockWithHeader(header)
 	ethash.Seal(nil, block, nil)

 	if work, err = api.GetWork(); err != nil || work[0] != block.HashNoNonce().Hex() {
 		t.Error("expect to return the latest pushed work")
 	}
-
 	// Push block with higher block number.
 	newHead := &types.Header{Number: big.NewInt(2), Difficulty: big.NewInt(100)}
 	newBlock := types.NewBlockWithHeader(newHead)
@@ -130,19 +129,18 @@ func TestRemoteSealer(t *testing.T) {

 func TestHashRate(t *testing.T) {
 	var (
-		ethash   = NewTester()
-		api      = &API{ethash}
 		hashrate = []hexutil.Uint64{100, 200, 300}
 		expect   uint64
 		ids      = []common.Hash{common.HexToHash("a"), common.HexToHash("b"), common.HexToHash("c")}
 	)
+	ethash := NewTester(nil)
 	defer ethash.Close()

 	if tot := ethash.Hashrate(); tot != 0 {
 		t.Error("expect the result should be zero")
 	}

+	api := &API{ethash}
 	for i := 0; i < len(hashrate); i += 1 {
 		if res := api.SubmitHashRate(hashrate[i], ids[i]); !res {
 			t.Error("remote miner submit hashrate failed")
@@ -155,9 +153,8 @@ func TestHashRate(t *testing.T) {
 }

 func TestClosedRemoteSealer(t *testing.T) {
-	ethash := NewTester()
-	// Make sure exit channel has been listened
-	time.Sleep(1 * time.Second)
+	ethash := NewTester(nil)
+	time.Sleep(1 * time.Second) // ensure exit channel is listening
 	ethash.Close()

 	api := &API{ethash}

@@ -17,11 +17,14 @@
 package ethash

 import (
+	"bytes"
 	crand "crypto/rand"
+	"encoding/json"
 	"errors"
 	"math"
 	"math/big"
 	"math/rand"
+	"net/http"
 	"runtime"
 	"sync"
 	"time"
@@ -109,7 +112,7 @@ func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan s
 	var (
 		header  = block.Header()
 		hash    = header.HashNoNonce().Bytes()
-		target  = new(big.Int).Div(maxUint256, header.Difficulty)
+		target  = new(big.Int).Div(two256, header.Difficulty)
 		number  = header.Number.Uint64()
 		dataset = ethash.dataset(number)
 	)
@@ -161,40 +164,65 @@ search:
 	runtime.KeepAlive(dataset)
 }

-// remote starts a standalone goroutine to handle remote mining related stuff.
-func (ethash *Ethash) remote() {
+// remote is a standalone goroutine to handle remote mining related stuff.
+func (ethash *Ethash) remote(notify []string) {
 	var (
 		works = make(map[common.Hash]*types.Block)
 		rates = make(map[common.Hash]hashrate)
-		currentWork *types.Block
-	)

-	// getWork returns a work package for external miner.
+		currentBlock *types.Block
+		currentWork  [3]string
+
+		notifyTransport = &http.Transport{}
+		notifyClient    = &http.Client{
+			Transport: notifyTransport,
+			Timeout:   time.Second,
+		}
+		notifyReqs = make([]*http.Request, len(notify))
+	)
+	// notifyWork notifies all the specified mining endpoints of the availability of
+	// new work to be processed.
+	notifyWork := func() {
+		work := currentWork
+		blob, _ := json.Marshal(work)
+
+		for i, url := range notify {
+			// Terminate any previously pending request and create the new work
+			if notifyReqs[i] != nil {
+				notifyTransport.CancelRequest(notifyReqs[i])
+			}
+			notifyReqs[i], _ = http.NewRequest("POST", url, bytes.NewReader(blob))
+			notifyReqs[i].Header.Set("Content-Type", "application/json")
+
+			// Push the new work concurrently to all the remote nodes
+			go func(req *http.Request, url string) {
+				res, err := notifyClient.Do(req)
+				if err != nil {
+					log.Warn("Failed to notify remote miner", "err", err)
+				} else {
+					log.Trace("Notified remote miner", "miner", url, "hash", log.Lazy{Fn: func() common.Hash { return common.HexToHash(work[0]) }}, "target", work[2])
+					res.Body.Close()
+				}
+			}(notifyReqs[i], url)
+		}
+	}
+	// makeWork creates a work package for external miner.
 	//
 	// The work package consists of 3 strings:
 	//   result[0], 32 bytes hex encoded current block header pow-hash
 	//   result[1], 32 bytes hex encoded seed hash used for DAG
 	//   result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
-	getWork := func() ([3]string, error) {
-		var res [3]string
-		if currentWork == nil {
-			return res, errNoMiningWork
-		}
-		res[0] = currentWork.HashNoNonce().Hex()
-		res[1] = common.BytesToHash(SeedHash(currentWork.NumberU64())).Hex()
-
-		// Calculate the "target" to be returned to the external sealer.
-		n := big.NewInt(1)
-		n.Lsh(n, 255)
-		n.Div(n, currentWork.Difficulty())
-		n.Lsh(n, 1)
-		res[2] = common.BytesToHash(n.Bytes()).Hex()
+	makeWork := func(block *types.Block) {
+		hash := block.HashNoNonce()
+
+		currentWork[0] = hash.Hex()
+		currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex()
+		currentWork[2] = common.BytesToHash(new(big.Int).Div(two256, block.Difficulty()).Bytes()).Hex()

 		// Trace the seal work fetched by remote sealer.
-		works[currentWork.HashNoNonce()] = currentWork
-		return res, nil
+		currentBlock = block
+		works[hash] = block
 	}

 	// submitWork verifies the submitted pow solution, returning
 	// whether the solution was accepted or not (not can be both a bad pow as well as
 	// any other error, like no pending work or stale mining result).
@@ -238,21 +266,23 @@ func (ethash *Ethash) remote() {
 	for {
 		select {
 		case block := <-ethash.workCh:
-			if currentWork != nil && block.ParentHash() != currentWork.ParentHash() {
+			if currentBlock != nil && block.ParentHash() != currentBlock.ParentHash() {
 				// Start new round mining, throw out all previous work.
 				works = make(map[common.Hash]*types.Block)
 			}
 			// Update current work with new received block.
 			// Note same work can be past twice, happens when changing CPU threads.
-			currentWork = block
+			makeWork(block)
+
+			// Notify and requested URLs of the new work availability
+			notifyWork()

 		case work := <-ethash.fetchWorkCh:
 			// Return current mining work to remote miner.
-			miningWork, err := getWork()
-			if err != nil {
-				work.errc <- err
+			if currentBlock == nil {
+				work.errc <- errNoMiningWork
 			} else {
-				work.res <- miningWork
+				work.res <- currentWork
 			}

 		case result := <-ethash.submitWorkCh:
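Note: each notification is simply an HTTP POST whose body is the JSON encoding of the three-element work array built by makeWork, sent with a Content-Type of application/json and a one second client timeout. A tiny sketch of the payload shape, using placeholder hex values:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Placeholders standing in for the pow-hash, the seed hash and the target.
	work := [3]string{
		"0x1111111111111111111111111111111111111111111111111111111111111111",
		"0x2222222222222222222222222222222222222222222222222222222222222222",
		"0x00000000000000000000000000000000000000000000000000068db8bac710cb",
	}
	blob, _ := json.Marshal(work)
	fmt.Println(string(blob)) // a JSON array of three hex strings
}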
@@ -0,0 +1,115 @@
+package ethash
+
+import (
+	"encoding/json"
+	"io/ioutil"
+	"math/big"
+	"net"
+	"net/http"
+	"testing"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+)
+
+// Tests whether remote HTTP servers are correctly notified of new work.
+func TestRemoteNotify(t *testing.T) {
+	// Start a simple webserver to capture notifications
+	sink := make(chan [3]string)
+
+	server := &http.Server{
+		Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+			blob, err := ioutil.ReadAll(req.Body)
+			if err != nil {
+				t.Fatalf("failed to read miner notification: %v", err)
+			}
+			var work [3]string
+			if err := json.Unmarshal(blob, &work); err != nil {
+				t.Fatalf("failed to unmarshal miner notification: %v", err)
+			}
+			sink <- work
+		}),
+	}
+	// Open a custom listener to extract its local address
+	listener, err := net.Listen("tcp", "localhost:0")
+	if err != nil {
+		t.Fatalf("failed to open notification server: %v", err)
+	}
+	defer listener.Close()
+
+	go server.Serve(listener)
+
+	// Create the custom ethash engine
+	ethash := NewTester([]string{"http://" + listener.Addr().String()})
+	defer ethash.Close()
+
+	// Stream a work task and ensure the notification bubbles out
+	header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)}
+	block := types.NewBlockWithHeader(header)
+
+	ethash.Seal(nil, block, nil)
+	select {
+	case work := <-sink:
+		if want := header.HashNoNonce().Hex(); work[0] != want {
+			t.Errorf("work packet hash mismatch: have %s, want %s", work[0], want)
+		}
+		if want := common.BytesToHash(SeedHash(header.Number.Uint64())).Hex(); work[1] != want {
+			t.Errorf("work packet seed mismatch: have %s, want %s", work[1], want)
+		}
+		target := new(big.Int).Div(new(big.Int).Lsh(big.NewInt(1), 256), header.Difficulty)
+		if want := common.BytesToHash(target.Bytes()).Hex(); work[2] != want {
+			t.Errorf("work packet target mismatch: have %s, want %s", work[2], want)
+		}
+	case <-time.After(time.Second):
+		t.Fatalf("notification timed out")
+	}
+}
+
+// Tests that pushing work packages fast to the miner doesn't cause any daa race
+// issues in the notifications.
+func TestRemoteMultiNotify(t *testing.T) {
+	// Start a simple webserver to capture notifications
+	sink := make(chan [3]string, 1024)
+
+	server := &http.Server{
+		Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+			blob, err := ioutil.ReadAll(req.Body)
+			if err != nil {
+				t.Fatalf("failed to read miner notification: %v", err)
+			}
+			var work [3]string
+			if err := json.Unmarshal(blob, &work); err != nil {
+				t.Fatalf("failed to unmarshal miner notification: %v", err)
+			}
+			sink <- work
+		}),
+	}
+	// Open a custom listener to extract its local address
+	listener, err := net.Listen("tcp", "localhost:0")
+	if err != nil {
+		t.Fatalf("failed to open notification server: %v", err)
+	}
+	defer listener.Close()
+
+	go server.Serve(listener)
+
+	// Create the custom ethash engine
+	ethash := NewTester([]string{"http://" + listener.Addr().String()})
+	defer ethash.Close()
+
+	// Stream a lot of work task and ensure all the notifications bubble out
+	for i := 0; i < cap(sink); i++ {
+		header := &types.Header{Number: big.NewInt(int64(i)), Difficulty: big.NewInt(100)}
+		block := types.NewBlockWithHeader(header)
+
+		ethash.Seal(nil, block, nil)
+	}
+	for i := 0; i < cap(sink); i++ {
+		select {
+		case <-sink:
+		case <-time.After(250 * time.Millisecond):
+			t.Fatalf("notification %d timed out", i)
+		}
+	}
+}
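For readers who want to observe the notifications outside the test harness, a minimal standalone receiver in the same spirit as the tests above; the listen port is illustrative and any reachable HTTP URL passed via --miner.notify would do:

package main

import (
	"encoding/json"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// Decode and log every work package pushed by the node.
	http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
		blob, err := ioutil.ReadAll(req.Body)
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		var work [3]string
		if err := json.Unmarshal(blob, &work); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		log.Printf("new work: hash=%s seed=%s target=%s", work[0], work[1], work[2])
	})
	log.Fatal(http.ListenAndServe(":8550", nil))
}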
@@ -124,7 +124,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
 		chainConfig:    chainConfig,
 		eventMux:       ctx.EventMux,
 		accountManager: ctx.AccountManager,
-		engine:         CreateConsensusEngine(ctx, &config.Ethash, chainConfig, chainDb),
+		engine:         CreateConsensusEngine(ctx, chainConfig, &config.Ethash, config.MinerNotify, chainDb),
 		shutdownChan:   make(chan bool),
 		networkID:      config.NetworkId,
 		gasPrice:       config.GasPrice,
@@ -210,7 +210,7 @@ func CreateDB(ctx *node.ServiceContext, config *Config, name string) (ethdb.Data
 }

 // CreateConsensusEngine creates the required type of consensus engine instance for an Ethereum service
-func CreateConsensusEngine(ctx *node.ServiceContext, config *ethash.Config, chainConfig *params.ChainConfig, db ethdb.Database) consensus.Engine {
+func CreateConsensusEngine(ctx *node.ServiceContext, chainConfig *params.ChainConfig, config *ethash.Config, notify []string, db ethdb.Database) consensus.Engine {
 	// If proof-of-authority is requested, set it up
 	if chainConfig.Clique != nil {
 		return clique.New(chainConfig.Clique, db)
@@ -222,7 +222,7 @@ func CreateConsensusEngine(ctx *node.ServiceContext, config *ethash.Config, chai
 		return ethash.NewFaker()
 	case ethash.ModeTest:
 		log.Warn("Ethash used in test mode")
-		return ethash.NewTester()
+		return ethash.NewTester(nil)
 	case ethash.ModeShared:
 		log.Warn("Ethash used in shared mode")
 		return ethash.NewShared()
@@ -234,7 +234,7 @@ func CreateConsensusEngine(ctx *node.ServiceContext, config *ethash.Config, chai
 			DatasetDir:     config.DatasetDir,
 			DatasetsInMem:  config.DatasetsInMem,
 			DatasetsOnDisk: config.DatasetsOnDisk,
-		})
+		}, notify)
 		engine.SetThreads(-1) // Disable CPU mining
 		return engine
 	}
@@ -97,6 +97,7 @@ type Config struct {
 	// Mining-related options
 	Etherbase    common.Address `toml:",omitempty"`
 	MinerThreads int            `toml:",omitempty"`
+	MinerNotify  []string       `toml:",omitempty"`
 	ExtraData    []byte         `toml:",omitempty"`
 	GasPrice     *big.Int

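Note: embedders of the eth service can set the new field directly instead of going through the CLI flag. A hedged sketch, assuming the eth package layout at the time of this commit; the endpoint is illustrative:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/eth"
)

func main() {
	// Start from the library defaults and opt into work push notifications.
	cfg := eth.DefaultConfig
	cfg.MinerNotify = []string{"http://127.0.0.1:8550"} // illustrative endpoint

	fmt.Println(cfg.MinerNotify)
}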
@@ -102,7 +102,7 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
 		peers:          peers,
 		reqDist:        newRequestDistributor(peers, quitSync),
 		accountManager: ctx.AccountManager,
-		engine:         eth.CreateConsensusEngine(ctx, &config.Ethash, chainConfig, chainDb),
+		engine:         eth.CreateConsensusEngine(ctx, chainConfig, &config.Ethash, nil, chainDb),
 		shutdownChan:   make(chan bool),
 		networkId:      config.NetworkId,
 		bloomRequests:  make(chan chan *bloombits.Retrieval),