Update geth to 1.8.14 (#1171)
* Update to geth v1.8.14
* Remove patches that were merged upstream
* Apply patches before 0016
* Fix 0016 and apply it
* Apply everything else
* Pass gas limit as a second argument to simulated backend (see the sketch below)
This commit is contained in:
parent 3521c2ac45
commit f150d678de
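Because NewSimulatedBackend now takes the block gas limit as a second argument (see the simulated backend and test-file hunks below), every caller has to be updated. A minimal sketch of the new call under the vendored go-ethereum v1.8.14 API; the key generation and balance here are illustrative only, not taken from this commit:

    package main

    import (
    	"math"
    	"math/big"

    	"github.com/ethereum/go-ethereum/accounts/abi/bind"
    	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
    	"github.com/ethereum/go-ethereum/core"
    	"github.com/ethereum/go-ethereum/crypto"
    )

    func main() {
    	// Illustrative funded account for the simulated chain.
    	key, _ := crypto.GenerateKey()
    	auth := bind.NewKeyedTransactor(key)

    	alloc := make(core.GenesisAlloc)
    	alloc[auth.From] = core.GenesisAccount{Balance: big.NewInt(133700000)}

    	// v1.8.13: backends.NewSimulatedBackend(alloc)
    	// v1.8.14: the genesis block gas limit is passed explicitly.
    	backend := backends.NewSimulatedBackend(alloc, math.MaxInt64)
    	_ = backend
    }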
@@ -88,7 +88,7 @@
revision = "935e0e8a636ca4ba70b713f3e38a19e1b77739e8"

[[projects]]
digest = "1:b2f36e5422bda96da299f9e48c7dcdd3177b18477297f84ff1a45ec3032c2994"
digest = "1:d670c508dc01984c721d0d968936412e3edcd8ca58caf82fcfd0df9044013a0f"
name = "github.com/ethereum/go-ethereum"
packages = [
".",
@@ -104,6 +104,7 @@
"common/hexutil",
"common/math",
"common/mclock",
"common/prque",
"consensus",
"consensus/clique",
"consensus/ethash",
@@ -155,8 +156,8 @@
"whisper/whisperv6",
]
pruneopts = "T"
revision = "225171a4bfcc16bd12a1906b1e0d43d0b18c353b"
version = "v1.8.13"
revision = "316fc7ecfc10d06603f1358c1f4c1020ec36dd2a"
version = "v1.8.14"

[[projects]]
digest = "1:5ac7ecd476a2355a5201229081df2e5f57333ecf703e1f69dde699ae34169c1b"
@@ -923,14 +924,15 @@

[[projects]]
branch = "master"
digest = "1:13e050f402d48dc1e296490a9b354c78ab283e0f287531286ec08d33ad604067"
digest = "1:40800a1a379be16c1ecc68d1c8cc912f6e468f95238117be9b54979d0cbb6819"
name = "golang.org/x/sys"
packages = [
"cpu",
"unix",
"windows",
]
pruneopts = "NUT"
revision = "2c42eef0765b9837fbdab12011af7830f55f88f0"
revision = "11551d06cbcc94edc80a0facaccbda56473c19c1"

[[projects]]
branch = "master"
@@ -27,7 +27,7 @@ ignored = [ "github.com/ethereum/go-ethereum/ethapi" ]

[[constraint]]
name = "github.com/ethereum/go-ethereum"
version = "v1.8.13"
version = "=v1.8.14"

# * * * * * `go-ethereum` dependencies * * * * *
# Pinned down SHAs from `go-ethereum/vendor/vendor.json`
@@ -1,7 +1,7 @@
diff --git i/eth/downloader/downloader.go w/eth/downloader/downloader.go
index 43f0e3db9..b337f95c9 100644
--- i/eth/downloader/downloader.go
+++ w/eth/downloader/downloader.go
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index fbde9c6c..c0013a11 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -143,6 +143,8 @@ type Downloader struct {
quitCh chan struct{} // Quit channel to signal termination
quitLock sync.RWMutex // Lock to prevent double closes
@@ -57,10 +57,10 @@ index 43f0e3db9..b337f95c9 100644
}

// fetchHeight retrieves the head header of the remote peer to aid in estimating
diff --git i/eth/handler.go w/eth/handler.go
index 4069359c9..da9ebb243 100644
--- i/eth/handler.go
+++ w/eth/handler.go
diff --git a/eth/handler.go b/eth/handler.go
index f89f68c9..5522b0d9 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -230,6 +230,9 @@ func (pm *ProtocolManager) Stop() {
// Quit fetcher, txsyncLoop.
close(pm.quitSync)
@@ -71,10 +71,10 @@ index 4069359c9..da9ebb243 100644
// Disconnect existing sessions.
// This also closes the gate for any new registrations on the peer set.
// sessions which are already established but not added to pm.peers yet
diff --git i/eth/sync.go w/eth/sync.go
index e49e40087..4367434a6 100644
--- i/eth/sync.go
+++ w/eth/sync.go
diff --git a/eth/sync.go b/eth/sync.go
index e49e4008..4367434a 100644
--- a/eth/sync.go
+++ b/eth/sync.go
@@ -135,7 +135,6 @@ func (pm *ProtocolManager) syncer() {
// Start and ensure cleanup of sync mechanisms
pm.fetcher.Start()
@@ -83,10 +83,10 @@ index e49e40087..4367434a6 100644

// Wait for different events to fire synchronisation operations
forceSync := time.NewTicker(forceSyncCycle)
diff --git i/les/backend.go w/les/backend.go
index 6a324cb04..e3844bf84 100644
--- i/les/backend.go
+++ w/les/backend.go
diff --git a/les/backend.go b/les/backend.go
index 00025ba6..38c36da6 100644
--- a/les/backend.go
+++ b/les/backend.go
@@ -20,7 +20,6 @@ package les
import (
"fmt"
@@ -95,7 +95,7 @@ index 6a324cb04..e3844bf84 100644

"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
@@ -250,7 +249,6 @@ func (s *LightEthereum) Stop() error {
@@ -253,7 +252,6 @@ func (s *LightEthereum) Stop() error {

s.eventMux.Stop()

@@ -103,13 +103,13 @@ index 6a324cb04..e3844bf84 100644
s.chainDb.Close()
close(s.shutdownChan)

diff --git i/les/handler.go w/les/handler.go
index 9627f392b..f2bbe899f 100644
--- i/les/handler.go
+++ w/les/handler.go
@@ -241,6 +241,9 @@ func (pm *ProtocolManager) Stop() {

close(pm.quitSync) // quits syncer, fetcher
diff --git a/les/handler.go b/les/handler.go
index ca40eaab..cc15d68c 100644
--- a/les/handler.go
+++ b/les/handler.go
@@ -194,6 +194,9 @@ func (pm *ProtocolManager) Stop() {
pm.clientPool.stop()
}

+ // Stop downloader and make sure that all the running downloads are complete.
+ pm.downloader.Terminate()
@@ -117,10 +117,10 @@ index 9627f392b..f2bbe899f 100644
// Disconnect existing sessions.
// This also closes the gate for any new registrations on the peer set.
// sessions which are already established but not added to pm.peers yet
diff --git i/les/sync.go w/les/sync.go
index c3d37e2f3..fc1f076c7 100644
--- i/les/sync.go
+++ w/les/sync.go
diff --git a/les/sync.go b/les/sync.go
index 1ac64558..eb155377 100644
--- a/les/sync.go
+++ b/les/sync.go
@@ -31,7 +31,6 @@ func (pm *ProtocolManager) syncer() {
// Start and ensure cleanup of sync mechanisms
//pm.fetcher.Start()
@@ -1,13 +0,0 @@
diff --git a/p2p/discv5/net.go b/p2p/discv5/net.go
index d0eae28f..113dabc8 100644
--- a/p2p/discv5/net.go
+++ b/p2p/discv5/net.go
@@ -678,7 +678,7 @@ func (net *Network) refresh(done chan<- struct{}) {
}
if len(seeds) == 0 {
log.Trace("no seed nodes found")
- close(done)
+ time.AfterFunc(time.Second*10, func() { close(done) })
return
}
for _, n := range seeds {
@@ -1,31 +0,0 @@
diff --git a/light/postprocess.go b/light/postprocess.go
index 2090a9d0..6e18010c 100644
--- a/light/postprocess.go
+++ b/light/postprocess.go
@@ -59,18 +59,18 @@ type trustedCheckpoint struct {
var (
mainnetCheckpoint = trustedCheckpoint{
name: "mainnet",
- sectionIdx: 179,
- sectionHead: common.HexToHash("ae778e455492db1183e566fa0c67f954d256fdd08618f6d5a393b0e24576d0ea"),
- chtRoot: common.HexToHash("646b338f9ca74d936225338916be53710ec84020b89946004a8605f04c817f16"),
- bloomTrieRoot: common.HexToHash("d0f978f5dbc86e5bf931d8dd5b2ecbebbda6dc78f8896af6a27b46a3ced0ac25"),
+ sectionIdx: 187,
+ sectionHead: common.HexToHash("e6baa034efa31562d71ff23676512dec6562c1ad0301e08843b907e81958c696"),
+ chtRoot: common.HexToHash("28001955219719cf06de1b08648969139d123a9835fc760547a1e4dabdabc15a"),
+ bloomTrieRoot: common.HexToHash("395ca2373fc662720ac6b58b3bbe71f68aa0f38b63b2d3553dd32ff3c51eebc4"),
}

ropstenCheckpoint = trustedCheckpoint{
name: "ropsten",
- sectionIdx: 107,
- sectionHead: common.HexToHash("e1988f95399debf45b873e065e5cd61b416ef2e2e5deec5a6f87c3127086e1ce"),
- chtRoot: common.HexToHash("15cba18e4de0ab1e95e202625199ba30147aec8b0b70384b66ebea31ba6a18e0"),
- bloomTrieRoot: common.HexToHash("e00fa6389b2e597d9df52172cd8e936879eed0fca4fa59db99e2c8ed682562f2"),
+ sectionIdx: 117,
+ sectionHead: common.HexToHash("9529b38631ae30783f56cbe4c3b9f07575b770ecba4f6e20a274b1e2f40fede1"),
+ chtRoot: common.HexToHash("6f48e9f101f1fac98e7d74fbbcc4fda138358271ffd974d40d2506f0308bb363"),
+ bloomTrieRoot: common.HexToHash("8242342e66e942c0cd893484e6736b9862ceb88b43ca344bb06a8285ac1b6d64"),
}
)
@@ -3,6 +3,7 @@ package registry
import (
"context"
"crypto/ecdsa"
"math"
"math/big"
"testing"
@@ -44,7 +45,7 @@ func (s *VerifierTestSuite) setupBackendAndContract() {
auth := bind.NewKeyedTransactor(s.privKey)
alloc := make(core.GenesisAlloc)
alloc[auth.From] = core.GenesisAccount{Balance: big.NewInt(133700000)}
s.backend = backends.NewSimulatedBackend(alloc)
s.backend = backends.NewSimulatedBackend(alloc, math.MaxInt64)

s.contractAddress, _, s.registry, err = DeployRegistry(auth, s.backend)
s.Require().NoError(err)
@@ -3,6 +3,7 @@ package transactions
import (
"context"
"errors"
"math"
"math/big"
"reflect"
"testing"
@@ -264,7 +265,7 @@ func (s *TransactorSuite) TestContractCreation() {
genesis := core.GenesisAlloc{
testaddr: {Balance: big.NewInt(100000000000)},
}
backend := backends.NewSimulatedBackend(genesis)
backend := backends.NewSimulatedBackend(genesis, math.MaxInt64)
selectedAccount := &account.SelectedExtKey{
Address: testaddr,
AccountKey: &keystore.Key{PrivateKey: key},
@@ -30,8 +30,6 @@ matrix:
go: 1.10.x
script:
- unset -f cd # workaround for https://github.com/travis-ci/travis-ci/issues/8703
- brew update
- brew cask install osxfuse
- go run build/ci.go install
- go run build/ci.go test -coverage $TEST_PACKAGES
@@ -34,7 +34,7 @@ The go-ethereum project comes with several wrappers/executables found in the `cm

| Command | Description |
|:----------:|-------------|
| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default) archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options. |
| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options. |
| `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI) with expanded functionality if the contract bytecode is also available. However it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://github.com/ethereum/go-ethereum/wiki/Native-DApps:-Go-bindings-to-Ethereum-contracts) wiki page for details. |
| `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. |
| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug`). |
@@ -69,7 +69,7 @@ This command will:
* Start up Geth's built-in interactive [JavaScript console](https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console),
(via the trailing `console` subcommand) through which you can invoke all official [`web3` methods](https://github.com/ethereum/wiki/wiki/JavaScript-API)
as well as Geth's own [management APIs](https://github.com/ethereum/go-ethereum/wiki/Management-APIs).
This too is optional and if you leave it out you can always attach to an already running Geth instance
This tool is optional and if you leave it out you can always attach to an already running Geth instance
with `geth attach`.

### Full node on the Ethereum test network
@@ -65,9 +65,9 @@ type SimulatedBackend struct {

// NewSimulatedBackend creates a new binding backend using a simulated blockchain
// for testing purposes.
func NewSimulatedBackend(alloc core.GenesisAlloc) *SimulatedBackend {
func NewSimulatedBackend(alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend {
database := ethdb.NewMemDatabase()
genesis := core.Genesis{Config: params.AllEthashProtocolChanges, Alloc: alloc}
genesis := core.Genesis{Config: params.AllEthashProtocolChanges, GasLimit: gasLimit, Alloc: alloc}
genesis.MustCommit(database)
blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{})
@@ -106,7 +106,7 @@ type Wallet interface {
// or optionally with the aid of any location metadata from the embedded URL field.
//
// If the wallet requires additional authentication to sign the request (e.g.
// a password to decrypt the account, or a PIN code o verify the transaction),
// a password to decrypt the account, or a PIN code to verify the transaction),
// an AuthNeededError instance will be returned, containing infos for the user
// about which fields or actions are needed. The user may retry by providing
// the needed details via SignTxWithPassphrase, or by other means (e.g. unlock
@@ -56,9 +56,9 @@ func (fc *fileCache) scan(keyDir string) (mapset.Set, mapset.Set, mapset.Set, er

var newLastMod time.Time
for _, fi := range files {
// Skip any non-key files from the folder
path := filepath.Join(keyDir, fi.Name())
if skipKeyFile(fi) {
// Skip any non-key files from the folder
if nonKeyFile(fi) {
log.Trace("Ignoring file on account scan", "path", path)
continue
}
@@ -88,8 +88,8 @@ func (fc *fileCache) scan(keyDir string) (mapset.Set, mapset.Set, mapset.Set, er
return creates, deletes, updates, nil
}

// skipKeyFile ignores editor backups, hidden files and folders/symlinks.
func skipKeyFile(fi os.FileInfo) bool {
// nonKeyFile ignores editor backups, hidden files and folders/symlinks.
func nonKeyFile(fi os.FileInfo) bool {
// Skip editor backups and UNIX-style hidden files.
if strings.HasSuffix(fi.Name(), "~") || strings.HasPrefix(fi.Name(), ".") {
return true
@@ -147,6 +147,9 @@ var (
debEthereum,
}

// Packages to be cross-compiled by the xgo command
allCrossCompiledArchiveFiles = append(allToolsArchiveFiles, swarmArchiveFiles...)

// Distros for which packages are created.
// Note: vivid is unsupported because there is no golang-1.6 package for it.
// Note: wily is unsupported because it was officially deprecated on lanchpad.
@@ -641,17 +644,6 @@ func (meta debMetadata) ExeName(exe debExecutable) string {
return exe.Package()
}

// EthereumSwarmPackageName returns the name of the swarm package based on
// environment, e.g. "ethereum-swarm-unstable", or "ethereum-swarm".
// This is needed so that we make sure that "ethereum" package,
// depends on and installs "ethereum-swarm"
func (meta debMetadata) EthereumSwarmPackageName() string {
if isUnstableBuild(meta.Env) {
return debSwarm.Name + "-unstable"
}
return debSwarm.Name
}

// ExeConflicts returns the content of the Conflicts field
// for executable packages.
func (meta debMetadata) ExeConflicts(exe debExecutable) string {
@@ -1009,7 +1001,7 @@ func doXgo(cmdline []string) {

if *alltools {
args = append(args, []string{"--dest", GOBIN}...)
for _, res := range allToolsArchiveFiles {
for _, res := range allCrossCompiledArchiveFiles {
if strings.HasPrefix(res, GOBIN) {
// Binary tool found, cross build it explicitly
args = append(args, "./"+filepath.Join("cmd", filepath.Base(res)))
@@ -10,7 +10,7 @@ Vcs-Browser: https://github.com/ethereum/go-ethereum

Package: {{.Name}}
Architecture: any
Depends: ${misc:Depends}, {{.EthereumSwarmPackageName}}, {{.ExeList}}
Depends: ${misc:Depends}, {{.ExeList}}
Description: Meta-package to install geth, swarm, and other tools
Meta-package to install geth, swarm and other tools
@@ -50,6 +50,6 @@ func compileCmd(ctx *cli.Context) error {
if err != nil {
return err
}
fmt.Fprintln(ctx.App.Writer, bin)
fmt.Println(bin)
return nil
}
@@ -45,6 +45,6 @@ func disasmCmd(ctx *cli.Context) error {
}

code := strings.TrimSpace(string(in[:]))
fmt.Fprintf(ctx.App.Writer, "%v\n", code)
fmt.Printf("%v\n", code)
return asm.PrintDisassembled(code)
}
@@ -30,11 +30,10 @@ func Compile(fn string, src []byte, debug bool) (string, error) {
bin, compileErrors := compiler.Compile()
if len(compileErrors) > 0 {
// report errors
errs := ""
for _, err := range compileErrors {
errs += fmt.Sprintf("%s:%v\n", fn, err)
fmt.Printf("%s:%v\n", fn, err)
}
return "", errors.New(errs + "compiling failed\n")
return "", errors.New("compiling failed")
}
return bin, nil
}
@@ -128,13 +128,13 @@ func runCmd(ctx *cli.Context) error {
if ctx.GlobalString(CodeFileFlag.Name) == "-" {
//Try reading from stdin
if hexcode, err = ioutil.ReadAll(os.Stdin); err != nil {
fmt.Fprintf(ctx.App.ErrWriter, "Could not load code from stdin: %v\n", err)
fmt.Printf("Could not load code from stdin: %v\n", err)
os.Exit(1)
}
} else {
// Codefile with hex assembly
if hexcode, err = ioutil.ReadFile(ctx.GlobalString(CodeFileFlag.Name)); err != nil {
fmt.Fprintf(ctx.App.ErrWriter, "Could not load code from file: %v\n", err)
fmt.Printf("Could not load code from file: %v\n", err)
os.Exit(1)
}
}
@@ -172,11 +172,11 @@ func runCmd(ctx *cli.Context) error {
if cpuProfilePath := ctx.GlobalString(CPUProfileFlag.Name); cpuProfilePath != "" {
f, err := os.Create(cpuProfilePath)
if err != nil {
fmt.Fprintf(ctx.App.ErrWriter, "could not create CPU profile: %v\n", err)
fmt.Println("could not create CPU profile: ", err)
os.Exit(1)
}
if err := pprof.StartCPUProfile(f); err != nil {
fmt.Fprintf(ctx.App.ErrWriter, "could not start CPU profile: %v\n", err)
fmt.Println("could not start CPU profile: ", err)
os.Exit(1)
}
defer pprof.StopCPUProfile()
@@ -200,17 +200,17 @@ func runCmd(ctx *cli.Context) error {

if ctx.GlobalBool(DumpFlag.Name) {
statedb.IntermediateRoot(true)
fmt.Fprintln(ctx.App.Writer, string(statedb.Dump()))
fmt.Println(string(statedb.Dump()))
}

if memProfilePath := ctx.GlobalString(MemProfileFlag.Name); memProfilePath != "" {
f, err := os.Create(memProfilePath)
if err != nil {
fmt.Fprintf(ctx.App.ErrWriter, "could not create memory profile: %v\n", err)
fmt.Println("could not create memory profile: ", err)
os.Exit(1)
}
if err := pprof.WriteHeapProfile(f); err != nil {
fmt.Fprintf(ctx.App.ErrWriter, "could not create memory profile: %v\n", err)
fmt.Println("could not write memory profile: ", err)
os.Exit(1)
}
f.Close()
@@ -218,17 +218,17 @@ func runCmd(ctx *cli.Context) error {

if ctx.GlobalBool(DebugFlag.Name) {
if debugLogger != nil {
fmt.Fprintln(ctx.App.ErrWriter, "#### TRACE ####")
vm.WriteTrace(ctx.App.ErrWriter, debugLogger.StructLogs())
fmt.Fprintln(os.Stderr, "#### TRACE ####")
vm.WriteTrace(os.Stderr, debugLogger.StructLogs())
}
fmt.Fprintln(ctx.App.ErrWriter, "#### LOGS ####")
vm.WriteLogs(ctx.App.ErrWriter, statedb.Logs())
fmt.Fprintln(os.Stderr, "#### LOGS ####")
vm.WriteLogs(os.Stderr, statedb.Logs())
}

if ctx.GlobalBool(StatDumpFlag.Name) {
var mem goruntime.MemStats
goruntime.ReadMemStats(&mem)
fmt.Fprintf(ctx.App.ErrWriter, `evm execution time: %v
fmt.Fprintf(os.Stderr, `evm execution time: %v
heap objects: %d
allocations: %d
total allocations: %d
@@ -238,9 +238,9 @@ Gas used: %d
`, execTime, mem.HeapObjects, mem.Alloc, mem.TotalAlloc, mem.NumGC, initialGas-leftOverGas)
}
if tracer == nil {
fmt.Fprintf(ctx.App.Writer, "0x%x\n", ret)
fmt.Printf("0x%x\n", ret)
if err != nil {
fmt.Fprintf(ctx.App.ErrWriter, " error: %v\n", err)
fmt.Printf(" error: %v\n", err)
}
}
@@ -107,7 +107,7 @@ func stateTestCmd(ctx *cli.Context) error {
}
// print state root for evmlab tracing (already committed above, so no need to delete objects again
if ctx.GlobalBool(MachineFlag.Name) && state != nil {
fmt.Fprintf(ctx.App.ErrWriter, "{\"stateRoot\": \"%x\"}\n", state.IntermediateRoot(false))
fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", state.IntermediateRoot(false))
}

results = append(results, *result)
@@ -115,13 +115,13 @@ func stateTestCmd(ctx *cli.Context) error {
// Print any structured logs collected
if ctx.GlobalBool(DebugFlag.Name) {
if debugger != nil {
fmt.Fprintln(ctx.App.ErrWriter, "#### TRACE ####")
vm.WriteTrace(ctx.App.ErrWriter, debugger.StructLogs())
fmt.Fprintln(os.Stderr, "#### TRACE ####")
vm.WriteTrace(os.Stderr, debugger.StructLogs())
}
}
}
}
out, _ := json.MarshalIndent(results, "", " ")
fmt.Fprintln(ctx.App.Writer, string(out))
fmt.Println(string(out))
return nil
}
@@ -48,7 +48,6 @@ var (
ArgsUsage: "<genesisPath>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.LightModeFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
@@ -66,7 +65,7 @@ It expects the genesis file as argument.`,
Flags: []cli.Flag{
utils.DataDirFlag,
utils.CacheFlag,
utils.LightModeFlag,
utils.SyncModeFlag,
utils.GCModeFlag,
utils.CacheDatabaseFlag,
utils.CacheGCFlag,
@@ -87,7 +86,7 @@ processing will proceed even if an individual RLP-file import failure occurs.`,
Flags: []cli.Flag{
utils.DataDirFlag,
utils.CacheFlag,
utils.LightModeFlag,
utils.SyncModeFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
@@ -105,7 +104,7 @@ be gzipped.`,
Flags: []cli.Flag{
utils.DataDirFlag,
utils.CacheFlag,
utils.LightModeFlag,
utils.SyncModeFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
@@ -119,7 +118,7 @@ be gzipped.`,
Flags: []cli.Flag{
utils.DataDirFlag,
utils.CacheFlag,
utils.LightModeFlag,
utils.SyncModeFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
@@ -149,7 +148,6 @@ The first argument must be the directory containing the blockchain to download f
ArgsUsage: " ",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.LightModeFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
@@ -163,7 +161,7 @@ Remove blockchain and state databases`,
Flags: []cli.Flag{
utils.DataDirFlag,
utils.CacheFlag,
utils.LightModeFlag,
utils.SyncModeFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
@@ -72,6 +72,7 @@ var (
utils.EthashDatasetDirFlag,
utils.EthashDatasetsInMemoryFlag,
utils.EthashDatasetsOnDiskFlag,
utils.TxPoolLocalsFlag,
utils.TxPoolNoLocalsFlag,
utils.TxPoolJournalFlag,
utils.TxPoolRejournalFlag,
@@ -82,8 +83,6 @@ var (
utils.TxPoolAccountQueueFlag,
utils.TxPoolGlobalQueueFlag,
utils.TxPoolLifetimeFlag,
utils.FastSyncFlag,
utils.LightModeFlag,
utils.SyncModeFlag,
utils.GCModeFlag,
utils.LightServFlag,
@@ -96,11 +95,19 @@ var (
utils.ListenPortFlag,
utils.MaxPeersFlag,
utils.MaxPendingPeersFlag,
utils.EtherbaseFlag,
utils.GasPriceFlag,
utils.MinerThreadsFlag,
utils.MiningEnabledFlag,
utils.TargetGasLimitFlag,
utils.MinerThreadsFlag,
utils.MinerLegacyThreadsFlag,
utils.MinerNotifyFlag,
utils.MinerGasTargetFlag,
utils.MinerLegacyGasTargetFlag,
utils.MinerGasPriceFlag,
utils.MinerLegacyGasPriceFlag,
utils.MinerEtherbaseFlag,
utils.MinerLegacyEtherbaseFlag,
utils.MinerExtraDataFlag,
utils.MinerLegacyExtraDataFlag,
utils.MinerRecommitIntervalFlag,
utils.NATFlag,
utils.NoDiscoverFlag,
utils.DiscoveryV5Flag,
@@ -121,7 +128,6 @@ var (
utils.NoCompactionFlag,
utils.GpoBlocksFlag,
utils.GpoPercentileFlag,
utils.ExtraDataFlag,
configFileFlag,
}

@@ -323,7 +329,7 @@ func startNode(ctx *cli.Context, stack *node.Node) {
// Start auxiliary services if enabled
if ctx.GlobalBool(utils.MiningEnabledFlag.Name) || ctx.GlobalBool(utils.DeveloperFlag.Name) {
// Mining only makes sense if a full Ethereum node is running
if ctx.GlobalBool(utils.LightModeFlag.Name) || ctx.GlobalString(utils.SyncModeFlag.Name) == "light" {
if ctx.GlobalString(utils.SyncModeFlag.Name) == "light" {
utils.Fatalf("Light clients do not support mining")
}
var ethereum *eth.Ethereum
@@ -331,7 +337,11 @@ func startNode(ctx *cli.Context, stack *node.Node) {
utils.Fatalf("Ethereum service not running: %v", err)
}
// Use a reduced number of threads if requested
if threads := ctx.GlobalInt(utils.MinerThreadsFlag.Name); threads > 0 {
threads := ctx.GlobalInt(utils.MinerLegacyThreadsFlag.Name)
if ctx.GlobalIsSet(utils.MinerThreadsFlag.Name) {
threads = ctx.GlobalInt(utils.MinerThreadsFlag.Name)
}
if threads > 0 {
type threaded interface {
SetThreads(threads int)
}
@@ -340,7 +350,11 @@ func startNode(ctx *cli.Context, stack *node.Node) {
}
}
// Set the gas price to the limits from the CLI and start mining
ethereum.TxPool().SetGasPrice(utils.GlobalBig(ctx, utils.GasPriceFlag.Name))
gasprice := utils.GlobalBig(ctx, utils.MinerLegacyGasPriceFlag.Name)
if ctx.IsSet(utils.MinerGasPriceFlag.Name) {
gasprice = utils.GlobalBig(ctx, utils.MinerGasPriceFlag.Name)
}
ethereum.TxPool().SetGasPrice(gasprice)
if err := ethereum.StartMining(true); err != nil {
utils.Fatalf("Failed to start mining: %v", err)
}
@@ -114,6 +114,7 @@ var AppHelpFlagGroups = []flagGroup{
{
Name: "TRANSACTION POOL",
Flags: []cli.Flag{
utils.TxPoolLocalsFlag,
utils.TxPoolNoLocalsFlag,
utils.TxPoolJournalFlag,
utils.TxPoolRejournalFlag,
@@ -185,10 +186,12 @@ var AppHelpFlagGroups = []flagGroup{
Flags: []cli.Flag{
utils.MiningEnabledFlag,
utils.MinerThreadsFlag,
utils.EtherbaseFlag,
utils.TargetGasLimitFlag,
utils.GasPriceFlag,
utils.ExtraDataFlag,
utils.MinerNotifyFlag,
utils.MinerGasPriceFlag,
utils.MinerGasTargetFlag,
utils.MinerEtherbaseFlag,
utils.MinerExtraDataFlag,
utils.MinerRecommitIntervalFlag,
},
},
{
@@ -230,8 +233,11 @@ var AppHelpFlagGroups = []flagGroup{
{
Name: "DEPRECATED",
Flags: []cli.Flag{
utils.FastSyncFlag,
utils.LightModeFlag,
utils.MinerLegacyThreadsFlag,
utils.MinerLegacyGasTargetFlag,
utils.MinerLegacyGasPriceFlag,
utils.MinerLegacyEtherbaseFlag,
utils.MinerLegacyExtraDataFlag,
},
},
{
@@ -678,9 +678,9 @@ func deployDashboard(client *sshClient, network string, conf *config, config *da

// Build and deploy the dashboard service
if nocache {
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
}
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
}

// dashboardInfos is returned from a dashboard status check to allow reporting
@@ -100,9 +100,9 @@ func deployEthstats(client *sshClient, network string, port int, secret string,

// Build and deploy the ethstats service
if nocache {
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
}
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
}

// ethstatsInfos is returned from an ethstats status check to allow reporting
@@ -38,7 +38,7 @@ ADD chain.json /chain.json
RUN \
echo '(cd ../eth-net-intelligence-api && pm2 start /ethstats.json)' > explorer.sh && \
echo '(cd ../etherchain-light && npm start &)' >> explorer.sh && \
echo '/parity/parity --chain=/chain.json --port={{.NodePort}} --tracing=on --fat-db=on --pruning=archive' >> explorer.sh
echo 'exec /parity/parity --chain=/chain.json --port={{.NodePort}} --tracing=on --fat-db=on --pruning=archive' >> explorer.sh

ENTRYPOINT ["/bin/sh", "explorer.sh"]
`
@@ -140,9 +140,9 @@ func deployExplorer(client *sshClient, network string, chainspec []byte, config

// Build and deploy the boot or seal node service
if nocache {
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
}
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
}

// explorerInfos is returned from a block explorer status check to allow reporting
@@ -133,9 +133,9 @@ func deployFaucet(client *sshClient, network string, bootnodes []string, config

// Build and deploy the faucet service
if nocache {
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
}
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
}

// faucetInfos is returned from a faucet status check to allow reporting various
@@ -81,9 +81,9 @@ func deployNginx(client *sshClient, network string, port int, nocache bool) ([]b

// Build and deploy the reverse-proxy service
if nocache {
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
}
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
}

// nginxInfos is returned from an nginx reverse-proxy status check to allow
@@ -42,7 +42,7 @@ ADD genesis.json /genesis.json
RUN \
echo 'geth --cache 512 init /genesis.json' > geth.sh && \{{if .Unlock}}
echo 'mkdir -p /root/.ethereum/keystore/ && cp /signer.json /root/.ethereum/keystore/' >> geth.sh && \{{end}}
echo $'geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .Bootnodes}}--bootnodes {{.Bootnodes}}{{end}} {{if .Etherbase}}--etherbase {{.Etherbase}} --mine --minerthreads 1{{end}} {{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --targetgaslimit {{.GasTarget}} --gasprice {{.GasPrice}}' >> geth.sh
echo $'exec geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .Bootnodes}}--bootnodes {{.Bootnodes}}{{end}} {{if .Etherbase}}--miner.etherbase {{.Etherbase}} --mine --miner.threads 1{{end}} {{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --miner.gastarget {{.GasTarget}} --miner.gasprice {{.GasPrice}}' >> geth.sh

ENTRYPOINT ["/bin/sh", "geth.sh"]
`
@@ -139,9 +139,9 @@ func deployNode(client *sshClient, network string, bootnodes []string, config *n

// Build and deploy the boot or seal node service
if nocache {
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
}
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
}

// nodeInfos is returned from a boot or seal node status check to allow reporting
@@ -37,7 +37,7 @@ ADD genesis.json /genesis.json
RUN \
echo 'node server.js &' > wallet.sh && \
echo 'geth --cache 512 init /genesis.json' >> wallet.sh && \
echo $'geth --networkid {{.NetworkID}} --port {{.NodePort}} --bootnodes {{.Bootnodes}} --ethstats \'{{.Ethstats}}\' --cache=512 --rpc --rpcaddr=0.0.0.0 --rpccorsdomain "*" --rpcvhosts "*"' >> wallet.sh
echo $'exec geth --networkid {{.NetworkID}} --port {{.NodePort}} --bootnodes {{.Bootnodes}} --ethstats \'{{.Ethstats}}\' --cache=512 --rpc --rpcaddr=0.0.0.0 --rpccorsdomain "*" --rpcvhosts "*"' >> wallet.sh

RUN \
sed -i 's/PuppethNetworkID/{{.NetworkID}}/g' dist/js/etherwallet-master.js && \
@@ -120,9 +120,9 @@ func deployWallet(client *sshClient, network string, bootnodes []string, config

// Build and deploy the boot or seal node service
if nocache {
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate --timeout 60", workdir, network, network))
}
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate --timeout 60", workdir, network))
}

// walletInfos is returned from a web wallet status check to allow reporting
@@ -45,33 +45,44 @@ type sshClient struct {

// dial establishes an SSH connection to a remote node using the current user and
// the user's configured private RSA key. If that fails, password authentication
// is fallen back to. The caller may override the login user via user@server:port.
// is fallen back to. server can be a string like user:identity@server:port.
func dial(server string, pubkey []byte) (*sshClient, error) {
// Figure out a label for the server and a logger
label := server
if strings.Contains(label, ":") {
label = label[:strings.Index(label, ":")]
}
login := ""
// Figure out username, identity, hostname and port
hostname := ""
hostport := server
username := ""
identity := "id_rsa" // default

if strings.Contains(server, "@") {
login = label[:strings.Index(label, "@")]
label = label[strings.Index(label, "@")+1:]
server = server[strings.Index(server, "@")+1:]
prefix := server[:strings.Index(server, "@")]
if strings.Contains(prefix, ":") {
username = prefix[:strings.Index(prefix, ":")]
identity = prefix[strings.Index(prefix, ":")+1:]
} else {
username = prefix
}
hostport = server[strings.Index(server, "@")+1:]
}
logger := log.New("server", label)
if strings.Contains(hostport, ":") {
hostname = hostport[:strings.Index(hostport, ":")]
} else {
hostname = hostport
hostport += ":22"
}
logger := log.New("server", server)
logger.Debug("Attempting to establish SSH connection")

user, err := user.Current()
if err != nil {
return nil, err
}
if login == "" {
login = user.Username
if username == "" {
username = user.Username
}
// Configure the supported authentication methods (private key and password)
var auths []ssh.AuthMethod

path := filepath.Join(user.HomeDir, ".ssh", "id_rsa")
path := filepath.Join(user.HomeDir, ".ssh", identity)
if buf, err := ioutil.ReadFile(path); err != nil {
log.Warn("No SSH key, falling back to passwords", "path", path, "err", err)
} else {
@@ -94,14 +105,14 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
}
}
auths = append(auths, ssh.PasswordCallback(func() (string, error) {
fmt.Printf("What's the login password for %s at %s? (won't be echoed)\n> ", login, server)
fmt.Printf("What's the login password for %s at %s? (won't be echoed)\n> ", username, server)
blob, err := terminal.ReadPassword(int(os.Stdin.Fd()))

fmt.Println()
return string(blob), err
}))
// Resolve the IP address of the remote server
addr, err := net.LookupHost(label)
addr, err := net.LookupHost(hostname)
if err != nil {
return nil, err
}
@@ -109,10 +120,7 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
return nil, errors.New("no IPs associated with domain")
}
// Try to dial in to the remote server
logger.Trace("Dialing remote SSH server", "user", login)
if !strings.Contains(server, ":") {
server += ":22"
}
logger.Trace("Dialing remote SSH server", "user", username)
keycheck := func(hostname string, remote net.Addr, key ssh.PublicKey) error {
// If no public key is known for SSH, ask the user to confirm
if pubkey == nil {
@@ -139,13 +147,13 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
// We have a mismatch, forbid connecting
return errors.New("ssh key mismatch, readd the machine to update")
}
client, err := ssh.Dial("tcp", server, &ssh.ClientConfig{User: login, Auth: auths, HostKeyCallback: keycheck})
client, err := ssh.Dial("tcp", hostport, &ssh.ClientConfig{User: username, Auth: auths, HostKeyCallback: keycheck})
if err != nil {
return nil, err
}
// Connection established, return our utility wrapper
c := &sshClient{
server: label,
server: hostname,
address: addr[0],
pubkey: pubkey,
client: client,
@@ -82,7 +82,6 @@ func (w *wizard) gatherStats(server string, pubkey []byte, client *sshClient) *s
logger.Info("Starting remote server health-check")

stat := &serverStat{
address: client.address,
services: make(map[string]map[string]string),
}
if client == nil {
@@ -94,6 +93,8 @@ func (w *wizard) gatherStats(server string, pubkey []byte, client *sshClient) *s
}
client = conn
}
stat.address = client.address

// Client connected one way or another, run health-checks
logger.Debug("Checking for nginx availability")
if infos, err := checkNginx(client, w.network); err != nil {
@@ -214,6 +215,9 @@ func (stats serverStats) render() {
if len(stat.address) > len(separator[1]) {
separator[1] = strings.Repeat("-", len(stat.address))
}
if len(stat.failure) > len(separator[1]) {
separator[1] = strings.Repeat("-", len(stat.failure))
}
for service, configs := range stat.services {
if len(service) > len(separator[2]) {
separator[2] = strings.Repeat("-", len(service))
@@ -250,7 +254,11 @@ func (stats serverStats) render() {
sort.Strings(services)

if len(services) == 0 {
table.Append([]string{server, stats[server].address, "", "", ""})
if stats[server].failure != "" {
table.Append([]string{server, stats[server].failure, "", "", ""})
} else {
table.Append([]string{server, stats[server].address, "", "", ""})
}
}
for j, service := range services {
// Add an empty line between all services
@@ -62,14 +62,14 @@ func (w *wizard) manageServers() {
}
}

// makeServer reads a single line from stdin and interprets it as a hostname to
// connect to. It tries to establish a new SSH session and also executing some
// baseline validations.
// makeServer reads a single line from stdin and interprets it as
// username:identity@hostname to connect to. It tries to establish a
// new SSH session and also executing some baseline validations.
//
// If connection succeeds, the server is added to the wizards configs!
func (w *wizard) makeServer() string {
fmt.Println()
fmt.Println("Please enter remote server's address:")
fmt.Println("What is the remote server's address ([username[:identity]@]hostname[:port])?")

// Read and dial the server to ensure docker is present
input := w.readString()
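For orientation, a rough sketch of how the new `[username[:identity]@]hostname[:port]` form accepted by the wizard breaks down. This mirrors the parsing added to dial() above but is an illustration only, not the puppeth code itself; the user, key name and host below are made up:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // splitServer illustrates the address form [username[:identity]@]hostname[:port].
    func splitServer(server string) (username, identity, hostport string) {
    	identity = "id_rsa" // default key file, as in the dial() hunk above
    	hostport = server
    	if at := strings.Index(server, "@"); at >= 0 {
    		prefix := server[:at]
    		hostport = server[at+1:]
    		if colon := strings.Index(prefix, ":"); colon >= 0 {
    			username, identity = prefix[:colon], prefix[colon+1:]
    		} else {
    			username = prefix
    		}
    	}
    	if !strings.Contains(hostport, ":") {
    		hostport += ":22" // same default port as the SSH dialer
    	}
    	return
    }

    func main() {
    	fmt.Println(splitServer("alice:deploy_key@geth-01.example.org"))
    	// Output: alice deploy_key geth-01.example.org:22
    }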
@@ -0,0 +1,219 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main

import (
"crypto/rand"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"strings"

"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/swarm/api"
"github.com/ethereum/go-ethereum/swarm/api/client"
"gopkg.in/urfave/cli.v1"
)

var salt = make([]byte, 32)

func init() {
if _, err := io.ReadFull(rand.Reader, salt); err != nil {
panic("reading from crypto/rand failed: " + err.Error())
}
}

func accessNewPass(ctx *cli.Context) {
args := ctx.Args()
if len(args) != 1 {
utils.Fatalf("Expected 1 argument - the ref")
}

var (
ae *api.AccessEntry
accessKey []byte
err error
ref = args[0]
password = getPassPhrase("", 0, makePasswordList(ctx))
dryRun = ctx.Bool(SwarmDryRunFlag.Name)
)
accessKey, ae, err = api.DoPasswordNew(ctx, password, salt)
if err != nil {
utils.Fatalf("error getting session key: %v", err)
}
m, err := api.GenerateAccessControlManifest(ctx, ref, accessKey, ae)
if dryRun {
err = printManifests(m, nil)
if err != nil {
utils.Fatalf("had an error printing the manifests: %v", err)
}
} else {
utils.Fatalf("uploading manifests")
err = uploadManifests(ctx, m, nil)
if err != nil {
utils.Fatalf("had an error uploading the manifests: %v", err)
}
}
}

func accessNewPK(ctx *cli.Context) {
args := ctx.Args()
if len(args) != 1 {
utils.Fatalf("Expected 1 argument - the ref")
}

var (
ae *api.AccessEntry
sessionKey []byte
err error
ref = args[0]
privateKey = getPrivKey(ctx)
granteePublicKey = ctx.String(SwarmAccessGrantKeyFlag.Name)
dryRun = ctx.Bool(SwarmDryRunFlag.Name)
)
sessionKey, ae, err = api.DoPKNew(ctx, privateKey, granteePublicKey, salt)
if err != nil {
utils.Fatalf("error getting session key: %v", err)
}
m, err := api.GenerateAccessControlManifest(ctx, ref, sessionKey, ae)
if dryRun {
err = printManifests(m, nil)
if err != nil {
utils.Fatalf("had an error printing the manifests: %v", err)
}
} else {
err = uploadManifests(ctx, m, nil)
if err != nil {
utils.Fatalf("had an error uploading the manifests: %v", err)
}
}
}

func accessNewACT(ctx *cli.Context) {
args := ctx.Args()
if len(args) != 1 {
utils.Fatalf("Expected 1 argument - the ref")
}

var (
ae *api.AccessEntry
actManifest *api.Manifest
accessKey []byte
err error
ref = args[0]
grantees = []string{}
actFilename = ctx.String(SwarmAccessGrantKeysFlag.Name)
privateKey = getPrivKey(ctx)
dryRun = ctx.Bool(SwarmDryRunFlag.Name)
)

bytes, err := ioutil.ReadFile(actFilename)
if err != nil {
utils.Fatalf("had an error reading the grantee public key list")
}
grantees = strings.Split(string(bytes), "\n")
accessKey, ae, actManifest, err = api.DoACTNew(ctx, privateKey, salt, grantees)
if err != nil {
utils.Fatalf("error generating ACT manifest: %v", err)
}

if err != nil {
utils.Fatalf("error getting session key: %v", err)
}
m, err := api.GenerateAccessControlManifest(ctx, ref, accessKey, ae)
if err != nil {
utils.Fatalf("error generating root access manifest: %v", err)
}

if dryRun {
err = printManifests(m, actManifest)
if err != nil {
utils.Fatalf("had an error printing the manifests: %v", err)
}
} else {
err = uploadManifests(ctx, m, actManifest)
if err != nil {
utils.Fatalf("had an error uploading the manifests: %v", err)
}
}
}

func printManifests(rootAccessManifest, actManifest *api.Manifest) error {
js, err := json.Marshal(rootAccessManifest)
if err != nil {
return err
}
fmt.Println(string(js))

if actManifest != nil {
js, err := json.Marshal(actManifest)
if err != nil {
return err
}
fmt.Println(string(js))
}
return nil
}

func uploadManifests(ctx *cli.Context, rootAccessManifest, actManifest *api.Manifest) error {
bzzapi := strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
client := client.NewClient(bzzapi)

var (
key string
err error
)
if actManifest != nil {
key, err = client.UploadManifest(actManifest, false)
if err != nil {
return err
}

rootAccessManifest.Entries[0].Access.Act = key
}
key, err = client.UploadManifest(rootAccessManifest, false)
if err != nil {
return err
}
fmt.Println(key)
return nil
}

// makePasswordList reads password lines from the file specified by the global --password flag
// and also by the same subcommand --password flag.
// This function ia a fork of utils.MakePasswordList to lookup cli context for subcommand.
// Function ctx.SetGlobal is not setting the global flag value that can be accessed
// by ctx.GlobalString using the current version of cli package.
func makePasswordList(ctx *cli.Context) []string {
path := ctx.GlobalString(utils.PasswordFileFlag.Name)
if path == "" {
path = ctx.String(utils.PasswordFileFlag.Name)
if path == "" {
return nil
}
}
text, err := ioutil.ReadFile(path)
if err != nil {
utils.Fatalf("Failed to read password file: %v", err)
}
lines := strings.Split(string(text), "\n")
// Sanitise DOS line endings.
for i := range lines {
lines[i] = strings.TrimRight(lines[i], "\r")
}
return lines
}
@@ -0,0 +1,77 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

var SwarmBootnodes = []string{
// Foundation Swarm Gateway Cluster
"enode://e5c6f9215c919a5450a7b8c14c22535607b69f2c8e1e7f6f430cb25d7a2c27cd1df4c4f18ad7c1d7e5162e271ffcd3f20b1a1467fb6e790e7d727f3b2193de97@52.232.7.187:30399",
"enode://9b2fe07e69ccc7db5fef15793dab7d7d2e697ed92132d6e9548218e68a34613a8671ad03a6658d862b468ed693cae8a0f8f8d37274e4a657ffb59ca84676e45b@52.232.7.187:30400",
"enode://76c1059162c93ef9df0f01097c824d17c492634df211ef4c806935b349082233b63b90c23970254b3b7138d630400f7cf9b71e80355a446a8b733296cb04169a@52.232.7.187:30401",
"enode://ce46bbe2a8263145d65252d52da06e000ad350ed09c876a71ea9544efa42f63c1e1b6cc56307373aaad8f9dd069c90d0ed2dd1530106200e16f4ca681dd8ae2d@52.232.7.187:30402",
"enode://f431e0d6008a6c35c6e670373d828390c8323e53da8158e7bfc43cf07e632cc9e472188be8df01decadea2d4a068f1428caba769b632554a8fb0607bc296988f@52.232.7.187:30403",
"enode://174720abfff83d7392f121108ae50ea54e04889afe020df883655c0f6cb95414db945a0228d8982fe000d86fc9f4b7669161adc89cd7cd56f78f01489ab2b99b@52.232.7.187:30404",
"enode://2ae89be4be61a689b6f9ecee4360a59e185e010ab750f14b63b4ae43d4180e872e18e3437d4386ce44875dc7cc6eb761acba06412fe3178f3dac1dab3b65703e@52.232.7.187:30405",
"enode://24abebe1c0e6d75d6052ce3219a87be8573fd6397b4cb51f0773b83abba9b3d872bfb273cdc07389715b87adfac02f5235f5241442c5089802cbd8d42e310fce@52.232.7.187:30406",
"enode://d08dfa46bfbbdbcaafbb6e34abee4786610f6c91e0b76d7881f0334ac10dda41d8c1f2b6eedffb4493293c335c0ad46776443b2208d1fbbb9e1a90b25ee4eef2@52.232.7.187:30407",
"enode://8d95eb0f837d27581a43668ed3b8783d69dc4e84aa3edd7a0897e026155c8f59c8702fdc0375ee7bac15757c9c78e1315d9b73e4ce59c936db52ea4ae2f501c7@52.232.7.187:30408",
"enode://a5967cc804aebd422baaaba9f06f27c9e695ccab335b61088130f8cbe64e3cdf78793868c7051dfc06eecfe844fad54bc7f6dfaed9db3c7ecef279cb829c25fb@52.232.7.187:30409",
"enode://5f00134d81a8f2ebcc46f8766f627f492893eda48138f811b7de2168308171968f01710bca6da05764e74f14bae41652f554e6321f1aed85fa3461e89d075dbf@52.232.7.187:30410",
"enode://b2142b79b01a5aa66a5e23cc35e78219a8e97bc2412a6698cee24ae02e87078b725d71730711bd62e25ff1aa8658c6633778af8ac14c63814a337c3dd0ebda9f@52.232.7.187:30411",
"enode://1ffa7651094867d6486ce3ef46d27a052c2cb968b618346c6df7040322c7efc3337547ba85d4cbba32e8b31c42c867202554735c06d4c664b9afada2ed0c4b3c@52.232.7.187:30412",
"enode://129e0c3d5f5df12273754f6f703d2424409fa4baa599e0b758c55600169313887855e75b082028d2302ec034b303898cd697cc7ae8256ba924ce927510da2c8d@52.232.7.187:30413",
"enode://419e2dc0d2f5b022cf16b0e28842658284909fa027a0fbbb5e2b755e7f846ea02a8f0b66a7534981edf6a7bcf8a14855344c6668e2cd4476ccd35a11537c9144@52.232.7.187:30414",
"enode://23d55ad900583231b91f2f62e3f72eb498b342afd58b682be3af052eed62b5651094471065981de33d8786f075f05e3cca499503b0ac8ae84b2a06e99f5b0723@52.232.7.187:30415",
"enode://bc56e4158c00e9f616d7ea533def20a89bef959df4e62a768ff238ff4e1e9223f57ecff969941c20921bad98749baae311c0fbebce53bf7bbb9d3dc903640990@52.232.7.187:30416",
"enode://433ce15199c409875e7e72fffd69fdafe746f17b20f0d5555281722a65fde6c80328fab600d37d8624509adc072c445ce0dad4a1c01cff6acf3132c11d429d4d@52.232.7.187:30417",
"enode://632ee95b8f0eac51ef89ceb29313fef3a60050181d66a6b125583b1a225a7694b252edc016efb58aa3b251da756cb73280842a022c658ed405223b2f58626343@52.232.7.187:30418",
"enode://4a0f9bcff7a4b9ee453fb298d0fb222592efe121512e30cd72fef631beb8c6a15153a1456eb073ee18551c0e003c569651a101892dc4124e90b933733a498bb5@52.232.7.187:30419",
"enode://f0d80fbc72d16df30e19aac3051eb56a7aff0c8367686702e01ea132d8b0b3ee00cadd6a859d2cca98ec68d3d574f8a8a87dba2347ec1e2818dc84bc3fa34fae@52.232.7.187:30420",
"enode://a199146906e4f9f2b94b195a8308d9a59a3564b92efaab898a4243fe4c2ad918b7a8e4853d9d901d94fad878270a2669d644591299c3d43de1b298c00b92b4a7@52.232.7.187:30421",
"enode://052036ea8736b37adbfb684d90ce43e11b3591b51f31489d7c726b03618dea4f73b1e659deb928e6bf40564edcdcf08351643f42db3d4ca1c2b5db95dad59e94@52.232.7.187:30422",
"enode://460e2b8c6da8f12fac96c836e7d108f4b7ec55a1c64631bb8992339e117e1c28328fee83af863196e20af1487a655d13e5ceba90e980e92502d5bac5834c1f71@52.232.7.187:30423",
"enode://6d2cdd13741b2e72e9031e1b93c6d9a4e68de2844aa4e939f6a8a8498a7c1d7e2ee4c64217e92a6df08c9a32c6764d173552810ef1bd2ecb356532d389dd2136@52.232.7.187:30424",
"enode://62105fc25ce2cd5b299647f47eaa9211502dc76f0e9f461df915782df7242ac3223e3db04356ae6ed2977ccac20f0b16864406e9ca514a40a004cb6a5d0402aa@52.232.7.187:30425",
"enode://e0e388fc520fd493c33f0ce16685e6f98fb6aec28f2edc14ee6b179594ee519a896425b0025bb6f0e182dd3e468443f19c70885fbc66560d000093a668a86aa8@52.232.7.187:30426",
"enode://63f3353a72521ea10022127a4fe6b4acbef197c3fe668fd9f4805542d8a6fcf79f6335fbab62d180a35e19b739483e740858b113fdd7c13a26ad7b4e318a5aef@52.232.7.187:30427",
"enode://33a42b927085678d4aefd4e70b861cfca6ef5f6c143696c4f755973fd29e64c9e658cad57a66a687a7a156da1e3688b1fbdd17bececff2ee009fff038fa5666b@52.232.7.187:30428",
"enode://259ab5ab5c1daee3eab7e3819ab3177b82d25c29e6c2444fdd3f956e356afae79a72840ccf2d0665fe82c81ebc3b3734da1178ac9fd5d62c67e674b69f86b6be@52.232.7.187:30429",
"enode://558bccad7445ce3fd8db116ed6ab4aed1324fdbdac2348417340c1764dc46d46bffe0728e5b7d5c36f12e794c289f18f57f08f085d2c65c9910a5c7a65b6a66a@52.232.7.187:30430",
"enode://abe60937a0657ffded718e3f84a32987286983be257bdd6004775c4b525747c2b598f4fac49c8de324de5ce75b22673fa541a7ce2d555fb7f8ca325744ae3577@52.232.7.187:30431",
"enode://bce6f0aaa5b230742680084df71d4f026b3eff7f564265599216a1b06b765303fdc9325de30ffd5dfdaf302ce4b14322891d2faea50ce2ca298d7409f5858339@52.232.7.187:30432",
"enode://21b957c4e03277d42be6660730ec1b93f540764f26c6abdb54d006611139c7081248486206dfbf64fcaffd62589e9c6b8ea77a5297e4b21a605f1bcf49483ed0@52.232.7.187:30433",
"enode://ff104e30e64f24c3d7328acee8b13354e5551bc8d60bb25ecbd9632d955c7e34bb2d969482d173355baad91c8282f8b592624eb3929151090da3b4448d4d58fb@52.232.7.187:30434",
"enode://c76e2b5f81a521bceaec1518926a21380a345df9cf463461562c6845795512497fb67679e155fc96a74350f8b78de8f4c135dd52b106dbbb9795452021d09ea5@52.232.7.187:30435",
"enode://3288fd860105164f3e9b69934c4eb18f7146cfab31b5a671f994e21a36e9287766e5f9f075aefbc404538c77f7c2eb2a4495020a7633a1c3970d94e9fa770aeb@52.232.7.187:30436",
"enode://6cea859c7396d46b20cfcaa80f9a11cd112f8684f2f782f7b4c0e1e0af9212113429522075101923b9b957603e6c32095a6a07b5e5e35183c521952ee108dfaf@52.232.7.187:30437",
"enode://f628ec56e4ca8317cc24cc4ac9b27b95edcce7b96e1c7f3b53e30de4a8580fe44f2f0694a513bdb0a431acaf2824074d6ace4690247bbc34c14f426af8c056ea@52.232.7.187:30438",
"enode://055ec8b26fc105c4f97970a1cce9773a5e34c03f511b839db742198a1c571e292c54aa799e9afb991cc8a560529b8cdf3e0c344bc6c282aff2f68eec59361ddf@52.232.7.187:30439",
|
||||
"enode://48cb0d430c328974226aa33a931d8446cd5a8d40f3ead8f4ce7ad60faa1278192eb6d58bed91258d63e81f255fc107eec2425ce2ae8b22350dd556076e160610@52.232.7.187:30440",
|
||||
"enode://3fadb7af7f770d5ffc6b073b8d42834bebb18ce1fe8a4fe270d2b799e7051327093960dc61d9a18870db288f7746a0e6ea2a013cd6ab0e5f97ca08199473aace@52.232.7.187:30441",
|
||||
"enode://a5d7168024c9992769cf380ffa559a64b4f39a29d468f579559863814eb0ae0ed689ac0871a3a2b4c78b03297485ec322d578281131ef5d5c09a4beb6200a97a@52.232.7.187:30442",
|
||||
"enode://9c57744c5b2c2d71abcbe80512652f9234d4ab041b768a2a886ab390fe6f184860f40e113290698652d7e20a8ac74d27ac8671db23eb475b6c5e6253e4693bf8@52.232.7.187:30443",
|
||||
"enode://daca9ff0c3176045a0e0ed228dee00ec86bc0939b135dc6b1caa23745d20fd0332e1ee74ad04020e89df56c7146d831a91b89d15ca3df05ba7618769fefab376@52.232.7.187:30444",
|
||||
"enode://a3f6af59428cb4b9acb198db15ef5554fa43c2b0c18e468a269722d64a27218963a2975eaf82750b6262e42192b5e3669ea51337b4cda62b33987981bc5e0c1a@52.232.7.187:30445",
|
||||
"enode://fe571422fa4651c3354c85dac61911a6a6520dd3c0332967a49d4133ca30e16a8a4946fa73ca2cb5de77917ea701a905e1c3015b2f4defcd53132b61cc84127a@52.232.7.187:30446",
|
||||
|
||||
// Mainframe
|
||||
"enode://ee9a5a571ea6c8a59f9a8bb2c569c865e922b41c91d09b942e8c1d4dd2e1725bd2c26149da14de1f6321a2c6fdf1e07c503c3e093fb61696daebf74d6acd916b@54.186.219.160:30399",
|
||||
"enode://a03f0562ecb8a992ad5242345535e73483cdc18ab934d36bf24b567d43447c2cea68f89f1d51d504dd13acc30f24ebce5a150bea2ccb1b722122ce4271dc199d@52.67.248.147:30399",
|
||||
"enode://e2cbf9eafd85903d3b1c56743035284320695e0072bc8d7396e0542aa5e1c321b236f67eab66b79c2f15d4447fa4bbe74dd67d0467da23e7eb829f60ec8a812b@13.58.169.1:30399",
|
||||
"enode://8b8c6bda6047f1cad9fab2db4d3d02b7aa26279902c32879f7bcd4a7d189fee77fdc36ee151ce6b84279b4792e72578fd529d2274d014132465758fbfee51cee@13.209.13.15:30399",
|
||||
"enode://63f6a8818927e429585287cf2ca0cb9b11fa990b7b9b331c2962cdc6f21807a2473b26e8256225c26caff70d7218e59586d704d49061452c6852e382c885d03c@35.154.106.174:30399",
|
||||
"enode://ed4bd3b794ed73f18e6dcc70c6624dfec63b5654f6ab54e8f40b16eff8afbd342d4230e099ddea40e84423f81b2d2ea79799dc345257b1fec6f6c422c9d008f7@52.213.20.99:30399",
|
||||
}

@ -68,6 +68,7 @@ const (
SWARM_ENV_SWAP_API = "SWARM_SWAP_API"
SWARM_ENV_SYNC_DISABLE = "SWARM_SYNC_DISABLE"
SWARM_ENV_SYNC_UPDATE_DELAY = "SWARM_ENV_SYNC_UPDATE_DELAY"
SWARM_ENV_LIGHT_NODE_ENABLE = "SWARM_LIGHT_NODE_ENABLE"
SWARM_ENV_DELIVERY_SKIP_CHECK = "SWARM_DELIVERY_SKIP_CHECK"
SWARM_ENV_ENS_API = "SWARM_ENS_API"
SWARM_ENV_ENS_ADDR = "SWARM_ENS_ADDR"
@ -77,6 +78,7 @@ const (
SWARM_ENV_STORE_PATH = "SWARM_STORE_PATH"
SWARM_ENV_STORE_CAPACITY = "SWARM_STORE_CAPACITY"
SWARM_ENV_STORE_CACHE_CAPACITY = "SWARM_STORE_CACHE_CAPACITY"
SWARM_ACCESS_PASSWORD = "SWARM_ACCESS_PASSWORD"
GETH_ENV_DATADIR = "GETH_DATADIR"
)
@ -131,7 +133,7 @@ func initSwarmNode(config *bzzapi.Config, stack *node.Node, ctx *cli.Context) {
log.Debug(printConfig(config))
}

//override the current config with whatever is in the config file, if a config file has been provided
//configFileOverride overrides the current config with the config file, if a config file has been provided
func configFileOverride(config *bzzapi.Config, ctx *cli.Context) (*bzzapi.Config, error) {
var err error
@ -141,7 +143,8 @@ func configFileOverride(config *bzzapi.Config, ctx *cli.Context) (*bzzapi.Config
if filepath = ctx.GlobalString(SwarmTomlConfigPathFlag.Name); filepath == "" {
utils.Fatalf("Config file flag provided with invalid file path")
}
f, err := os.Open(filepath)
var f *os.File
f, err = os.Open(filepath)
if err != nil {
return nil, err
}
@ -204,6 +207,10 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
|
|||
currentConfig.SyncUpdateDelay = d
|
||||
}
|
||||
|
||||
if ctx.GlobalIsSet(SwarmLightNodeEnabled.Name) {
|
||||
currentConfig.LightNodeEnabled = true
|
||||
}
|
||||
|
||||
if ctx.GlobalIsSet(SwarmDeliverySkipCheckFlag.Name) {
|
||||
currentConfig.DeliverySkipCheck = true
|
||||
}
|
||||
|
@ -226,10 +233,6 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
|
|||
currentConfig.Cors = cors
|
||||
}
|
||||
|
||||
if ctx.GlobalIsSet(utils.BootnodesFlag.Name) {
|
||||
currentConfig.BootNodes = ctx.GlobalString(utils.BootnodesFlag.Name)
|
||||
}
|
||||
|
||||
if storePath := ctx.GlobalString(SwarmStorePath.Name); storePath != "" {
|
||||
currentConfig.LocalStoreParams.ChunkDbPath = storePath
|
||||
}
|
||||
|
@ -301,6 +304,12 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {
|
|||
}
|
||||
}
|
||||
|
||||
if lne := os.Getenv(SWARM_ENV_LIGHT_NODE_ENABLE); lne != "" {
|
||||
if lightnode, err := strconv.ParseBool(lne); err != nil {
|
||||
currentConfig.LightNodeEnabled = lightnode
|
||||
}
|
||||
}
|
||||
|
||||
if swapapi := os.Getenv(SWARM_ENV_SWAP_API); swapapi != "" {
|
||||
currentConfig.SwapAPI = swapapi
|
||||
}
|
||||
|
@ -321,10 +330,6 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {
|
|||
currentConfig.Cors = cors
|
||||
}
|
||||
|
||||
if bootnodes := os.Getenv(SWARM_ENV_BOOTNODES); bootnodes != "" {
|
||||
currentConfig.BootNodes = bootnodes
|
||||
}
|
||||
|
||||
return currentConfig
|
||||
}
|
||||
|
||||
|
|
|
@ -68,18 +68,36 @@ func download(ctx *cli.Context) {
|
|||
utils.Fatalf("could not parse uri argument: %v", err)
|
||||
}
|
||||
|
||||
// assume behaviour according to --recursive switch
|
||||
if isRecursive {
|
||||
if err := client.DownloadDirectory(uri.Addr, uri.Path, dest); err != nil {
|
||||
utils.Fatalf("encoutered an error while downloading directory: %v", err)
|
||||
}
|
||||
} else {
|
||||
// we are downloading a file
|
||||
log.Debug(fmt.Sprintf("downloading file/path from a manifest. hash: %s, path:%s", uri.Addr, uri.Path))
|
||||
dl := func(credentials string) error {
|
||||
// assume behaviour according to --recursive switch
|
||||
if isRecursive {
|
||||
if err := client.DownloadDirectory(uri.Addr, uri.Path, dest, credentials); err != nil {
|
||||
if err == swarm.ErrUnauthorized {
|
||||
return err
|
||||
}
|
||||
return fmt.Errorf("directory %s: %v", uri.Path, err)
|
||||
}
|
||||
} else {
|
||||
// we are downloading a file
|
||||
log.Debug("downloading file/path from a manifest", "uri.Addr", uri.Addr, "uri.Path", uri.Path)
|
||||
|
||||
err := client.DownloadFile(uri.Addr, uri.Path, dest)
|
||||
if err != nil {
|
||||
utils.Fatalf("could not download %s from given address: %s. error: %v", uri.Path, uri.Addr, err)
|
||||
err := client.DownloadFile(uri.Addr, uri.Path, dest, credentials)
|
||||
if err != nil {
|
||||
if err == swarm.ErrUnauthorized {
|
||||
return err
|
||||
}
|
||||
return fmt.Errorf("file %s from address: %s: %v", uri.Path, uri.Addr, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if passwords := makePasswordList(ctx); passwords != nil {
|
||||
password := getPassPhrase(fmt.Sprintf("Downloading %s is restricted", uri), 0, passwords)
|
||||
err = dl(password)
|
||||
} else {
|
||||
err = dl("")
|
||||
}
|
||||
if err != nil {
|
||||
utils.Fatalf("download: %v", err)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -92,7 +92,7 @@ func listMounts(cliContext *cli.Context) {
|
|||
mf := []fuse.MountInfo{}
|
||||
err = client.CallContext(ctx, &mf, "swarmfs_listmounts")
|
||||
if err != nil {
|
||||
utils.Fatalf("encountered an error calling the RPC endpoint while unmounting: %v", err)
|
||||
utils.Fatalf("encountered an error calling the RPC endpoint while listing mounts: %v", err)
|
||||
}
|
||||
if len(mf) == 0 {
|
||||
fmt.Print("Could not found any swarmfs mounts. Please make sure you've specified the correct RPC endpoint\n")
|
||||
|
|
|
@ -44,7 +44,7 @@ func list(ctx *cli.Context) {
|
|||
|
||||
bzzapi := strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
|
||||
client := swarm.NewClient(bzzapi)
|
||||
list, err := client.List(manifest, prefix)
|
||||
list, err := client.List(manifest, prefix, "")
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to generate file and directory list: %s", err)
|
||||
}
|
||||
|
|
|
@ -37,7 +37,6 @@ import (
|
|||
"github.com/ethereum/go-ethereum/internal/debug"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/swarm"
|
||||
bzzapi "github.com/ethereum/go-ethereum/swarm/api"
|
||||
|
@ -67,14 +66,7 @@ OPTIONS:
|
|||
`
|
||||
|
||||
var (
|
||||
gitCommit string // Git SHA1 commit hash of the release (set via linker flags)
|
||||
testbetBootNodes = []string{
|
||||
"enode://ec8ae764f7cb0417bdfb009b9d0f18ab3818a3a4e8e7c67dd5f18971a93510a2e6f43cd0b69a27e439a9629457ea804104f37c85e41eed057d3faabbf7744cdf@13.74.157.139:30429",
|
||||
"enode://c2e1fceb3bf3be19dff71eec6cccf19f2dbf7567ee017d130240c670be8594bc9163353ca55dd8df7a4f161dd94b36d0615c17418b5a3cdcbb4e9d99dfa4de37@13.74.157.139:30430",
|
||||
"enode://fe29b82319b734ce1ec68b84657d57145fee237387e63273989d354486731e59f78858e452ef800a020559da22dcca759536e6aa5517c53930d29ce0b1029286@13.74.157.139:30431",
|
||||
"enode://1d7187e7bde45cf0bee489ce9852dd6d1a0d9aa67a33a6b8e6db8a4fbc6fcfa6f0f1a5419343671521b863b187d1c73bad3603bae66421d157ffef357669ddb8@13.74.157.139:30432",
|
||||
"enode://0e4cba800f7b1ee73673afa6a4acead4018f0149d2e3216be3f133318fd165b324cd71b81fbe1e80deac8dbf56e57a49db7be67f8b9bc81bd2b7ee496434fb5d@13.74.157.139:30433",
|
||||
}
|
||||
gitCommit string // Git SHA1 commit hash of the release (set via linker flags)
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -123,6 +115,11 @@ var (
|
|||
Usage: "Duration for sync subscriptions update after no new peers are added (default 15s)",
|
||||
EnvVar: SWARM_ENV_SYNC_UPDATE_DELAY,
|
||||
}
|
||||
SwarmLightNodeEnabled = cli.BoolFlag{
|
||||
Name: "lightnode",
|
||||
Usage: "Enable Swarm LightNode (default false)",
|
||||
EnvVar: SWARM_ENV_LIGHT_NODE_ENABLE,
|
||||
}
|
||||
SwarmDeliverySkipCheckFlag = cli.BoolFlag{
|
||||
Name: "delivery-skip-check",
|
||||
Usage: "Skip chunk delivery check (default false)",
|
||||
|
@ -150,6 +147,14 @@ var (
|
|||
Name: "defaultpath",
|
||||
Usage: "path to file served for empty url path (none)",
|
||||
}
|
||||
SwarmAccessGrantKeyFlag = cli.StringFlag{
|
||||
Name: "grant-key",
|
||||
Usage: "grants a given public key access to an ACT",
|
||||
}
|
||||
SwarmAccessGrantKeysFlag = cli.StringFlag{
|
||||
Name: "grant-keys",
|
||||
Usage: "grants a given list of public keys in the following file (separated by line breaks) access to an ACT",
|
||||
}
|
||||
SwarmUpFromStdinFlag = cli.BoolFlag{
|
||||
Name: "stdin",
|
||||
Usage: "reads data to be uploaded from stdin",
|
||||
|
@ -162,6 +167,15 @@ var (
|
|||
Name: "encrypt",
|
||||
Usage: "use encrypted upload",
|
||||
}
|
||||
SwarmAccessPasswordFlag = cli.StringFlag{
|
||||
Name: "password",
|
||||
Usage: "Password",
|
||||
EnvVar: SWARM_ACCESS_PASSWORD,
|
||||
}
|
||||
SwarmDryRunFlag = cli.BoolFlag{
|
||||
Name: "dry-run",
|
||||
Usage: "dry-run",
|
||||
}
|
||||
CorsStringFlag = cli.StringFlag{
|
||||
Name: "corsdomain",
|
||||
Usage: "Domain on which to send Access-Control-Allow-Origin header (multiple domains can be supplied separated by a ',')",
|
||||
|
@ -247,6 +261,61 @@ func init() {
|
|||
Flags: []cli.Flag{SwarmEncryptedFlag},
|
||||
Description: "uploads a file or directory to swarm using the HTTP API and prints the root hash",
|
||||
},
|
||||
{
|
||||
CustomHelpTemplate: helpTemplate,
|
||||
Name: "access",
|
||||
Usage: "encrypts a reference and embeds it into a root manifest",
|
||||
ArgsUsage: "<ref>",
|
||||
Description: "encrypts a reference and embeds it into a root manifest",
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
CustomHelpTemplate: helpTemplate,
|
||||
Name: "new",
|
||||
Usage: "encrypts a reference and embeds it into a root manifest",
|
||||
ArgsUsage: "<ref>",
|
||||
Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest",
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Action: accessNewPass,
|
||||
CustomHelpTemplate: helpTemplate,
|
||||
Flags: []cli.Flag{
|
||||
utils.PasswordFileFlag,
|
||||
SwarmDryRunFlag,
|
||||
},
|
||||
Name: "pass",
|
||||
Usage: "encrypts a reference with a password and embeds it into a root manifest",
|
||||
ArgsUsage: "<ref>",
|
||||
Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest",
|
||||
},
|
||||
{
|
||||
Action: accessNewPK,
|
||||
CustomHelpTemplate: helpTemplate,
|
||||
Flags: []cli.Flag{
|
||||
utils.PasswordFileFlag,
|
||||
SwarmDryRunFlag,
|
||||
SwarmAccessGrantKeyFlag,
|
||||
},
|
||||
Name: "pk",
|
||||
Usage: "encrypts a reference with the node's private key and a given grantee's public key and embeds it into a root manifest",
|
||||
ArgsUsage: "<ref>",
|
||||
Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest",
|
||||
},
|
||||
{
|
||||
Action: accessNewACT,
|
||||
CustomHelpTemplate: helpTemplate,
|
||||
Flags: []cli.Flag{
|
||||
SwarmAccessGrantKeysFlag,
|
||||
SwarmDryRunFlag,
|
||||
},
|
||||
Name: "act",
|
||||
Usage: "encrypts a reference with the node's private key and a given grantee's public key and embeds it into a root manifest",
|
||||
ArgsUsage: "<ref>",
|
||||
Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
CustomHelpTemplate: helpTemplate,
|
||||
Name: "resource",
|
||||
|
@ -299,16 +368,13 @@ func init() {
|
|||
Description: "Prints the swarm hash of file or directory",
|
||||
},
|
||||
{
|
||||
Action: download,
|
||||
Name: "down",
|
||||
Flags: []cli.Flag{SwarmRecursiveFlag},
|
||||
Usage: "downloads a swarm manifest or a file inside a manifest",
|
||||
ArgsUsage: " <uri> [<dir>]",
|
||||
Description: `
|
||||
Downloads a swarm bzz uri to the given dir. When no dir is provided, working directory is assumed. --recursive flag is expected when downloading a manifest with multiple entries.
|
||||
`,
|
||||
Action: download,
|
||||
Name: "down",
|
||||
Flags: []cli.Flag{SwarmRecursiveFlag, SwarmAccessPasswordFlag},
|
||||
Usage: "downloads a swarm manifest or a file inside a manifest",
|
||||
ArgsUsage: " <uri> [<dir>]",
|
||||
Description: `Downloads a swarm bzz uri to the given dir. When no dir is provided, working directory is assumed. --recursive flag is expected when downloading a manifest with multiple entries.`,
|
||||
},
|
||||
|
||||
{
|
||||
Name: "manifest",
|
||||
CustomHelpTemplate: helpTemplate,
|
||||
|
@ -317,23 +383,23 @@ Downloads a swarm bzz uri to the given dir. When no dir is provided, working dir
|
|||
Description: "Updates a MANIFEST by adding/removing/updating the hash of a path.\nCOMMAND could be: add, update, remove",
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Action: add,
|
||||
Action: manifestAdd,
|
||||
CustomHelpTemplate: helpTemplate,
|
||||
Name: "add",
|
||||
Usage: "add a new path to the manifest",
|
||||
ArgsUsage: "<MANIFEST> <path> <hash> [<content-type>]",
|
||||
ArgsUsage: "<MANIFEST> <path> <hash>",
|
||||
Description: "Adds a new path to the manifest",
|
||||
},
|
||||
{
|
||||
Action: update,
|
||||
Action: manifestUpdate,
|
||||
CustomHelpTemplate: helpTemplate,
|
||||
Name: "update",
|
||||
Usage: "update the hash for an already existing path in the manifest",
|
||||
ArgsUsage: "<MANIFEST> <path> <newhash> [<newcontent-type>]",
|
||||
ArgsUsage: "<MANIFEST> <path> <newhash>",
|
||||
Description: "Update the hash for an already existing path in the manifest",
|
||||
},
|
||||
{
|
||||
Action: remove,
|
||||
Action: manifestRemove,
|
||||
CustomHelpTemplate: helpTemplate,
|
||||
Name: "remove",
|
||||
Usage: "removes a path from the manifest",
|
||||
|
@ -408,16 +474,14 @@ pv(1) tool to get a progress bar:
|
|||
Name: "import",
|
||||
Usage: "import chunks from a tar archive into a local chunk database (use - to read from stdin)",
|
||||
ArgsUsage: "<chunkdb> <file>",
|
||||
Description: `
|
||||
Import chunks from a tar archive into a local chunk database (use - to read from stdin).
|
||||
Description: `Import chunks from a tar archive into a local chunk database (use - to read from stdin).
|
||||
|
||||
swarm db import ~/.ethereum/swarm/bzz-KEY/chunks chunks.tar
|
||||
|
||||
The import may be quite large, consider piping the input through the Unix
|
||||
pv(1) tool to get a progress bar:
|
||||
|
||||
pv chunks.tar | swarm db import ~/.ethereum/swarm/bzz-KEY/chunks -
|
||||
`,
|
||||
pv chunks.tar | swarm db import ~/.ethereum/swarm/bzz-KEY/chunks -`,
|
||||
},
|
||||
{
|
||||
Action: dbClean,
|
||||
|
@ -464,6 +528,7 @@ pv(1) tool to get a progress bar:
|
|||
SwarmSwapAPIFlag,
|
||||
SwarmSyncDisabledFlag,
|
||||
SwarmSyncUpdateDelay,
|
||||
SwarmLightNodeEnabled,
|
||||
SwarmDeliverySkipCheckFlag,
|
||||
SwarmListenAddrFlag,
|
||||
SwarmPortFlag,
|
||||
|
@ -529,6 +594,7 @@ func version(ctx *cli.Context) error {
|
|||
func bzzd(ctx *cli.Context) error {
|
||||
//build a valid bzzapi.Config from all available sources:
|
||||
//default config, file config, command line and env vars
|
||||
|
||||
bzzconfig, err := buildConfig(ctx)
|
||||
if err != nil {
|
||||
utils.Fatalf("unable to configure swarm: %v", err)
|
||||
|
@ -545,12 +611,16 @@ func bzzd(ctx *cli.Context) error {
|
|||
if _, err := os.Stat(bzzconfig.Path); err == nil {
|
||||
cfg.DataDir = bzzconfig.Path
|
||||
}
|
||||
|
||||
//optionally set the bootnodes before configuring the node
|
||||
setSwarmBootstrapNodes(ctx, &cfg)
|
||||
//setup the ethereum node
|
||||
utils.SetNodeConfig(ctx, &cfg)
|
||||
stack, err := node.New(&cfg)
|
||||
if err != nil {
|
||||
utils.Fatalf("can't create node: %v", err)
|
||||
}
|
||||
|
||||
//a few steps need to be done after the config phase is completed,
|
||||
//due to overriding behavior
|
||||
initSwarmNode(bzzconfig, stack, ctx)
|
||||
|
@ -568,16 +638,6 @@ func bzzd(ctx *cli.Context) error {
|
|||
stack.Stop()
|
||||
}()
|
||||
|
||||
// Add bootnodes as initial peers.
|
||||
if bzzconfig.BootNodes != "" {
|
||||
bootnodes := strings.Split(bzzconfig.BootNodes, ",")
|
||||
injectBootnodes(stack.Server(), bootnodes)
|
||||
} else {
|
||||
if bzzconfig.NetworkID == 3 {
|
||||
injectBootnodes(stack.Server(), testbetBootNodes)
|
||||
}
|
||||
}
|
||||
|
||||
stack.Wait()
|
||||
return nil
|
||||
}
|
||||
|
@ -685,17 +745,6 @@ func getPassPhrase(prompt string, i int, passwords []string) string {
|
|||
return password
|
||||
}
|
||||
|
||||
func injectBootnodes(srv *p2p.Server, nodes []string) {
|
||||
for _, url := range nodes {
|
||||
n, err := discover.ParseNode(url)
|
||||
if err != nil {
|
||||
log.Error("Invalid swarm bootnode", "err", err)
|
||||
continue
|
||||
}
|
||||
srv.AddPeer(n)
|
||||
}
|
||||
}
|
||||
|
||||
// addDefaultHelpSubcommand scans through defined CLI commands and adds
|
||||
// a basic help subcommand to each
|
||||
// if a help command is already defined, it will take precedence over the default.
|
||||
|
@ -708,3 +757,20 @@ func addDefaultHelpSubcommands(commands []cli.Command) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
func setSwarmBootstrapNodes(ctx *cli.Context, cfg *node.Config) {
|
||||
if ctx.GlobalIsSet(utils.BootnodesFlag.Name) || ctx.GlobalIsSet(utils.BootnodesV4Flag.Name) {
|
||||
return
|
||||
}
|
||||
|
||||
cfg.P2P.BootstrapNodes = []*discover.Node{}
|
||||
|
||||
for _, url := range SwarmBootnodes {
|
||||
node, err := discover.ParseNode(url)
|
||||
if err != nil {
|
||||
log.Error("Bootstrap URL invalid", "enode", url, "err", err)
|
||||
}
|
||||
cfg.P2P.BootstrapNodes = append(cfg.P2P.BootstrapNodes, node)
|
||||
}
|
||||
log.Debug("added default swarm bootnodes", "length", len(cfg.P2P.BootstrapNodes))
|
||||
}
|
||||
|
|
|
@ -18,10 +18,8 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"mime"
|
||||
"path/filepath"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
|
@ -30,127 +28,118 @@ import (
|
|||
"gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
const bzzManifestJSON = "application/bzz-manifest+json"
|
||||
|
||||
func add(ctx *cli.Context) {
|
||||
// manifestAdd adds a new entry to the manifest at the given path.
|
||||
// New entry hash, the last argument, must be the hash of a manifest
|
||||
// with only one entry, which meta-data will be added to the original manifest.
|
||||
// On success, this function will print new (updated) manifest's hash.
|
||||
func manifestAdd(ctx *cli.Context) {
|
||||
args := ctx.Args()
|
||||
if len(args) < 3 {
|
||||
utils.Fatalf("Need at least three arguments <MHASH> <path> <HASH> [<content-type>]")
|
||||
if len(args) != 3 {
|
||||
utils.Fatalf("Need exactly three arguments <MHASH> <path> <HASH>")
|
||||
}
|
||||
|
||||
var (
|
||||
mhash = args[0]
|
||||
path = args[1]
|
||||
hash = args[2]
|
||||
|
||||
ctype string
|
||||
wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
|
||||
mroot api.Manifest
|
||||
)
|
||||
|
||||
if len(args) > 3 {
|
||||
ctype = args[3]
|
||||
} else {
|
||||
ctype = mime.TypeByExtension(filepath.Ext(path))
|
||||
bzzapi := strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
|
||||
client := swarm.NewClient(bzzapi)
|
||||
|
||||
m, _, err := client.DownloadManifest(hash)
|
||||
if err != nil {
|
||||
utils.Fatalf("Error downloading manifest to add: %v", err)
|
||||
}
|
||||
l := len(m.Entries)
|
||||
if l == 0 {
|
||||
utils.Fatalf("No entries in manifest %s", hash)
|
||||
} else if l > 1 {
|
||||
utils.Fatalf("Too many entries in manifest %s", hash)
|
||||
}
|
||||
|
||||
newManifest := addEntryToManifest(ctx, mhash, path, hash, ctype)
|
||||
newManifest := addEntryToManifest(client, mhash, path, m.Entries[0])
|
||||
fmt.Println(newManifest)
|
||||
|
||||
if !wantManifest {
|
||||
// Print the manifest. This is the only output to stdout.
|
||||
mrootJSON, _ := json.MarshalIndent(mroot, "", " ")
|
||||
fmt.Println(string(mrootJSON))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func update(ctx *cli.Context) {
|
||||
|
||||
// manifestUpdate replaces an existing entry of the manifest at the given path.
|
||||
// New entry hash, the last argument, must be the hash of a manifest
|
||||
// with only one entry, which meta-data will be added to the original manifest.
|
||||
// On success, this function will print hash of the updated manifest.
|
||||
func manifestUpdate(ctx *cli.Context) {
|
||||
args := ctx.Args()
|
||||
if len(args) < 3 {
|
||||
utils.Fatalf("Need at least three arguments <MHASH> <path> <HASH>")
|
||||
if len(args) != 3 {
|
||||
utils.Fatalf("Need exactly three arguments <MHASH> <path> <HASH>")
|
||||
}
|
||||
|
||||
var (
|
||||
mhash = args[0]
|
||||
path = args[1]
|
||||
hash = args[2]
|
||||
|
||||
ctype string
|
||||
wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
|
||||
mroot api.Manifest
|
||||
)
|
||||
if len(args) > 3 {
|
||||
ctype = args[3]
|
||||
} else {
|
||||
ctype = mime.TypeByExtension(filepath.Ext(path))
|
||||
|
||||
bzzapi := strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
|
||||
client := swarm.NewClient(bzzapi)
|
||||
|
||||
m, _, err := client.DownloadManifest(hash)
|
||||
if err != nil {
|
||||
utils.Fatalf("Error downloading manifest to update: %v", err)
|
||||
}
|
||||
l := len(m.Entries)
|
||||
if l == 0 {
|
||||
utils.Fatalf("No entries in manifest %s", hash)
|
||||
} else if l > 1 {
|
||||
utils.Fatalf("Too many entries in manifest %s", hash)
|
||||
}
|
||||
|
||||
newManifest := updateEntryInManifest(ctx, mhash, path, hash, ctype)
|
||||
newManifest, _, defaultEntryUpdated := updateEntryInManifest(client, mhash, path, m.Entries[0], true)
|
||||
if defaultEntryUpdated {
|
||||
// Print informational message to stderr
|
||||
// allowing the user to get the new manifest hash from stdout
|
||||
// without the need to parse the complete output.
|
||||
fmt.Fprintln(os.Stderr, "Manifest default entry is updated, too")
|
||||
}
|
||||
fmt.Println(newManifest)
|
||||
|
||||
if !wantManifest {
|
||||
// Print the manifest. This is the only output to stdout.
|
||||
mrootJSON, _ := json.MarshalIndent(mroot, "", " ")
|
||||
fmt.Println(string(mrootJSON))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func remove(ctx *cli.Context) {
|
||||
// manifestRemove removes an existing entry of the manifest at the given path.
|
||||
// On success, this function will print hash of the manifest which does not
|
||||
// contain the path.
|
||||
func manifestRemove(ctx *cli.Context) {
|
||||
args := ctx.Args()
|
||||
if len(args) < 2 {
|
||||
utils.Fatalf("Need at least two arguments <MHASH> <path>")
|
||||
if len(args) != 2 {
|
||||
utils.Fatalf("Need exactly two arguments <MHASH> <path>")
|
||||
}
|
||||
|
||||
var (
|
||||
mhash = args[0]
|
||||
path = args[1]
|
||||
|
||||
wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
|
||||
mroot api.Manifest
|
||||
)
|
||||
|
||||
newManifest := removeEntryFromManifest(ctx, mhash, path)
|
||||
fmt.Println(newManifest)
|
||||
bzzapi := strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
|
||||
client := swarm.NewClient(bzzapi)
|
||||
|
||||
if !wantManifest {
|
||||
// Print the manifest. This is the only output to stdout.
|
||||
mrootJSON, _ := json.MarshalIndent(mroot, "", " ")
|
||||
fmt.Println(string(mrootJSON))
|
||||
return
|
||||
}
|
||||
newManifest := removeEntryFromManifest(client, mhash, path)
|
||||
fmt.Println(newManifest)
|
||||
}
|
||||
|
||||
func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) string {
|
||||
|
||||
var (
|
||||
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
|
||||
client = swarm.NewClient(bzzapi)
|
||||
longestPathEntry = api.ManifestEntry{}
|
||||
)
|
||||
func addEntryToManifest(client *swarm.Client, mhash, path string, entry api.ManifestEntry) string {
|
||||
var longestPathEntry = api.ManifestEntry{}
|
||||
|
||||
mroot, isEncrypted, err := client.DownloadManifest(mhash)
|
||||
if err != nil {
|
||||
utils.Fatalf("Manifest download failed: %v", err)
|
||||
}
|
||||
|
||||
//TODO: check if the "hash" to add is valid and present in swarm
|
||||
_, _, err = client.DownloadManifest(hash)
|
||||
if err != nil {
|
||||
utils.Fatalf("Hash to add is not present: %v", err)
|
||||
}
|
||||
|
||||
// See if we path is in this Manifest or do we have to dig deeper
|
||||
for _, entry := range mroot.Entries {
|
||||
if path == entry.Path {
|
||||
for _, e := range mroot.Entries {
|
||||
if path == e.Path {
|
||||
utils.Fatalf("Path %s already present, not adding anything", path)
|
||||
} else {
|
||||
if entry.ContentType == bzzManifestJSON {
|
||||
prfxlen := strings.HasPrefix(path, entry.Path)
|
||||
if e.ContentType == api.ManifestType {
|
||||
prfxlen := strings.HasPrefix(path, e.Path)
|
||||
if prfxlen && len(path) > len(longestPathEntry.Path) {
|
||||
longestPathEntry = entry
|
||||
longestPathEntry = e
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -159,25 +148,21 @@ func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) strin
|
|||
if longestPathEntry.Path != "" {
|
||||
// Load the child Manifest add the entry there
|
||||
newPath := path[len(longestPathEntry.Path):]
|
||||
newHash := addEntryToManifest(ctx, longestPathEntry.Hash, newPath, hash, ctype)
|
||||
newHash := addEntryToManifest(client, longestPathEntry.Hash, newPath, entry)
|
||||
|
||||
// Replace the hash for parent Manifests
|
||||
newMRoot := &api.Manifest{}
|
||||
for _, entry := range mroot.Entries {
|
||||
if longestPathEntry.Path == entry.Path {
|
||||
entry.Hash = newHash
|
||||
for _, e := range mroot.Entries {
|
||||
if longestPathEntry.Path == e.Path {
|
||||
e.Hash = newHash
|
||||
}
|
||||
newMRoot.Entries = append(newMRoot.Entries, entry)
|
||||
newMRoot.Entries = append(newMRoot.Entries, e)
|
||||
}
|
||||
mroot = newMRoot
|
||||
} else {
|
||||
// Add the entry in the leaf Manifest
|
||||
newEntry := api.ManifestEntry{
|
||||
Hash: hash,
|
||||
Path: path,
|
||||
ContentType: ctype,
|
||||
}
|
||||
mroot.Entries = append(mroot.Entries, newEntry)
|
||||
entry.Path = path
|
||||
mroot.Entries = append(mroot.Entries, entry)
|
||||
}
|
||||
|
||||
newManifestHash, err := client.UploadManifest(mroot, isEncrypted)
|
||||
|
@ -185,14 +170,16 @@ func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) strin
|
|||
utils.Fatalf("Manifest upload failed: %v", err)
|
||||
}
|
||||
return newManifestHash
|
||||
|
||||
}
|
||||
|
||||
func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) string {
|
||||
|
||||
// updateEntryInManifest updates an existing entry o path with a new one in the manifest with provided mhash
|
||||
// finding the path recursively through all nested manifests. Argument isRoot is used for default
|
||||
// entry update detection. If the updated entry has the same hash as the default entry, then the
|
||||
// default entry in root manifest will be updated too.
|
||||
// Returned values are the new manifest hash, hash of the entry that was replaced by the new entry and
|
||||
// a a bool that is true if default entry is updated.
|
||||
func updateEntryInManifest(client *swarm.Client, mhash, path string, entry api.ManifestEntry, isRoot bool) (newManifestHash, oldHash string, defaultEntryUpdated bool) {
|
||||
var (
|
||||
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
|
||||
client = swarm.NewClient(bzzapi)
|
||||
newEntry = api.ManifestEntry{}
|
||||
longestPathEntry = api.ManifestEntry{}
|
||||
)
|
||||
|
@ -202,17 +189,18 @@ func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) st
|
|||
utils.Fatalf("Manifest download failed: %v", err)
|
||||
}
|
||||
|
||||
//TODO: check if the "hash" with which to update is valid and present in swarm
|
||||
|
||||
// See if we path is in this Manifest or do we have to dig deeper
|
||||
for _, entry := range mroot.Entries {
|
||||
if path == entry.Path {
|
||||
newEntry = entry
|
||||
for _, e := range mroot.Entries {
|
||||
if path == e.Path {
|
||||
newEntry = e
|
||||
// keep the reference of the hash of the entry that should be replaced
|
||||
// for default entry detection
|
||||
oldHash = e.Hash
|
||||
} else {
|
||||
if entry.ContentType == bzzManifestJSON {
|
||||
prfxlen := strings.HasPrefix(path, entry.Path)
|
||||
if e.ContentType == api.ManifestType {
|
||||
prfxlen := strings.HasPrefix(path, e.Path)
|
||||
if prfxlen && len(path) > len(longestPathEntry.Path) {
|
||||
longestPathEntry = entry
|
||||
longestPathEntry = e
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -225,50 +213,50 @@ func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) st
|
|||
if longestPathEntry.Path != "" {
|
||||
// Load the child Manifest add the entry there
|
||||
newPath := path[len(longestPathEntry.Path):]
|
||||
newHash := updateEntryInManifest(ctx, longestPathEntry.Hash, newPath, hash, ctype)
|
||||
var newHash string
|
||||
newHash, oldHash, _ = updateEntryInManifest(client, longestPathEntry.Hash, newPath, entry, false)
|
||||
|
||||
// Replace the hash for parent Manifests
|
||||
newMRoot := &api.Manifest{}
|
||||
for _, entry := range mroot.Entries {
|
||||
if longestPathEntry.Path == entry.Path {
|
||||
entry.Hash = newHash
|
||||
for _, e := range mroot.Entries {
|
||||
if longestPathEntry.Path == e.Path {
|
||||
e.Hash = newHash
|
||||
}
|
||||
newMRoot.Entries = append(newMRoot.Entries, entry)
|
||||
newMRoot.Entries = append(newMRoot.Entries, e)
|
||||
|
||||
}
|
||||
mroot = newMRoot
|
||||
}
|
||||
|
||||
if newEntry.Path != "" {
|
||||
// update the manifest if the new entry is found and
|
||||
// check if default entry should be updated
|
||||
if newEntry.Path != "" || isRoot {
|
||||
// Replace the hash for leaf Manifest
|
||||
newMRoot := &api.Manifest{}
|
||||
for _, entry := range mroot.Entries {
|
||||
if newEntry.Path == entry.Path {
|
||||
myEntry := api.ManifestEntry{
|
||||
Hash: hash,
|
||||
Path: entry.Path,
|
||||
ContentType: ctype,
|
||||
}
|
||||
newMRoot.Entries = append(newMRoot.Entries, myEntry)
|
||||
} else {
|
||||
for _, e := range mroot.Entries {
|
||||
if newEntry.Path == e.Path {
|
||||
entry.Path = e.Path
|
||||
newMRoot.Entries = append(newMRoot.Entries, entry)
|
||||
} else if isRoot && e.Path == "" && e.Hash == oldHash {
|
||||
entry.Path = e.Path
|
||||
newMRoot.Entries = append(newMRoot.Entries, entry)
|
||||
defaultEntryUpdated = true
|
||||
} else {
|
||||
newMRoot.Entries = append(newMRoot.Entries, e)
|
||||
}
|
||||
}
|
||||
mroot = newMRoot
|
||||
}
|
||||
|
||||
newManifestHash, err := client.UploadManifest(mroot, isEncrypted)
|
||||
newManifestHash, err = client.UploadManifest(mroot, isEncrypted)
|
||||
if err != nil {
|
||||
utils.Fatalf("Manifest upload failed: %v", err)
|
||||
}
|
||||
return newManifestHash
|
||||
return newManifestHash, oldHash, defaultEntryUpdated
|
||||
}
|
||||
|
||||
func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string {
|
||||
|
||||
func removeEntryFromManifest(client *swarm.Client, mhash, path string) string {
|
||||
var (
|
||||
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
|
||||
client = swarm.NewClient(bzzapi)
|
||||
entryToRemove = api.ManifestEntry{}
|
||||
longestPathEntry = api.ManifestEntry{}
|
||||
)
|
||||
|
@ -283,7 +271,7 @@ func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string {
|
|||
if path == entry.Path {
|
||||
entryToRemove = entry
|
||||
} else {
|
||||
if entry.ContentType == bzzManifestJSON {
|
||||
if entry.ContentType == api.ManifestType {
|
||||
prfxlen := strings.HasPrefix(path, entry.Path)
|
||||
if prfxlen && len(path) > len(longestPathEntry.Path) {
|
||||
longestPathEntry = entry
|
||||
|
@ -299,7 +287,7 @@ func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string {
|
|||
if longestPathEntry.Path != "" {
|
||||
// Load the child Manifest remove the entry there
|
||||
newPath := path[len(longestPathEntry.Path):]
|
||||
newHash := removeEntryFromManifest(ctx, longestPathEntry.Hash, newPath)
|
||||
newHash := removeEntryFromManifest(client, longestPathEntry.Hash, newPath)
|
||||
|
||||
// Replace the hash for parent Manifests
|
||||
newMRoot := &api.Manifest{}
|
||||
|
|
|
@ -98,6 +98,17 @@ func upload(ctx *cli.Context) {
|
|||
if !recursive {
|
||||
return "", errors.New("Argument is a directory and recursive upload is disabled")
|
||||
}
|
||||
if defaultPath != "" {
|
||||
// construct absolute default path
|
||||
absDefaultPath, _ := filepath.Abs(defaultPath)
|
||||
absFile, _ := filepath.Abs(file)
|
||||
// make sure absolute directory ends with only one "/"
|
||||
// to trim it from absolute default path and get relative default path
|
||||
absFile = strings.TrimRight(absFile, "/") + "/"
|
||||
if absDefaultPath != "" && absFile != "" && strings.HasPrefix(absDefaultPath, absFile) {
|
||||
defaultPath = strings.TrimPrefix(absDefaultPath, absFile)
|
||||
}
|
||||
}
|
||||
return client.UploadDirectory(file, defaultPath, "", toEncrypt)
|
||||
}
|
||||
} else {
|
||||
|
|
|
@ -24,7 +24,6 @@ import (
|
|||
"math/big"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
@ -158,14 +157,6 @@ var (
|
|||
Usage: "Document Root for HTTPClient file scheme",
|
||||
Value: DirectoryString{homeDir()},
|
||||
}
|
||||
FastSyncFlag = cli.BoolFlag{
|
||||
Name: "fast",
|
||||
Usage: "Enable fast syncing through state downloads (replaced by --syncmode)",
|
||||
}
|
||||
LightModeFlag = cli.BoolFlag{
|
||||
Name: "light",
|
||||
Usage: "Enable light client mode (replaced by --syncmode)",
|
||||
}
|
||||
defaultSyncMode = eth.DefaultConfig.SyncMode
|
||||
SyncModeFlag = TextMarshalerFlag{
|
||||
Name: "syncmode",
|
||||
|
@ -242,6 +233,10 @@ var (
|
|||
Value: eth.DefaultConfig.Ethash.DatasetsOnDisk,
|
||||
}
|
||||
// Transaction pool settings
|
||||
TxPoolLocalsFlag = cli.StringFlag{
|
||||
Name: "txpool.locals",
|
||||
Usage: "Comma separated accounts to treat as locals (no flush, priority inclusion)",
|
||||
}
|
||||
TxPoolNoLocalsFlag = cli.BoolFlag{
|
||||
Name: "txpool.nolocals",
|
||||
Usage: "Disables price exemptions for locally submitted transactions",
|
||||
|
@ -318,29 +313,62 @@ var (
|
|||
Usage: "Enable mining",
|
||||
}
|
||||
MinerThreadsFlag = cli.IntFlag{
|
||||
Name: "minerthreads",
|
||||
Name: "miner.threads",
|
||||
Usage: "Number of CPU threads to use for mining",
|
||||
Value: runtime.NumCPU(),
|
||||
Value: 0,
|
||||
}
|
||||
TargetGasLimitFlag = cli.Uint64Flag{
|
||||
Name: "targetgaslimit",
|
||||
Usage: "Target gas limit sets the artificial target gas floor for the blocks to mine",
|
||||
MinerLegacyThreadsFlag = cli.IntFlag{
|
||||
Name: "minerthreads",
|
||||
Usage: "Number of CPU threads to use for mining (deprecated, use --miner.threads)",
|
||||
Value: 0,
|
||||
}
|
||||
MinerNotifyFlag = cli.StringFlag{
|
||||
Name: "miner.notify",
|
||||
Usage: "Comma separated HTTP URL list to notify of new work packages",
|
||||
}
|
||||
MinerGasTargetFlag = cli.Uint64Flag{
|
||||
Name: "miner.gastarget",
|
||||
Usage: "Target gas floor for mined blocks",
|
||||
Value: params.GenesisGasLimit,
|
||||
}
|
||||
EtherbaseFlag = cli.StringFlag{
|
||||
Name: "etherbase",
|
||||
Usage: "Public address for block mining rewards (default = first account created)",
|
||||
MinerLegacyGasTargetFlag = cli.Uint64Flag{
|
||||
Name: "targetgaslimit",
|
||||
Usage: "Target gas floor for mined blocks (deprecated, use --miner.gastarget)",
|
||||
Value: params.GenesisGasLimit,
|
||||
}
|
||||
MinerGasPriceFlag = BigFlag{
|
||||
Name: "miner.gasprice",
|
||||
Usage: "Minimal gas price for mining a transactions",
|
||||
Value: eth.DefaultConfig.MinerGasPrice,
|
||||
}
|
||||
MinerLegacyGasPriceFlag = BigFlag{
|
||||
Name: "gasprice",
|
||||
Usage: "Minimal gas price for mining a transactions (deprecated, use --miner.gasprice)",
|
||||
Value: eth.DefaultConfig.MinerGasPrice,
|
||||
}
|
||||
MinerEtherbaseFlag = cli.StringFlag{
|
||||
Name: "miner.etherbase",
|
||||
Usage: "Public address for block mining rewards (default = first account)",
|
||||
Value: "0",
|
||||
}
|
||||
GasPriceFlag = BigFlag{
|
||||
Name: "gasprice",
|
||||
Usage: "Minimal gas price to accept for mining a transactions",
|
||||
Value: eth.DefaultConfig.GasPrice,
|
||||
MinerLegacyEtherbaseFlag = cli.StringFlag{
|
||||
Name: "etherbase",
|
||||
Usage: "Public address for block mining rewards (default = first account, deprecated, use --miner.etherbase)",
|
||||
Value: "0",
|
||||
}
|
||||
ExtraDataFlag = cli.StringFlag{
|
||||
Name: "extradata",
|
||||
MinerExtraDataFlag = cli.StringFlag{
|
||||
Name: "miner.extradata",
|
||||
Usage: "Block extra data set by the miner (default = client version)",
|
||||
}
|
||||
MinerLegacyExtraDataFlag = cli.StringFlag{
|
||||
Name: "extradata",
|
||||
Usage: "Block extra data set by the miner (default = client version, deprecated, use --miner.extradata)",
|
||||
}
|
||||
MinerRecommitIntervalFlag = cli.DurationFlag{
|
||||
Name: "miner.recommit",
|
||||
Usage: "Time interval to recreate the block being mined.",
|
||||
Value: eth.DefaultConfig.MinerRecommit,
|
||||
}
|
||||
// Account settings
|
||||
UnlockedAccountFlag = cli.StringFlag{
|
||||
Name: "unlock",
|
||||
|
@ -810,10 +838,19 @@ func MakeAddress(ks *keystore.KeyStore, account string) (accounts.Account, error
|
|||
// setEtherbase retrieves the etherbase either from the directly specified
|
||||
// command line flags or from the keystore if CLI indexed.
|
||||
func setEtherbase(ctx *cli.Context, ks *keystore.KeyStore, cfg *eth.Config) {
|
||||
if ctx.GlobalIsSet(EtherbaseFlag.Name) {
|
||||
account, err := MakeAddress(ks, ctx.GlobalString(EtherbaseFlag.Name))
|
||||
// Extract the current etherbase, new flag overriding legacy one
|
||||
var etherbase string
|
||||
if ctx.GlobalIsSet(MinerLegacyEtherbaseFlag.Name) {
|
||||
etherbase = ctx.GlobalString(MinerLegacyEtherbaseFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(MinerEtherbaseFlag.Name) {
|
||||
etherbase = ctx.GlobalString(MinerEtherbaseFlag.Name)
|
||||
}
|
||||
// Convert the etherbase into an address and configure it
|
||||
if etherbase != "" {
|
||||
account, err := MakeAddress(ks, etherbase)
|
||||
if err != nil {
|
||||
Fatalf("Option %q: %v", EtherbaseFlag.Name, err)
|
||||
Fatalf("Invalid miner etherbase: %v", err)
|
||||
}
|
||||
cfg.Etherbase = account.Address
|
||||
}
|
||||
|
@ -844,7 +881,7 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) {
|
|||
setBootstrapNodes(ctx, cfg)
|
||||
setBootstrapNodesV5(ctx, cfg)
|
||||
|
||||
lightClient := ctx.GlobalBool(LightModeFlag.Name) || ctx.GlobalString(SyncModeFlag.Name) == "light"
|
||||
lightClient := ctx.GlobalString(SyncModeFlag.Name) == "light"
|
||||
lightServer := ctx.GlobalInt(LightServFlag.Name) != 0
|
||||
lightPeers := ctx.GlobalInt(LightPeersFlag.Name)
|
||||
|
||||
|
@ -944,6 +981,16 @@ func setGPO(ctx *cli.Context, cfg *gasprice.Config) {
|
|||
}
|
||||
|
||||
func setTxPool(ctx *cli.Context, cfg *core.TxPoolConfig) {
|
||||
if ctx.GlobalIsSet(TxPoolLocalsFlag.Name) {
|
||||
locals := strings.Split(ctx.GlobalString(TxPoolLocalsFlag.Name), ",")
|
||||
for _, account := range locals {
|
||||
if trimmed := strings.TrimSpace(account); !common.IsHexAddress(trimmed) {
|
||||
Fatalf("Invalid account in --txpool.locals: %s", trimmed)
|
||||
} else {
|
||||
cfg.Locals = append(cfg.Locals, common.HexToAddress(account))
|
||||
}
|
||||
}
|
||||
}
|
||||
if ctx.GlobalIsSet(TxPoolNoLocalsFlag.Name) {
|
||||
cfg.NoLocals = ctx.GlobalBool(TxPoolNoLocalsFlag.Name)
|
||||
}
|
||||
|
@ -1049,8 +1096,6 @@ func SetShhConfig(ctx *cli.Context, stack *node.Node, cfg *whisper.Config) {
|
|||
func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
|
||||
// Avoid conflicting network flags
|
||||
checkExclusive(ctx, DeveloperFlag, TestnetFlag, RinkebyFlag)
|
||||
checkExclusive(ctx, FastSyncFlag, LightModeFlag, SyncModeFlag)
|
||||
checkExclusive(ctx, LightServFlag, LightModeFlag)
|
||||
checkExclusive(ctx, LightServFlag, SyncModeFlag, "light")
|
||||
|
||||
ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
|
||||
|
@ -1059,13 +1104,8 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
|
|||
setTxPool(ctx, &cfg.TxPool)
|
||||
setEthash(ctx, cfg)
|
||||
|
||||
switch {
|
||||
case ctx.GlobalIsSet(SyncModeFlag.Name):
|
||||
if ctx.GlobalIsSet(SyncModeFlag.Name) {
|
||||
cfg.SyncMode = *GlobalTextMarshaler(ctx, SyncModeFlag.Name).(*downloader.SyncMode)
|
||||
case ctx.GlobalBool(FastSyncFlag.Name):
|
||||
cfg.SyncMode = downloader.FastSync
|
||||
case ctx.GlobalBool(LightModeFlag.Name):
|
||||
cfg.SyncMode = downloader.LightSync
|
||||
}
|
||||
if ctx.GlobalIsSet(LightServFlag.Name) {
|
||||
cfg.LightServ = ctx.GlobalInt(LightServFlag.Name)
|
||||
|
@ -1090,17 +1130,32 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
|
|||
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheGCFlag.Name) {
|
||||
cfg.TrieCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
|
||||
}
|
||||
if ctx.GlobalIsSet(MinerLegacyThreadsFlag.Name) {
|
||||
cfg.MinerThreads = ctx.GlobalInt(MinerLegacyThreadsFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(MinerThreadsFlag.Name) {
|
||||
cfg.MinerThreads = ctx.GlobalInt(MinerThreadsFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(MinerNotifyFlag.Name) {
|
||||
cfg.MinerNotify = strings.Split(ctx.GlobalString(MinerNotifyFlag.Name), ",")
|
||||
}
|
||||
if ctx.GlobalIsSet(DocRootFlag.Name) {
|
||||
cfg.DocRoot = ctx.GlobalString(DocRootFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(ExtraDataFlag.Name) {
|
||||
cfg.ExtraData = []byte(ctx.GlobalString(ExtraDataFlag.Name))
|
||||
if ctx.GlobalIsSet(MinerLegacyExtraDataFlag.Name) {
|
||||
cfg.MinerExtraData = []byte(ctx.GlobalString(MinerLegacyExtraDataFlag.Name))
|
||||
}
|
||||
if ctx.GlobalIsSet(GasPriceFlag.Name) {
|
||||
cfg.GasPrice = GlobalBig(ctx, GasPriceFlag.Name)
|
||||
if ctx.GlobalIsSet(MinerExtraDataFlag.Name) {
|
||||
cfg.MinerExtraData = []byte(ctx.GlobalString(MinerExtraDataFlag.Name))
|
||||
}
|
||||
if ctx.GlobalIsSet(MinerLegacyGasPriceFlag.Name) {
|
||||
cfg.MinerGasPrice = GlobalBig(ctx, MinerLegacyGasPriceFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(MinerGasPriceFlag.Name) {
|
||||
cfg.MinerGasPrice = GlobalBig(ctx, MinerGasPriceFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(MinerRecommitIntervalFlag.Name) {
|
||||
cfg.MinerRecommit = ctx.Duration(MinerRecommitIntervalFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(VMEnableDebugFlag.Name) {
|
||||
// TODO(fjl): force-enable this in --dev mode
|
||||
|
@ -1142,8 +1197,8 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
|
|||
log.Info("Using developer account", "address", developer.Address)
|
||||
|
||||
cfg.Genesis = core.DeveloperGenesisBlock(uint64(ctx.GlobalInt(DeveloperPeriodFlag.Name)), developer.Address)
|
||||
if !ctx.GlobalIsSet(GasPriceFlag.Name) {
|
||||
cfg.GasPrice = big.NewInt(1)
|
||||
if !ctx.GlobalIsSet(MinerGasPriceFlag.Name) && !ctx.GlobalIsSet(MinerLegacyGasPriceFlag.Name) {
|
||||
cfg.MinerGasPrice = big.NewInt(1)
|
||||
}
|
||||
}
|
||||
// TODO(fjl): move trie cache generations into config
|
||||
|
@ -1217,7 +1272,10 @@ func RegisterEthStatsService(stack *node.Node, url string) {
|
|||
// SetupNetwork configures the system for either the main net or some test network.
|
||||
func SetupNetwork(ctx *cli.Context) {
|
||||
// TODO(fjl): move target gas limit into config
|
||||
params.TargetGasLimit = ctx.GlobalUint64(TargetGasLimitFlag.Name)
|
||||
params.TargetGasLimit = ctx.GlobalUint64(MinerLegacyGasTargetFlag.Name)
|
||||
if ctx.GlobalIsSet(MinerGasTargetFlag.Name) {
|
||||
params.TargetGasLimit = ctx.GlobalUint64(MinerGasTargetFlag.Name)
|
||||
}
|
||||
}
|
||||
|
||||
func SetupMetrics(ctx *cli.Context) {
|
||||
|
@ -1248,7 +1306,7 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database {
|
|||
handles = makeDatabaseHandles()
|
||||
)
|
||||
name := "chaindata"
|
||||
if ctx.GlobalBool(LightModeFlag.Name) {
|
||||
if ctx.GlobalString(SyncModeFlag.Name) == "light" {
|
||||
name = "lightchaindata"
|
||||
}
|
||||
chainDb, err := stack.OpenDatabase(name, cache, handles)
|
||||
|
@ -1293,7 +1351,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
|
|||
DatasetDir: stack.ResolvePath(eth.DefaultConfig.Ethash.DatasetDir),
|
||||
DatasetsInMem: eth.DefaultConfig.Ethash.DatasetsInMem,
|
||||
DatasetsOnDisk: eth.DefaultConfig.Ethash.DatasetsOnDisk,
|
||||
})
|
||||
}, nil)
|
||||
}
|
||||
}
|
||||
if gcmode := ctx.GlobalString(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" {
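
The flags.go hunks above split each miner option into a new --miner.* flag plus a deprecated legacy alias, and the new flag wins when both are given. A minimal, self-contained sketch of that precedence pattern (the flag variables and the hard-coded default standing in for params.GenesisGasLimit are illustrative only, not geth code), using the same gopkg.in/urfave/cli.v1 API the diff relies on:

package main

import (
	"fmt"
	"os"

	cli "gopkg.in/urfave/cli.v1"
)

// Flag names match the hunk above; 4712388 mirrors geth's genesis gas limit
// default so the sketch stays self-contained.
var (
	legacyGasTargetFlag = cli.Uint64Flag{Name: "targetgaslimit", Value: 4712388}
	gasTargetFlag       = cli.Uint64Flag{Name: "miner.gastarget", Value: 4712388}
)

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{legacyGasTargetFlag, gasTargetFlag}
	app.Action = func(ctx *cli.Context) error {
		// Read the legacy flag first, then let the new flag override it when
		// set, which is the precedence SetupNetwork uses above.
		target := ctx.GlobalUint64(legacyGasTargetFlag.Name)
		if ctx.GlobalIsSet(gasTargetFlag.Name) {
			target = ctx.GlobalUint64(gasTargetFlag.Name)
		}
		fmt.Println("target gas limit:", target)
		return nil
	}
	app.Run(os.Args)
}

Running this with --targetgaslimit 1000 --miner.gastarget 2000 prints 2000, matching the override order shown in the hunk.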

@ -30,3 +30,34 @@ type AbsTime time.Duration
func Now() AbsTime {
return AbsTime(monotime.Now())
}

// Add returns t + d.
func (t AbsTime) Add(d time.Duration) AbsTime {
return t + AbsTime(d)
}

// Clock interface makes it possible to replace the monotonic system clock with
// a simulated clock.
type Clock interface {
Now() AbsTime
Sleep(time.Duration)
After(time.Duration) <-chan time.Time
}

// System implements Clock using the system clock.
type System struct{}

// Now implements Clock.
func (System) Now() AbsTime {
return AbsTime(monotime.Now())
}

// Sleep implements Clock.
func (System) Sleep(d time.Duration) {
time.Sleep(d)
}

// After implements Clock.
func (System) After(d time.Duration) <-chan time.Time {
return time.After(d)
}

@ -0,0 +1,129 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package mclock

import (
"sync"
"time"
)

// Simulated implements a virtual Clock for reproducible time-sensitive tests. It
// simulates a scheduler on a virtual timescale where actual processing takes zero time.
//
// The virtual clock doesn't advance on its own, call Run to advance it and execute timers.
// Since there is no way to influence the Go scheduler, testing timeout behaviour involving
// goroutines needs special care. A good way to test such timeouts is as follows: First
// perform the action that is supposed to time out. Ensure that the timer you want to test
// is created. Then run the clock until after the timeout. Finally observe the effect of
// the timeout using a channel or semaphore.
type Simulated struct {
now AbsTime
scheduled []event
mu sync.RWMutex
cond *sync.Cond
}

type event struct {
do func()
at AbsTime
}

// Run moves the clock by the given duration, executing all timers before that duration.
func (s *Simulated) Run(d time.Duration) {
s.mu.Lock()
defer s.mu.Unlock()
s.init()

end := s.now + AbsTime(d)
for len(s.scheduled) > 0 {
ev := s.scheduled[0]
if ev.at > end {
break
}
s.now = ev.at
ev.do()
s.scheduled = s.scheduled[1:]
}
s.now = end
}

func (s *Simulated) ActiveTimers() int {
s.mu.RLock()
defer s.mu.RUnlock()

return len(s.scheduled)
}

func (s *Simulated) WaitForTimers(n int) {
s.mu.Lock()
defer s.mu.Unlock()
s.init()

for len(s.scheduled) < n {
s.cond.Wait()
}
}

// Now implements Clock.
func (s *Simulated) Now() AbsTime {
s.mu.RLock()
defer s.mu.RUnlock()

return s.now
}

// Sleep implements Clock.
func (s *Simulated) Sleep(d time.Duration) {
<-s.After(d)
}

// After implements Clock.
func (s *Simulated) After(d time.Duration) <-chan time.Time {
after := make(chan time.Time, 1)
s.insert(d, func() {
after <- (time.Time{}).Add(time.Duration(s.now))
})
return after
}

func (s *Simulated) insert(d time.Duration, do func()) {
s.mu.Lock()
defer s.mu.Unlock()
s.init()

at := s.now + AbsTime(d)
l, h := 0, len(s.scheduled)
ll := h
for l != h {
m := (l + h) / 2
if at < s.scheduled[m].at {
h = m
} else {
l = m + 1
}
}
s.scheduled = append(s.scheduled, event{})
copy(s.scheduled[l+1:], s.scheduled[l:ll])
s.scheduled[l] = event{do: do, at: at}
s.cond.Broadcast()
}

func (s *Simulated) init() {
if s.cond == nil {
s.cond = sync.NewCond(&s.mu)
}
}

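The doc comment on Simulated above spells out a testing recipe: create the timer under test, run the virtual clock past the timeout, then observe the effect through a channel. A minimal sketch of that flow (an illustrative test, not part of this commit), assuming the vendored import path github.com/ethereum/go-ethereum/common/mclock:

package mclock_test

import (
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common/mclock"
)

func TestSimulatedTimeout(t *testing.T) {
	var clk mclock.Simulated

	// 1. Create the timer that is supposed to fire.
	timeout := clk.After(5 * time.Second)

	// 2. Advance the virtual clock past the timeout; this executes the timer.
	clk.Run(6 * time.Second)

	// 3. Observe the effect through the channel.
	select {
	case <-timeout:
		// Timer fired at virtual time 5s, as expected.
	default:
		t.Fatal("timer did not fire after running the simulated clock")
	}
}

Because Run only advances virtual time, the test is deterministic and never sleeps.
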
@@ -0,0 +1,57 @@
// This is a duplicated and slightly modified version of "gopkg.in/karalabe/cookiejar.v2/collections/prque".

package prque

import (
    "container/heap"
)

// Priority queue data structure.
type Prque struct {
    cont *sstack
}

// Creates a new priority queue.
func New(setIndex setIndexCallback) *Prque {
    return &Prque{newSstack(setIndex)}
}

// Pushes a value with a given priority into the queue, expanding if necessary.
func (p *Prque) Push(data interface{}, priority int64) {
    heap.Push(p.cont, &item{data, priority})
}

// Pops the value with the greates priority off the stack and returns it.
// Currently no shrinking is done.
func (p *Prque) Pop() (interface{}, int64) {
    item := heap.Pop(p.cont).(*item)
    return item.value, item.priority
}

// Pops only the item from the queue, dropping the associated priority value.
func (p *Prque) PopItem() interface{} {
    return heap.Pop(p.cont).(*item).value
}

// Remove removes the element with the given index.
func (p *Prque) Remove(i int) interface{} {
    if i < 0 {
        return nil
    }
    return heap.Remove(p.cont, i)
}

// Checks whether the priority queue is empty.
func (p *Prque) Empty() bool {
    return p.cont.Len() == 0
}

// Returns the number of element in the priority queue.
func (p *Prque) Size() int {
    return p.cont.Len()
}

// Clears the contents of the priority queue.
func (p *Prque) Reset() {
    *p = *New(p.cont.setIndex)
}
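A small illustrative sketch (not part of the diff) of the queue above: higher int64 priorities pop first, and the setIndex callback is optional, so nil is fine when entries never need to be removed by index.

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common/prque"
)

func main() {
    // No index tracking needed, so pass a nil callback.
    q := prque.New(nil)

    q.Push("low", 1)
    q.Push("high", 10)
    q.Push("mid", 5)

    for !q.Empty() {
        value, priority := q.Pop()
        fmt.Println(value, priority) // prints: high 10, mid 5, low 1
    }
}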
vendor/github.com/ethereum/go-ethereum/common/prque/sstack.go (generated, vendored, executable file, 106 lines)
@@ -0,0 +1,106 @@
// This is a duplicated and slightly modified version of "gopkg.in/karalabe/cookiejar.v2/collections/prque".

package prque

// The size of a block of data
const blockSize = 4096

// A prioritized item in the sorted stack.
//
// Note: priorities can "wrap around" the int64 range, a comes before b if (a.priority - b.priority) > 0.
// The difference between the lowest and highest priorities in the queue at any point should be less than 2^63.
type item struct {
    value    interface{}
    priority int64
}

// setIndexCallback is called when the element is moved to a new index.
// Providing setIndexCallback is optional, it is needed only if the application needs
// to delete elements other than the top one.
type setIndexCallback func(a interface{}, i int)

// Internal sortable stack data structure. Implements the Push and Pop ops for
// the stack (heap) functionality and the Len, Less and Swap methods for the
// sortability requirements of the heaps.
type sstack struct {
    setIndex setIndexCallback
    size     int
    capacity int
    offset   int

    blocks [][]*item
    active []*item
}

// Creates a new, empty stack.
func newSstack(setIndex setIndexCallback) *sstack {
    result := new(sstack)
    result.setIndex = setIndex
    result.active = make([]*item, blockSize)
    result.blocks = [][]*item{result.active}
    result.capacity = blockSize
    return result
}

// Pushes a value onto the stack, expanding it if necessary. Required by
// heap.Interface.
func (s *sstack) Push(data interface{}) {
    if s.size == s.capacity {
        s.active = make([]*item, blockSize)
        s.blocks = append(s.blocks, s.active)
        s.capacity += blockSize
        s.offset = 0
    } else if s.offset == blockSize {
        s.active = s.blocks[s.size/blockSize]
        s.offset = 0
    }
    if s.setIndex != nil {
        s.setIndex(data.(*item).value, s.size)
    }
    s.active[s.offset] = data.(*item)
    s.offset++
    s.size++
}

// Pops a value off the stack and returns it. Currently no shrinking is done.
// Required by heap.Interface.
func (s *sstack) Pop() (res interface{}) {
    s.size--
    s.offset--
    if s.offset < 0 {
        s.offset = blockSize - 1
        s.active = s.blocks[s.size/blockSize]
    }
    res, s.active[s.offset] = s.active[s.offset], nil
    if s.setIndex != nil {
        s.setIndex(res.(*item).value, -1)
    }
    return
}

// Returns the length of the stack. Required by sort.Interface.
func (s *sstack) Len() int {
    return s.size
}

// Compares the priority of two elements of the stack (higher is first).
// Required by sort.Interface.
func (s *sstack) Less(i, j int) bool {
    return (s.blocks[i/blockSize][i%blockSize].priority - s.blocks[j/blockSize][j%blockSize].priority) > 0
}

// Swaps two elements in the stack. Required by sort.Interface.
func (s *sstack) Swap(i, j int) {
    ib, io, jb, jo := i/blockSize, i%blockSize, j/blockSize, j%blockSize
    a, b := s.blocks[jb][jo], s.blocks[ib][io]
    if s.setIndex != nil {
        s.setIndex(a.value, i)
        s.setIndex(b.value, j)
    }
    s.blocks[ib][io], s.blocks[jb][jo] = a, b
}

// Resets the stack, effectively clearing its contents.
func (s *sstack) Reset() {
    *s = *newSstack(s.setIndex)
}
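An aside on the Less comparison above (illustrative, not from the diff): ordering by the sign of the int64 difference lets priorities wrap around the int64 range, as the vendored comment notes, provided any two live priorities are less than 2^63 apart.

package main

import (
    "fmt"
    "math"
)

// before reports whether priority a is served before b, using the same
// wrap-around comparison as sstack.Less above.
func before(a, b int64) bool {
    return a-b > 0
}

func main() {
    fmt.Println(before(10, 5)) // true: plain ordering
    // true: once wrapped, MinInt64 counts as one step past MaxInt64,
    // because MinInt64-MaxInt64 overflows to +1.
    fmt.Println(before(math.MinInt64, math.MaxInt64))
}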
@@ -387,22 +387,23 @@ func (c *Clique) snapshot(chain consensus.ChainReader, number uint64, hash commo
            break
        }
    }
    // If we're at block zero, make a snapshot
    if number == 0 {
        genesis := chain.GetHeaderByNumber(0)
        if err := c.VerifyHeader(chain, genesis, false); err != nil {
            return nil, err
    // If we're at an checkpoint block, make a snapshot if it's known
    if number%c.config.Epoch == 0 {
        checkpoint := chain.GetHeaderByNumber(number)
        if checkpoint != nil {
            hash := checkpoint.Hash()

            signers := make([]common.Address, (len(checkpoint.Extra)-extraVanity-extraSeal)/common.AddressLength)
            for i := 0; i < len(signers); i++ {
                copy(signers[i][:], checkpoint.Extra[extraVanity+i*common.AddressLength:])
            }
            snap = newSnapshot(c.config, c.signatures, number, hash, signers)
            if err := snap.store(c.db); err != nil {
                return nil, err
            }
            log.Info("Stored checkpoint snapshot to disk", "number", number, "hash", hash)
            break
        }
        signers := make([]common.Address, (len(genesis.Extra)-extraVanity-extraSeal)/common.AddressLength)
        for i := 0; i < len(signers); i++ {
            copy(signers[i][:], genesis.Extra[extraVanity+i*common.AddressLength:])
        }
        snap = newSnapshot(c.config, c.signatures, 0, genesis.Hash(), signers)
        if err := snap.store(c.db); err != nil {
            return nil, err
        }
        log.Trace("Stored genesis voting snapshot to disk")
        break
    }
    // No snapshot for this header, gather the header and move backward
    var header *types.Header
@@ -672,6 +673,11 @@ func CalcDifficulty(snap *Snapshot, signer common.Address) *big.Int {
    return new(big.Int).Set(diffNoTurn)
}

// Close implements consensus.Engine. It's a noop for clique as there is are no background threads.
func (c *Clique) Close() error {
    return nil
}

// APIs implements consensus.Engine, returning the user facing RPC API to allow
// controlling the signer voting.
func (c *Clique) APIs(chain consensus.ChainReader) []rpc.API {
@@ -96,6 +96,9 @@ type Engine interface {

    // APIs returns the RPC APIs this consensus engine provides.
    APIs(chain ChainReader) []rpc.API

    // Close terminates any background threads maintained by the consensus engine.
    Close() error
}

// PoW is a consensus engine based on proof-of-work.
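A hedged sketch (not part of the diff) of what the new Close requirement means for callers: whoever constructs an engine is expected to release its background resources on teardown. The helper below is illustrative.

package main

import (
    "github.com/ethereum/go-ethereum/consensus"
    "github.com/ethereum/go-ethereum/log"
)

// shutdownEngine is an illustrative helper: with Close now part of
// consensus.Engine, shutdown paths should close the engine so background
// goroutines (e.g. the ethash remote sealer) can exit cleanly.
func shutdownEngine(engine consensus.Engine) {
    if err := engine.Close(); err != nil {
        log.Warn("Failed to close consensus engine", "err", err)
    }
}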
@@ -0,0 +1,117 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package ethash

import (
    "errors"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/ethereum/go-ethereum/core/types"
)

var errEthashStopped = errors.New("ethash stopped")

// API exposes ethash related methods for the RPC interface.
type API struct {
    ethash *Ethash // Make sure the mode of ethash is normal.
}

// GetWork returns a work package for external miner.
//
// The work package consists of 3 strings:
// result[0] - 32 bytes hex encoded current block header pow-hash
// result[1] - 32 bytes hex encoded seed hash used for DAG
// result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
func (api *API) GetWork() ([3]string, error) {
    if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest {
        return [3]string{}, errors.New("not supported")
    }

    var (
        workCh = make(chan [3]string, 1)
        errc   = make(chan error, 1)
    )

    select {
    case api.ethash.fetchWorkCh <- &sealWork{errc: errc, res: workCh}:
    case <-api.ethash.exitCh:
        return [3]string{}, errEthashStopped
    }

    select {
    case work := <-workCh:
        return work, nil
    case err := <-errc:
        return [3]string{}, err
    }
}

// SubmitWork can be used by external miner to submit their POW solution.
// It returns an indication if the work was accepted.
// Note either an invalid solution, a stale work a non-existent work will return false.
func (api *API) SubmitWork(nonce types.BlockNonce, hash, digest common.Hash) bool {
    if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest {
        return false
    }

    var errc = make(chan error, 1)

    select {
    case api.ethash.submitWorkCh <- &mineResult{
        nonce:     nonce,
        mixDigest: digest,
        hash:      hash,
        errc:      errc,
    }:
    case <-api.ethash.exitCh:
        return false
    }

    err := <-errc
    return err == nil
}

// SubmitHashrate can be used for remote miners to submit their hash rate.
// This enables the node to report the combined hash rate of all miners
// which submit work through this node.
//
// It accepts the miner hash rate and an identifier which must be unique
// between nodes.
func (api *API) SubmitHashRate(rate hexutil.Uint64, id common.Hash) bool {
    if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest {
        return false
    }

    var done = make(chan struct{}, 1)

    select {
    case api.ethash.submitRateCh <- &hashrate{done: done, rate: uint64(rate), id: id}:
    case <-api.ethash.exitCh:
        return false
    }

    // Block until hash rate submitted successfully.
    <-done

    return true
}

// GetHashrate returns the current hashrate for local CPU miner and remote miner.
func (api *API) GetHashrate() uint64 {
    return uint64(api.ethash.Hashrate())
}
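A sketch (not part of the diff) of how an external miner might call the API above over JSON-RPC; the node URL is hypothetical, and eth_getWork follows the eth-namespace registration added further down in this patch.

package main

import (
    "fmt"
    "log"

    "github.com/ethereum/go-ethereum/rpc"
)

func main() {
    // Hypothetical node endpoint exposing the eth namespace.
    client, err := rpc.Dial("http://127.0.0.1:8545")
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()

    // result[0] pow-hash, result[1] seed hash, result[2] boundary target.
    var work [3]string
    if err := client.Call(&work, "eth_getWork"); err != nil {
        log.Fatal(err)
    }
    fmt.Println("header pow-hash:", work[0])
    fmt.Println("seed hash:      ", work[1])
    fmt.Println("target:         ", work[2])
}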
@@ -461,6 +461,13 @@ func calcDifficultyFrontier(time uint64, parent *types.Header) *big.Int {
// VerifySeal implements consensus.Engine, checking whether the given block satisfies
// the PoW difficulty requirements.
func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Header) error {
    return ethash.verifySeal(chain, header, false)
}

// verifySeal checks whether a block satisfies the PoW difficulty requirements,
// either using the usual ethash cache for it, or alternatively using a full DAG
// to make remote mining fast.
func (ethash *Ethash) verifySeal(chain consensus.ChainReader, header *types.Header, fulldag bool) error {
    // If we're running a fake PoW, accept any seal as valid
    if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake {
        time.Sleep(ethash.fakeDelay)
@@ -471,29 +478,52 @@ func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Head
    }
    // If we're running a shared PoW, delegate verification to it
    if ethash.shared != nil {
        return ethash.shared.VerifySeal(chain, header)
        return ethash.shared.verifySeal(chain, header, fulldag)
    }
    // Ensure that we have a valid difficulty for the block
    if header.Difficulty.Sign() <= 0 {
        return errInvalidDifficulty
    }
    // Recompute the digest and PoW value and verify against the header
    // Recompute the digest and PoW values
    number := header.Number.Uint64()

    cache := ethash.cache(number)
    size := datasetSize(number)
    if ethash.config.PowMode == ModeTest {
        size = 32 * 1024
    }
    digest, result := hashimotoLight(size, cache.cache, header.HashNoNonce().Bytes(), header.Nonce.Uint64())
    // Caches are unmapped in a finalizer. Ensure that the cache stays live
    // until after the call to hashimotoLight so it's not unmapped while being used.
    runtime.KeepAlive(cache)
    var (
        digest []byte
        result []byte
    )
    // If fast-but-heavy PoW verification was requested, use an ethash dataset
    if fulldag {
        dataset := ethash.dataset(number, true)
        if dataset.generated() {
            digest, result = hashimotoFull(dataset.dataset, header.HashNoNonce().Bytes(), header.Nonce.Uint64())

            // Datasets are unmapped in a finalizer. Ensure that the dataset stays alive
            // until after the call to hashimotoFull so it's not unmapped while being used.
            runtime.KeepAlive(dataset)
        } else {
            // Dataset not yet generated, don't hang, use a cache instead
            fulldag = false
        }
    }
    // If slow-but-light PoW verification was requested (or DAG not yet ready), use an ethash cache
    if !fulldag {
        cache := ethash.cache(number)

        size := datasetSize(number)
        if ethash.config.PowMode == ModeTest {
            size = 32 * 1024
        }
        digest, result = hashimotoLight(size, cache.cache, header.HashNoNonce().Bytes(), header.Nonce.Uint64())

        // Caches are unmapped in a finalizer. Ensure that the cache stays alive
        // until after the call to hashimotoLight so it's not unmapped while being used.
        runtime.KeepAlive(cache)
    }
    // Verify the calculated values against the ones provided in the header
    if !bytes.Equal(header.MixDigest[:], digest) {
        return errInvalidMixDigest
    }
    target := new(big.Int).Div(maxUint256, header.Difficulty)
    target := new(big.Int).Div(two256, header.Difficulty)
    if new(big.Int).SetBytes(result).Cmp(target) > 0 {
        return errInvalidPoW
    }
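For reference (illustrative, not from the patch): the final check above is simply result <= 2^256/difficulty. A standalone sketch of that arithmetic with math/big:

package main

import (
    "fmt"
    "math/big"
)

// meetsTarget reports whether a 32-byte ethash result satisfies the given
// difficulty, i.e. result <= 2^256/difficulty, mirroring the check above.
func meetsTarget(result []byte, difficulty *big.Int) bool {
    two256 := new(big.Int).Exp(big.NewInt(2), big.NewInt(256), nil)
    target := new(big.Int).Div(two256, difficulty)
    return new(big.Int).SetBytes(result).Cmp(target) <= 0
}

func main() {
    // A toy result of all 0x01 bytes (roughly 2^248) against two difficulties.
    result := make([]byte, 32)
    for i := range result {
        result[i] = 0x01
    }
    fmt.Println(meetsTarget(result, big.NewInt(2)))                        // true: target is 2^255
    fmt.Println(meetsTarget(result, new(big.Int).Lsh(big.NewInt(1), 250))) // false: target is only 64
}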
@ -29,11 +29,14 @@ import (
|
|||
"runtime"
|
||||
"strconv"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
mmap "github.com/edsrzf/mmap-go"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/consensus"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/metrics"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
|
@ -43,11 +46,11 @@ import (
|
|||
var ErrInvalidDumpMagic = errors.New("invalid dump magic")
|
||||
|
||||
var (
|
||||
// maxUint256 is a big integer representing 2^256-1
|
||||
maxUint256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
|
||||
// two256 is a big integer representing 2^256
|
||||
two256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
|
||||
|
||||
// sharedEthash is a full instance that can be shared between multiple users.
|
||||
sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal})
|
||||
sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal}, nil)
|
||||
|
||||
// algorithmRevision is the data structure version used for file naming.
|
||||
algorithmRevision = 23
|
||||
|
@ -279,6 +282,7 @@ type dataset struct {
|
|||
mmap mmap.MMap // Memory map itself to unmap before releasing
|
||||
dataset []uint32 // The actual cache data content
|
||||
once sync.Once // Ensures the cache is generated only once
|
||||
done uint32 // Atomic flag to determine generation status
|
||||
}
|
||||
|
||||
// newDataset creates a new ethash mining dataset and returns it as a plain Go
|
||||
|
@ -290,6 +294,9 @@ func newDataset(epoch uint64) interface{} {
|
|||
// generate ensures that the dataset content is generated before use.
|
||||
func (d *dataset) generate(dir string, limit int, test bool) {
|
||||
d.once.Do(func() {
|
||||
// Mark the dataset generated after we're done. This is needed for remote
|
||||
defer atomic.StoreUint32(&d.done, 1)
|
||||
|
||||
csize := cacheSize(d.epoch*epochLength + 1)
|
||||
dsize := datasetSize(d.epoch*epochLength + 1)
|
||||
seed := seedHash(d.epoch*epochLength + 1)
|
||||
|
@ -304,6 +311,8 @@ func (d *dataset) generate(dir string, limit int, test bool) {
|
|||
|
||||
d.dataset = make([]uint32, dsize/4)
|
||||
generateDataset(d.dataset, d.epoch, cache)
|
||||
|
||||
return
|
||||
}
|
||||
// Disk storage is needed, this will get fancy
|
||||
var endian string
|
||||
|
@ -346,6 +355,13 @@ func (d *dataset) generate(dir string, limit int, test bool) {
|
|||
})
|
||||
}
|
||||
|
||||
// generated returns whether this particular dataset finished generating already
|
||||
// or not (it may not have been started at all). This is useful for remote miners
|
||||
// to default to verification caches instead of blocking on DAG generations.
|
||||
func (d *dataset) generated() bool {
|
||||
return atomic.LoadUint32(&d.done) == 1
|
||||
}
|
||||
|
||||
// finalizer closes any file handlers and memory maps open.
|
||||
func (d *dataset) finalizer() {
|
||||
if d.mmap != nil {
|
||||
|
@ -389,6 +405,30 @@ type Config struct {
|
|||
PowMode Mode
|
||||
}
|
||||
|
||||
// mineResult wraps the pow solution parameters for the specified block.
|
||||
type mineResult struct {
|
||||
nonce types.BlockNonce
|
||||
mixDigest common.Hash
|
||||
hash common.Hash
|
||||
|
||||
errc chan error
|
||||
}
|
||||
|
||||
// hashrate wraps the hash rate submitted by the remote sealer.
|
||||
type hashrate struct {
|
||||
id common.Hash
|
||||
ping time.Time
|
||||
rate uint64
|
||||
|
||||
done chan struct{}
|
||||
}
|
||||
|
||||
// sealWork wraps a seal work package for remote sealer.
|
||||
type sealWork struct {
|
||||
errc chan error
|
||||
res chan [3]string
|
||||
}
|
||||
|
||||
// Ethash is a consensus engine based on proof-of-work implementing the ethash
|
||||
// algorithm.
|
||||
type Ethash struct {
|
||||
|
@ -403,16 +443,28 @@ type Ethash struct {
|
|||
update chan struct{} // Notification channel to update mining parameters
|
||||
hashrate metrics.Meter // Meter tracking the average hashrate
|
||||
|
||||
// Remote sealer related fields
|
||||
workCh chan *types.Block // Notification channel to push new work to remote sealer
|
||||
resultCh chan *types.Block // Channel used by mining threads to return result
|
||||
fetchWorkCh chan *sealWork // Channel used for remote sealer to fetch mining work
|
||||
submitWorkCh chan *mineResult // Channel used for remote sealer to submit their mining result
|
||||
fetchRateCh chan chan uint64 // Channel used to gather submitted hash rate for local or remote sealer.
|
||||
submitRateCh chan *hashrate // Channel used for remote sealer to submit their mining hashrate
|
||||
|
||||
// The fields below are hooks for testing
|
||||
shared *Ethash // Shared PoW verifier to avoid cache regeneration
|
||||
fakeFail uint64 // Block number which fails PoW check even in fake mode
|
||||
fakeDelay time.Duration // Time delay to sleep for before returning from verify
|
||||
|
||||
lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields
|
||||
lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields
|
||||
closeOnce sync.Once // Ensures exit channel will not be closed twice.
|
||||
exitCh chan chan error // Notification channel to exiting backend threads
|
||||
}
|
||||
|
||||
// New creates a full sized ethash PoW scheme.
|
||||
func New(config Config) *Ethash {
|
||||
// New creates a full sized ethash PoW scheme and starts a background thread for
|
||||
// remote mining, also optionally notifying a batch of remote services of new work
|
||||
// packages.
|
||||
func New(config Config, notify []string) *Ethash {
|
||||
if config.CachesInMem <= 0 {
|
||||
log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem)
|
||||
config.CachesInMem = 1
|
||||
|
@ -423,19 +475,43 @@ func New(config Config) *Ethash {
|
|||
if config.DatasetDir != "" && config.DatasetsOnDisk > 0 {
|
||||
log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk)
|
||||
}
|
||||
return &Ethash{
|
||||
config: config,
|
||||
caches: newlru("cache", config.CachesInMem, newCache),
|
||||
datasets: newlru("dataset", config.DatasetsInMem, newDataset),
|
||||
update: make(chan struct{}),
|
||||
hashrate: metrics.NewMeter(),
|
||||
ethash := &Ethash{
|
||||
config: config,
|
||||
caches: newlru("cache", config.CachesInMem, newCache),
|
||||
datasets: newlru("dataset", config.DatasetsInMem, newDataset),
|
||||
update: make(chan struct{}),
|
||||
hashrate: metrics.NewMeter(),
|
||||
workCh: make(chan *types.Block),
|
||||
resultCh: make(chan *types.Block),
|
||||
fetchWorkCh: make(chan *sealWork),
|
||||
submitWorkCh: make(chan *mineResult),
|
||||
fetchRateCh: make(chan chan uint64),
|
||||
submitRateCh: make(chan *hashrate),
|
||||
exitCh: make(chan chan error),
|
||||
}
|
||||
go ethash.remote(notify)
|
||||
return ethash
|
||||
}
|
||||
|
||||
// NewTester creates a small sized ethash PoW scheme useful only for testing
|
||||
// purposes.
|
||||
func NewTester() *Ethash {
|
||||
return New(Config{CachesInMem: 1, PowMode: ModeTest})
|
||||
func NewTester(notify []string) *Ethash {
|
||||
ethash := &Ethash{
|
||||
config: Config{PowMode: ModeTest},
|
||||
caches: newlru("cache", 1, newCache),
|
||||
datasets: newlru("dataset", 1, newDataset),
|
||||
update: make(chan struct{}),
|
||||
hashrate: metrics.NewMeter(),
|
||||
workCh: make(chan *types.Block),
|
||||
resultCh: make(chan *types.Block),
|
||||
fetchWorkCh: make(chan *sealWork),
|
||||
submitWorkCh: make(chan *mineResult),
|
||||
fetchRateCh: make(chan chan uint64),
|
||||
submitRateCh: make(chan *hashrate),
|
||||
exitCh: make(chan chan error),
|
||||
}
|
||||
go ethash.remote(notify)
|
||||
return ethash
|
||||
}
|
||||
|
||||
// NewFaker creates a ethash consensus engine with a fake PoW scheme that accepts
|
||||
|
@ -489,6 +565,22 @@ func NewShared() *Ethash {
|
|||
return &Ethash{shared: sharedEthash}
|
||||
}
|
||||
|
||||
// Close closes the exit channel to notify all backend threads exiting.
|
||||
func (ethash *Ethash) Close() error {
|
||||
var err error
|
||||
ethash.closeOnce.Do(func() {
|
||||
// Short circuit if the exit channel is not allocated.
|
||||
if ethash.exitCh == nil {
|
||||
return
|
||||
}
|
||||
errc := make(chan error)
|
||||
ethash.exitCh <- errc
|
||||
err = <-errc
|
||||
close(ethash.exitCh)
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// cache tries to retrieve a verification cache for the specified block number
|
||||
// by first checking against a list of in-memory caches, then against caches
|
||||
// stored on disk, and finally generating one if none can be found.
|
||||
|
@ -511,20 +603,34 @@ func (ethash *Ethash) cache(block uint64) *cache {
|
|||
// dataset tries to retrieve a mining dataset for the specified block number
|
||||
// by first checking against a list of in-memory datasets, then against DAGs
|
||||
// stored on disk, and finally generating one if none can be found.
|
||||
func (ethash *Ethash) dataset(block uint64) *dataset {
|
||||
//
|
||||
// If async is specified, not only the future but the current DAG is also
|
||||
// generates on a background thread.
|
||||
func (ethash *Ethash) dataset(block uint64, async bool) *dataset {
|
||||
// Retrieve the requested ethash dataset
|
||||
epoch := block / epochLength
|
||||
currentI, futureI := ethash.datasets.get(epoch)
|
||||
current := currentI.(*dataset)
|
||||
|
||||
// Wait for generation finish.
|
||||
current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
|
||||
// If async is specified, generate everything in a background thread
|
||||
if async && !current.generated() {
|
||||
go func() {
|
||||
current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
|
||||
|
||||
// If we need a new future dataset, now's a good time to regenerate it.
|
||||
if futureI != nil {
|
||||
future := futureI.(*dataset)
|
||||
go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
|
||||
if futureI != nil {
|
||||
future := futureI.(*dataset)
|
||||
future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
|
||||
}
|
||||
}()
|
||||
} else {
|
||||
// Either blocking generation was requested, or already done
|
||||
current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
|
||||
|
||||
if futureI != nil {
|
||||
future := futureI.(*dataset)
|
||||
go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
|
||||
}
|
||||
}
|
||||
|
||||
return current
|
||||
}
|
||||
|
||||
|
@ -561,14 +667,44 @@ func (ethash *Ethash) SetThreads(threads int) {
|
|||
|
||||
// Hashrate implements PoW, returning the measured rate of the search invocations
|
||||
// per second over the last minute.
|
||||
// Note the returned hashrate includes local hashrate, but also includes the total
|
||||
// hashrate of all remote miner.
|
||||
func (ethash *Ethash) Hashrate() float64 {
|
||||
return ethash.hashrate.Rate1()
|
||||
// Short circuit if we are run the ethash in normal/test mode.
|
||||
if ethash.config.PowMode != ModeNormal && ethash.config.PowMode != ModeTest {
|
||||
return ethash.hashrate.Rate1()
|
||||
}
|
||||
var res = make(chan uint64, 1)
|
||||
|
||||
select {
|
||||
case ethash.fetchRateCh <- res:
|
||||
case <-ethash.exitCh:
|
||||
// Return local hashrate only if ethash is stopped.
|
||||
return ethash.hashrate.Rate1()
|
||||
}
|
||||
|
||||
// Gather total submitted hash rate of remote sealers.
|
||||
return ethash.hashrate.Rate1() + float64(<-res)
|
||||
}
|
||||
|
||||
// APIs implements consensus.Engine, returning the user facing RPC APIs. Currently
|
||||
// that is empty.
|
||||
// APIs implements consensus.Engine, returning the user facing RPC APIs.
|
||||
func (ethash *Ethash) APIs(chain consensus.ChainReader) []rpc.API {
|
||||
return nil
|
||||
// In order to ensure backward compatibility, we exposes ethash RPC APIs
|
||||
// to both eth and ethash namespaces.
|
||||
return []rpc.API{
|
||||
{
|
||||
Namespace: "eth",
|
||||
Version: "1.0",
|
||||
Service: &API{ethash},
|
||||
Public: true,
|
||||
},
|
||||
{
|
||||
Namespace: "ethash",
|
||||
Version: "1.0",
|
||||
Service: &API{ethash},
|
||||
Public: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// SeedHash is the seed to use for generating a verification cache and the mining
|
||||
|
|
|
@ -17,12 +17,17 @@
|
|||
package ethash
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
crand "crypto/rand"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"math"
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/consensus"
|
||||
|
@ -30,6 +35,11 @@ import (
|
|||
"github.com/ethereum/go-ethereum/log"
|
||||
)
|
||||
|
||||
var (
|
||||
errNoMiningWork = errors.New("no mining work available yet")
|
||||
errInvalidSealResult = errors.New("invalid or stale proof-of-work solution")
|
||||
)
|
||||
|
||||
// Seal implements consensus.Engine, attempting to find a nonce that satisfies
|
||||
// the block's difficulty requirements.
|
||||
func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop <-chan struct{}) (*types.Block, error) {
|
||||
|
@ -45,7 +55,6 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop
|
|||
}
|
||||
// Create a runner and the multiple search threads it directs
|
||||
abort := make(chan struct{})
|
||||
found := make(chan *types.Block)
|
||||
|
||||
ethash.lock.Lock()
|
||||
threads := ethash.threads
|
||||
|
@ -64,12 +73,16 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop
|
|||
if threads < 0 {
|
||||
threads = 0 // Allows disabling local mining without extra logic around local/remote
|
||||
}
|
||||
// Push new work to remote sealer
|
||||
if ethash.workCh != nil {
|
||||
ethash.workCh <- block
|
||||
}
|
||||
var pend sync.WaitGroup
|
||||
for i := 0; i < threads; i++ {
|
||||
pend.Add(1)
|
||||
go func(id int, nonce uint64) {
|
||||
defer pend.Done()
|
||||
ethash.mine(block, id, nonce, abort, found)
|
||||
ethash.mine(block, id, nonce, abort, ethash.resultCh)
|
||||
}(i, uint64(ethash.rand.Int63()))
|
||||
}
|
||||
// Wait until sealing is terminated or a nonce is found
|
||||
|
@ -78,7 +91,7 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop
|
|||
case <-stop:
|
||||
// Outside abort, stop all miner threads
|
||||
close(abort)
|
||||
case result = <-found:
|
||||
case result = <-ethash.resultCh:
|
||||
// One of the threads found a block, abort all others
|
||||
close(abort)
|
||||
case <-ethash.update:
|
||||
|
@ -99,9 +112,9 @@ func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan s
|
|||
var (
|
||||
header = block.Header()
|
||||
hash = header.HashNoNonce().Bytes()
|
||||
target = new(big.Int).Div(maxUint256, header.Difficulty)
|
||||
target = new(big.Int).Div(two256, header.Difficulty)
|
||||
number = header.Number.Uint64()
|
||||
dataset = ethash.dataset(number)
|
||||
dataset = ethash.dataset(number, false)
|
||||
)
|
||||
// Start generating random nonces until we abort or find a good one
|
||||
var (
|
||||
|
@ -150,3 +163,164 @@ search:
|
|||
// during sealing so it's not unmapped while being read.
|
||||
runtime.KeepAlive(dataset)
|
||||
}
|
||||
|
||||
// remote is a standalone goroutine to handle remote mining related stuff.
|
||||
func (ethash *Ethash) remote(notify []string) {
|
||||
var (
|
||||
works = make(map[common.Hash]*types.Block)
|
||||
rates = make(map[common.Hash]hashrate)
|
||||
|
||||
currentBlock *types.Block
|
||||
currentWork [3]string
|
||||
|
||||
notifyTransport = &http.Transport{}
|
||||
notifyClient = &http.Client{
|
||||
Transport: notifyTransport,
|
||||
Timeout: time.Second,
|
||||
}
|
||||
notifyReqs = make([]*http.Request, len(notify))
|
||||
)
|
||||
// notifyWork notifies all the specified mining endpoints of the availability of
|
||||
// new work to be processed.
|
||||
notifyWork := func() {
|
||||
work := currentWork
|
||||
blob, _ := json.Marshal(work)
|
||||
|
||||
for i, url := range notify {
|
||||
// Terminate any previously pending request and create the new work
|
||||
if notifyReqs[i] != nil {
|
||||
notifyTransport.CancelRequest(notifyReqs[i])
|
||||
}
|
||||
notifyReqs[i], _ = http.NewRequest("POST", url, bytes.NewReader(blob))
|
||||
notifyReqs[i].Header.Set("Content-Type", "application/json")
|
||||
|
||||
// Push the new work concurrently to all the remote nodes
|
||||
go func(req *http.Request, url string) {
|
||||
res, err := notifyClient.Do(req)
|
||||
if err != nil {
|
||||
log.Warn("Failed to notify remote miner", "err", err)
|
||||
} else {
|
||||
log.Trace("Notified remote miner", "miner", url, "hash", log.Lazy{Fn: func() common.Hash { return common.HexToHash(work[0]) }}, "target", work[2])
|
||||
res.Body.Close()
|
||||
}
|
||||
}(notifyReqs[i], url)
|
||||
}
|
||||
}
|
||||
// makeWork creates a work package for external miner.
|
||||
//
|
||||
// The work package consists of 3 strings:
|
||||
// result[0], 32 bytes hex encoded current block header pow-hash
|
||||
// result[1], 32 bytes hex encoded seed hash used for DAG
|
||||
// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
|
||||
makeWork := func(block *types.Block) {
|
||||
hash := block.HashNoNonce()
|
||||
|
||||
currentWork[0] = hash.Hex()
|
||||
currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex()
|
||||
currentWork[2] = common.BytesToHash(new(big.Int).Div(two256, block.Difficulty()).Bytes()).Hex()
|
||||
|
||||
// Trace the seal work fetched by remote sealer.
|
||||
currentBlock = block
|
||||
works[hash] = block
|
||||
}
|
||||
// submitWork verifies the submitted pow solution, returning
|
||||
// whether the solution was accepted or not (not can be both a bad pow as well as
|
||||
// any other error, like no pending work or stale mining result).
|
||||
submitWork := func(nonce types.BlockNonce, mixDigest common.Hash, hash common.Hash) bool {
|
||||
// Make sure the work submitted is present
|
||||
block := works[hash]
|
||||
if block == nil {
|
||||
log.Info("Work submitted but none pending", "hash", hash)
|
||||
return false
|
||||
}
|
||||
// Verify the correctness of submitted result.
|
||||
header := block.Header()
|
||||
header.Nonce = nonce
|
||||
header.MixDigest = mixDigest
|
||||
|
||||
start := time.Now()
|
||||
if err := ethash.verifySeal(nil, header, true); err != nil {
|
||||
log.Warn("Invalid proof-of-work submitted", "hash", hash, "elapsed", time.Since(start), "err", err)
|
||||
return false
|
||||
}
|
||||
// Make sure the result channel is created.
|
||||
if ethash.resultCh == nil {
|
||||
log.Warn("Ethash result channel is empty, submitted mining result is rejected")
|
||||
return false
|
||||
}
|
||||
log.Trace("Verified correct proof-of-work", "hash", hash, "elapsed", time.Since(start))
|
||||
|
||||
// Solutions seems to be valid, return to the miner and notify acceptance.
|
||||
select {
|
||||
case ethash.resultCh <- block.WithSeal(header):
|
||||
delete(works, hash)
|
||||
return true
|
||||
default:
|
||||
log.Info("Work submitted is stale", "hash", hash)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
ticker := time.NewTicker(5 * time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case block := <-ethash.workCh:
|
||||
if currentBlock != nil && block.ParentHash() != currentBlock.ParentHash() {
|
||||
// Start new round mining, throw out all previous work.
|
||||
works = make(map[common.Hash]*types.Block)
|
||||
}
|
||||
// Update current work with new received block.
|
||||
// Note same work can be past twice, happens when changing CPU threads.
|
||||
makeWork(block)
|
||||
|
||||
// Notify and requested URLs of the new work availability
|
||||
notifyWork()
|
||||
|
||||
case work := <-ethash.fetchWorkCh:
|
||||
// Return current mining work to remote miner.
|
||||
if currentBlock == nil {
|
||||
work.errc <- errNoMiningWork
|
||||
} else {
|
||||
work.res <- currentWork
|
||||
}
|
||||
|
||||
case result := <-ethash.submitWorkCh:
|
||||
// Verify submitted PoW solution based on maintained mining blocks.
|
||||
if submitWork(result.nonce, result.mixDigest, result.hash) {
|
||||
result.errc <- nil
|
||||
} else {
|
||||
result.errc <- errInvalidSealResult
|
||||
}
|
||||
|
||||
case result := <-ethash.submitRateCh:
|
||||
// Trace remote sealer's hash rate by submitted value.
|
||||
rates[result.id] = hashrate{rate: result.rate, ping: time.Now()}
|
||||
close(result.done)
|
||||
|
||||
case req := <-ethash.fetchRateCh:
|
||||
// Gather all hash rate submitted by remote sealer.
|
||||
var total uint64
|
||||
for _, rate := range rates {
|
||||
// this could overflow
|
||||
total += rate.rate
|
||||
}
|
||||
req <- total
|
||||
|
||||
case <-ticker.C:
|
||||
// Clear stale submitted hash rate.
|
||||
for id, rate := range rates {
|
||||
if time.Since(rate.ping) > 10*time.Second {
|
||||
delete(rates, id)
|
||||
}
|
||||
}
|
||||
|
||||
case errc := <-ethash.exitCh:
|
||||
// Exit remote loop if ethash is closed and return relevant error.
|
||||
errc <- nil
|
||||
log.Trace("Ethash remote sealer is exiting")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -314,7 +314,7 @@ func (c *Console) Interactive() {
        input     = ""                // Current user input
        scheduler = make(chan string) // Channel to send the next prompt on and receive the input
    )
    // Start a goroutine to listen for promt requests and send back inputs
    // Start a goroutine to listen for prompt requests and send back inputs
    go func() {
        for {
            // Read the next user input
@@ -27,7 +27,7 @@ import (
// Only this reader may be used for input because it keeps an internal buffer.
var Stdin = newTerminalPrompter()

// UserPrompter defines the methods needed by the console to promt the user for
// UserPrompter defines the methods needed by the console to prompt the user for
// various types of inputs.
type UserPrompter interface {
    // PromptInput displays the given prompt to the user and requests some textual
@@ -40,7 +40,7 @@ var (
)

func main() {
    backend := backends.NewSimulatedBackend(testAlloc)
    backend := backends.NewSimulatedBackend(testAlloc, uint64(100000000))
    auth := bind.NewKeyedTransactor(testKey)

    // Deploy the contract, get the code.
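This hunk is the reason for the last bullet of the commit message: in v1.8.14 the simulated backend takes the block gas limit as a second argument. A minimal hedged sketch of the updated call; the key, balance and gas limit values are illustrative.

package main

import (
    "math/big"

    "github.com/ethereum/go-ethereum/accounts/abi/bind"
    "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/crypto"
)

func main() {
    // Illustrative funded test account.
    key, _ := crypto.GenerateKey()
    auth := bind.NewKeyedTransactor(key)

    alloc := core.GenesisAlloc{
        auth.From: {Balance: big.NewInt(1000000000000000000)},
    }

    // v1.8.14: the block gas limit is now passed explicitly.
    backend := backends.NewSimulatedBackend(alloc, uint64(100000000))
    _ = backend
}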
@@ -899,9 +899,7 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
    if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
        return NonStatTy, err
    }
    // Write other block data using a batch.
    batch := bc.db.NewBatch()
    rawdb.WriteBlock(batch, block)
    rawdb.WriteBlock(bc.db, block)

    root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
    if err != nil {
@@ -955,6 +953,9 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
            }
        }
    }

    // Write other block data using a batch.
    batch := bc.db.NewBatch()
    rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)

    // If the total difficulty is higher than our known, add it to the canonical chain
@ -17,6 +17,7 @@
|
|||
package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
@ -37,11 +38,11 @@ import (
|
|||
type ChainIndexerBackend interface {
|
||||
// Reset initiates the processing of a new chain segment, potentially terminating
|
||||
// any partially completed operations (in case of a reorg).
|
||||
Reset(section uint64, prevHead common.Hash) error
|
||||
Reset(ctx context.Context, section uint64, prevHead common.Hash) error
|
||||
|
||||
// Process crunches through the next header in the chain segment. The caller
|
||||
// will ensure a sequential order of headers.
|
||||
Process(header *types.Header)
|
||||
Process(ctx context.Context, header *types.Header) error
|
||||
|
||||
// Commit finalizes the section metadata and stores it into the database.
|
||||
Commit() error
|
||||
|
@ -71,9 +72,11 @@ type ChainIndexer struct {
|
|||
backend ChainIndexerBackend // Background processor generating the index data content
|
||||
children []*ChainIndexer // Child indexers to cascade chain updates to
|
||||
|
||||
active uint32 // Flag whether the event loop was started
|
||||
update chan struct{} // Notification channel that headers should be processed
|
||||
quit chan chan error // Quit channel to tear down running goroutines
|
||||
active uint32 // Flag whether the event loop was started
|
||||
update chan struct{} // Notification channel that headers should be processed
|
||||
quit chan chan error // Quit channel to tear down running goroutines
|
||||
ctx context.Context
|
||||
ctxCancel func()
|
||||
|
||||
sectionSize uint64 // Number of blocks in a single chain segment to process
|
||||
confirmsReq uint64 // Number of confirmations before processing a completed segment
|
||||
|
@ -105,6 +108,8 @@ func NewChainIndexer(chainDb, indexDb ethdb.Database, backend ChainIndexerBacken
|
|||
}
|
||||
// Initialize database dependent fields and start the updater
|
||||
c.loadValidSections()
|
||||
c.ctx, c.ctxCancel = context.WithCancel(context.Background())
|
||||
|
||||
go c.updateLoop()
|
||||
|
||||
return c
|
||||
|
@ -138,6 +143,8 @@ func (c *ChainIndexer) Start(chain ChainIndexerChain) {
|
|||
func (c *ChainIndexer) Close() error {
|
||||
var errs []error
|
||||
|
||||
c.ctxCancel()
|
||||
|
||||
// Tear down the primary update loop
|
||||
errc := make(chan error)
|
||||
c.quit <- errc
|
||||
|
@ -297,6 +304,12 @@ func (c *ChainIndexer) updateLoop() {
|
|||
c.lock.Unlock()
|
||||
newHead, err := c.processSection(section, oldHead)
|
||||
if err != nil {
|
||||
select {
|
||||
case <-c.ctx.Done():
|
||||
<-c.quit <- nil
|
||||
return
|
||||
default:
|
||||
}
|
||||
c.log.Error("Section processing failed", "error", err)
|
||||
}
|
||||
c.lock.Lock()
|
||||
|
@ -344,7 +357,7 @@ func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (com
|
|||
|
||||
// Reset and partial processing
|
||||
|
||||
if err := c.backend.Reset(section, lastHead); err != nil {
|
||||
if err := c.backend.Reset(c.ctx, section, lastHead); err != nil {
|
||||
c.setValidSections(0)
|
||||
return common.Hash{}, err
|
||||
}
|
||||
|
@ -360,11 +373,12 @@ func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (com
|
|||
} else if header.ParentHash != lastHead {
|
||||
return common.Hash{}, fmt.Errorf("chain reorged during section processing")
|
||||
}
|
||||
c.backend.Process(header)
|
||||
if err := c.backend.Process(c.ctx, header); err != nil {
|
||||
return common.Hash{}, err
|
||||
}
|
||||
lastHead = header.Hash()
|
||||
}
|
||||
if err := c.backend.Commit(); err != nil {
|
||||
c.log.Error("Section commit failed", "error", err)
|
||||
return common.Hash{}, err
|
||||
}
|
||||
return lastHead, nil
|
||||
|
|
|
@@ -29,9 +29,6 @@ type PendingLogsEvent struct {
    Logs []*types.Log
}

// PendingStateEvent is posted pre mining and notifies of pending state changes.
type PendingStateEvent struct{}

// NewMinedBlockEvent is posted when a block has been imported.
type NewMinedBlockEvent struct{ Block *types.Block }
@@ -22,7 +22,7 @@ import (
    "github.com/ethereum/go-ethereum/core/types"
)

// senderCacher is a concurrent tranaction sender recoverer anc cacher.
// senderCacher is a concurrent transaction sender recoverer anc cacher.
var senderCacher = newTxSenderCacher(runtime.NumCPU())

// txSenderCacherRequest is a request for recovering transaction senders with a
@@ -45,7 +45,7 @@ type txSenderCacher struct {
}

// newTxSenderCacher creates a new transaction sender background cacher and starts
// as many procesing goroutines as allowed by the GOMAXPROCS on construction.
// as many processing goroutines as allowed by the GOMAXPROCS on construction.
func newTxSenderCacher(threads int) *txSenderCacher {
    cacher := &txSenderCacher{
        tasks: make(chan *txSenderCacherRequest, threads),
|
|||
|
||||
// TxPoolConfig are the configuration parameters of the transaction pool.
|
||||
type TxPoolConfig struct {
|
||||
NoLocals bool // Whether local transaction handling should be disabled
|
||||
Journal string // Journal of local transactions to survive node restarts
|
||||
Rejournal time.Duration // Time interval to regenerate the local transaction journal
|
||||
Locals []common.Address // Addresses that should be treated by default as local
|
||||
NoLocals bool // Whether local transaction handling should be disabled
|
||||
Journal string // Journal of local transactions to survive node restarts
|
||||
Rejournal time.Duration // Time interval to regenerate the local transaction journal
|
||||
|
||||
PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
|
||||
PriceBump uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)
|
||||
|
@ -231,6 +232,10 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain block
|
|||
gasPrice: new(big.Int).SetUint64(config.PriceLimit),
|
||||
}
|
||||
pool.locals = newAccountSet(pool.signer)
|
||||
for _, addr := range config.Locals {
|
||||
log.Info("Setting new local account", "address", addr)
|
||||
pool.locals.add(addr)
|
||||
}
|
||||
pool.priced = newTxPricedList(pool.all)
|
||||
pool.reset(nil, chain.CurrentBlock().Header())
|
||||
|
||||
|
@ -534,6 +539,14 @@ func (pool *TxPool) Pending() (map[common.Address]types.Transactions, error) {
|
|||
return pending, nil
|
||||
}
|
||||
|
||||
// Locals retrieves the accounts currently considered local by the pool.
|
||||
func (pool *TxPool) Locals() []common.Address {
|
||||
pool.mu.Lock()
|
||||
defer pool.mu.Unlock()
|
||||
|
||||
return pool.locals.flatten()
|
||||
}
|
||||
|
||||
// local retrieves all currently known local transactions, groupped by origin
|
||||
// account and sorted by nonce. The returned transaction set is a copy and can be
|
||||
// freely modified by calling code.
|
||||
|
@ -665,7 +678,10 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) {
|
|||
}
|
||||
// Mark local addresses and journal local transactions
|
||||
if local {
|
||||
pool.locals.add(from)
|
||||
if !pool.locals.contains(from) {
|
||||
log.Info("Setting new local account", "address", from)
|
||||
pool.locals.add(from)
|
||||
}
|
||||
}
|
||||
pool.journalTx(from, tx)
|
||||
|
||||
|
@ -1138,6 +1154,7 @@ func (a addressesByHeartbeat) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
|||
type accountSet struct {
|
||||
accounts map[common.Address]struct{}
|
||||
signer types.Signer
|
||||
cache *[]common.Address
|
||||
}
|
||||
|
||||
// newAccountSet creates a new address set with an associated signer for sender
|
||||
|
@ -1167,6 +1184,20 @@ func (as *accountSet) containsTx(tx *types.Transaction) bool {
|
|||
// add inserts a new address into the set to track.
|
||||
func (as *accountSet) add(addr common.Address) {
|
||||
as.accounts[addr] = struct{}{}
|
||||
as.cache = nil
|
||||
}
|
||||
|
||||
// flatten returns the list of addresses within this set, also caching it for later
|
||||
// reuse. The returned slice should not be changed!
|
||||
func (as *accountSet) flatten() []common.Address {
|
||||
if as.cache == nil {
|
||||
accounts := make([]common.Address, 0, len(as.accounts))
|
||||
for account := range as.accounts {
|
||||
accounts = append(accounts, account)
|
||||
}
|
||||
as.cache = &accounts
|
||||
}
|
||||
return *as.cache
|
||||
}
|
||||
|
||||
// txLookup is used internally by TxPool to track transactions while allowing lookup without
|
||||
|
|
|
@@ -119,7 +119,7 @@ func isProtectedV(V *big.Int) bool {
        v := V.Uint64()
        return v != 27 && v != 28
    }
    // anything not 27 or 28 are considered unprotected
    // anything not 27 or 28 is considered protected
    return true
}
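As background for the corrected comment above (illustrative, not part of the patch): legacy signatures carry V = 27 or 28, while EIP-155 replay-protected signatures use V = chainID*2 + 35 or 36, so any other value is treated as protected.

package main

import (
    "fmt"
    "math/big"
)

// isProtected mirrors the logic above: only V values 27 and 28 denote a
// pre-EIP-155, replay-unprotected signature.
func isProtected(v *big.Int) bool {
    if v.BitLen() <= 8 {
        u := v.Uint64()
        return u != 27 && u != 28
    }
    // anything not 27 or 28 is considered protected
    return true
}

func main() {
    chainID := big.NewInt(1)
    // EIP-155: V = chainID*2 + 35 (or 36).
    v := new(big.Int).Add(new(big.Int).Mul(chainID, big.NewInt(2)), big.NewInt(35))

    fmt.Println(isProtected(big.NewInt(27))) // false: legacy signature
    fmt.Println(isProtected(v))              // true: replay-protected (V = 37 on mainnet)
}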
@@ -184,7 +184,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
        precompiles = PrecompiledContractsByzantium
    }
    if precompiles[addr] == nil && evm.ChainConfig().IsEIP158(evm.BlockNumber) && value.Sign() == 0 {
        // Calling a non existing account, don't do antything, but ping the tracer
        // Calling a non existing account, don't do anything, but ping the tracer
        if evm.vmConfig.Debug && evm.depth == 0 {
            evm.vmConfig.Tracer.CaptureStart(caller.Address(), addr, false, input, gas, value)
            evm.vmConfig.Tracer.CaptureEnd(ret, 0, 0, nil)
@@ -427,7 +427,7 @@ func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.I

// Create2 creates a new contract using code as deployment code.
//
// The different between Create2 with Create is Create2 uses sha3(msg.sender ++ salt ++ init_code)[12:]
// The different between Create2 with Create is Create2 uses sha3(0xff ++ msg.sender ++ salt ++ sha3(init_code))[12:]
// instead of the usual sender-and-nonce-hash as the address where the contract is initialized at.
func (evm *EVM) Create2(caller ContractRef, code []byte, gas uint64, endowment *big.Int, salt *big.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) {
    contractAddr = crypto.CreateAddress2(caller.Address(), common.BigToHash(salt), code)
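A hedged sketch (not part of the patch) of the corrected CREATE2 formula quoted in the comment above, computed by hand with Keccak-256; the sender, salt and init code values are made up.

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/crypto"
)

// create2Address computes sha3(0xff ++ sender ++ salt ++ sha3(initCode))[12:],
// the formula given in the updated Create2 comment.
func create2Address(sender common.Address, salt common.Hash, initCode []byte) common.Address {
    hash := crypto.Keccak256(
        []byte{0xff},
        sender.Bytes(),
        salt.Bytes(),
        crypto.Keccak256(initCode),
    )
    return common.BytesToAddress(hash[12:])
}

func main() {
    sender := common.HexToAddress("0x0000000000000000000000000000000000001234") // made-up sender
    salt := common.BigToHash(common.Big1)                                        // made-up salt
    initCode := []byte{0x60, 0x00, 0x60, 0x00}                                   // made-up init code

    fmt.Println(create2Address(sender, salt, initCode).Hex())
}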
@@ -0,0 +1,28 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Copyright (c) 2018 Péter Szilágyi. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@ -1,18 +1,6 @@
|
|||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
// Copyright 2018 Péter Szilágyi. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be found
|
||||
// in the LICENSE file.
|
||||
|
||||
// +build amd64 arm64
|
||||
|
||||
|
|
|
@ -1,18 +1,6 @@
|
|||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
// Copyright 2018 Péter Szilágyi. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be found
|
||||
// in the LICENSE file.
|
||||
|
||||
// +build gofuzz
|
||||
|
||||
|
|
|
@ -1,18 +1,6 @@
|
|||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
// Copyright 2018 Péter Szilágyi. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be found
|
||||
// in the LICENSE file.
|
||||
|
||||
// +build !amd64,!arm64
|
||||
|
||||
|
|
vendor/github.com/ethereum/go-ethereum/crypto/bn256/cloudflare/LICENSE (generated, vendored, normal file, 27 lines)
@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -110,7 +110,7 @@ TEXT ·gfpMul(SB),0,$160-24
    MOVQ b+16(FP), SI

    // Jump to a slightly different implementation if MULX isn't supported.
    CMPB runtime·support_bmi2(SB), $0
    CMPB ·hasBMI2(SB), $0
    JE   nobmi2Mul

    mulBMI2(0(DI),8(DI),16(DI),24(DI), 0(SI))
@ -5,6 +5,13 @@ package bn256
|
|||
// This file contains forward declarations for the architecture-specific
|
||||
// assembly implementations of these functions, provided that they exist.
|
||||
|
||||
import (
|
||||
"golang.org/x/sys/cpu"
|
||||
)
|
||||
|
||||
//nolint:varcheck
|
||||
var hasBMI2 = cpu.X86.HasBMI2
|
||||
|
||||
// go:noescape
|
||||
func gfpNeg(c, a *gfP)
|
||||
|
||||
|
|
|
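The decls.go hunk above replaces the assembly's runtime·support_bmi2 check with a package-level hasBMI2 flag fed by golang.org/x/sys/cpu (which is why the cpu package now appears in the dependency list). A minimal sketch of that detection pattern, not the vendored code itself; the helper function is invented for illustration:

package sketch

import "golang.org/x/sys/cpu"

// hasBMI2 mirrors the flag added in crypto/bn256/cloudflare; the assembly
// consults it (CMPB ·hasBMI2(SB), $0) to pick the MULX-based multiplication.
var hasBMI2 = cpu.X86.HasBMI2

// mulStrategy is an illustrative helper showing how such a flag is typically
// consulted from Go code.
func mulStrategy() string {
    if hasBMI2 {
        return "mulBMI2"
    }
    return "generic"
}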
@@ -2,7 +2,7 @@
|
|||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package bn256 implements a particular bilinear group at the 128-bit security level.
|
||||
// Package bn256 implements a particular bilinear group.
|
||||
//
|
||||
// Bilinear groups are the basis of many of the new cryptographic protocols
|
||||
// that have been proposed over the past decade. They consist of a triplet of
|
||||
|
@@ -14,6 +14,10 @@
|
|||
// Barreto-Naehrig curve as described in
|
||||
// http://cryptojedi.org/papers/dclxvi-20100714.pdf. Its output is compatible
|
||||
// with the implementation described in that paper.
|
||||
//
|
||||
// (This package previously claimed to operate at a 128-bit security level.
|
||||
// However, recent improvements in attacks mean that is no longer true. See
|
||||
// https://moderncrypto.org/mail-archive/curves/2016/000740.html.)
|
||||
package bn256
|
||||
|
||||
import (
|
||||
|
@@ -50,8 +54,8 @@ func RandomG1(r io.Reader) (*big.Int, *G1, error) {
|
|||
return k, new(G1).ScalarBaseMult(k), nil
|
||||
}
|
||||
|
||||
func (g *G1) String() string {
|
||||
return "bn256.G1" + g.p.String()
|
||||
func (e *G1) String() string {
|
||||
return "bn256.G1" + e.p.String()
|
||||
}
|
||||
|
||||
// CurvePoints returns p's curve points in big integer
|
||||
|
@@ -98,15 +102,19 @@ func (e *G1) Neg(a *G1) *G1 {
|
|||
}
|
||||
|
||||
// Marshal converts n to a byte slice.
|
||||
func (n *G1) Marshal() []byte {
|
||||
n.p.MakeAffine(nil)
|
||||
|
||||
xBytes := new(big.Int).Mod(n.p.x, P).Bytes()
|
||||
yBytes := new(big.Int).Mod(n.p.y, P).Bytes()
|
||||
|
||||
func (e *G1) Marshal() []byte {
|
||||
// Each value is a 256-bit number.
|
||||
const numBytes = 256 / 8
|
||||
|
||||
if e.p.IsInfinity() {
|
||||
return make([]byte, numBytes*2)
|
||||
}
|
||||
|
||||
e.p.MakeAffine(nil)
|
||||
|
||||
xBytes := new(big.Int).Mod(e.p.x, P).Bytes()
|
||||
yBytes := new(big.Int).Mod(e.p.y, P).Bytes()
|
||||
|
||||
ret := make([]byte, numBytes*2)
|
||||
copy(ret[1*numBytes-len(xBytes):], xBytes)
|
||||
copy(ret[2*numBytes-len(yBytes):], yBytes)
|
||||
|
@@ -175,8 +183,8 @@ func RandomG2(r io.Reader) (*big.Int, *G2, error) {
|
|||
return k, new(G2).ScalarBaseMult(k), nil
|
||||
}
|
||||
|
||||
func (g *G2) String() string {
|
||||
return "bn256.G2" + g.p.String()
|
||||
func (e *G2) String() string {
|
||||
return "bn256.G2" + e.p.String()
|
||||
}
|
||||
|
||||
// CurvePoints returns the curve points of p which includes the real
|
||||
|
@@ -216,6 +224,13 @@ func (e *G2) Add(a, b *G2) *G2 {
|
|||
|
||||
// Marshal converts n into a byte slice.
|
||||
func (n *G2) Marshal() []byte {
|
||||
// Each value is a 256-bit number.
|
||||
const numBytes = 256 / 8
|
||||
|
||||
if n.p.IsInfinity() {
|
||||
return make([]byte, numBytes*4)
|
||||
}
|
||||
|
||||
n.p.MakeAffine(nil)
|
||||
|
||||
xxBytes := new(big.Int).Mod(n.p.x.x, P).Bytes()
|
||||
|
@@ -223,9 +238,6 @@ func (n *G2) Marshal() []byte {
|
|||
yxBytes := new(big.Int).Mod(n.p.y.x, P).Bytes()
|
||||
yyBytes := new(big.Int).Mod(n.p.y.y, P).Bytes()
|
||||
|
||||
// Each value is a 256-bit number.
|
||||
const numBytes = 256 / 8
|
||||
|
||||
ret := make([]byte, numBytes*4)
|
||||
copy(ret[1*numBytes-len(xxBytes):], xxBytes)
|
||||
copy(ret[2*numBytes-len(xyBytes):], xyBytes)
|
||||
|
|
|
@@ -245,11 +245,19 @@ func (c *curvePoint) Mul(a *curvePoint, scalar *big.Int, pool *bnPool) *curvePoi
|
|||
return c
|
||||
}
|
||||
|
||||
// MakeAffine converts c to affine form and returns c. If c is ∞, then it sets
|
||||
// c to 0 : 1 : 0.
|
||||
func (c *curvePoint) MakeAffine(pool *bnPool) *curvePoint {
|
||||
if words := c.z.Bits(); len(words) == 1 && words[0] == 1 {
|
||||
return c
|
||||
}
|
||||
|
||||
if c.IsInfinity() {
|
||||
c.x.SetInt64(0)
|
||||
c.y.SetInt64(1)
|
||||
c.z.SetInt64(0)
|
||||
c.t.SetInt64(0)
|
||||
return c
|
||||
}
|
||||
zInv := pool.Get().ModInverse(c.z, P)
|
||||
t := pool.Get().Mul(c.y, zInv)
|
||||
t.Mod(t, P)
|
||||
|
|
|
@@ -225,11 +225,19 @@ func (c *twistPoint) Mul(a *twistPoint, scalar *big.Int, pool *bnPool) *twistPoi
|
|||
return c
|
||||
}
|
||||
|
||||
// MakeAffine converts c to affine form and returns c. If c is ∞, then it sets
|
||||
// c to 0 : 1 : 0.
|
||||
func (c *twistPoint) MakeAffine(pool *bnPool) *twistPoint {
|
||||
if c.z.IsOne() {
|
||||
return c
|
||||
}
|
||||
|
||||
if c.IsInfinity() {
|
||||
c.x.SetZero()
|
||||
c.y.SetOne()
|
||||
c.z.SetZero()
|
||||
c.t.SetZero()
|
||||
return c
|
||||
}
|
||||
zInv := newGFp2(pool).Invert(c.z, pool)
|
||||
t := newGFp2(pool).Mul(c.y, zInv, pool)
|
||||
zInv2 := newGFp2(pool).Square(zInv, pool)
|
||||
|
|
|
@@ -78,8 +78,8 @@ func CreateAddress(b common.Address, nonce uint64) common.Address {
|
|||
|
||||
// CreateAddress2 creates an ethereum address given the address bytes, initial
|
||||
// contract code and a salt.
|
||||
func CreateAddress2(b common.Address, salt common.Hash, code []byte) common.Address {
|
||||
return common.BytesToAddress(Keccak256([]byte{0xff}, b.Bytes(), salt.Bytes(), code)[12:])
|
||||
func CreateAddress2(b common.Address, salt [32]byte, code []byte) common.Address {
|
||||
return common.BytesToAddress(Keccak256([]byte{0xff}, b.Bytes(), salt[:], Keccak256(code))[12:])
|
||||
}
|
||||
|
||||
// ToECDSA creates a private key with the given D value.
|
||||
|
|
|
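With the change above, crypto.CreateAddress2 takes the salt as a raw [32]byte and hashes the init code itself (Keccak256(code)), in line with EIP-1014. A hedged caller sketch against the signature shown in this hunk; the deployer address and init code bytes are made-up placeholders:

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/crypto"
)

func main() {
    deployer := common.HexToAddress("0x00000000000000000000000000000000deadbeef") // placeholder
    var salt [32]byte                                // now a plain [32]byte instead of common.Hash
    initCode := []byte{0x60, 0x00, 0x60, 0x00, 0xf3} // placeholder init code

    // The helper hashes initCode internally, so the raw code is passed in.
    addr := crypto.CreateAddress2(deployer, salt, initCode)
    fmt.Println("CREATE2 address:", addr.Hex())
}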
@@ -25,6 +25,7 @@ import (
|
|||
"math/big"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
|
@@ -34,7 +35,6 @@ import (
|
|||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/internal/ethapi"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/miner"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
|
@@ -70,16 +70,12 @@ func (api *PublicEthereumAPI) Hashrate() hexutil.Uint64 {
|
|||
// PublicMinerAPI provides an API to control the miner.
|
||||
// It offers only methods that operate on data that pose no security risk when it is publicly accessible.
|
||||
type PublicMinerAPI struct {
|
||||
e *Ethereum
|
||||
agent *miner.RemoteAgent
|
||||
e *Ethereum
|
||||
}
|
||||
|
||||
// NewPublicMinerAPI create a new PublicMinerAPI instance.
|
||||
func NewPublicMinerAPI(e *Ethereum) *PublicMinerAPI {
|
||||
agent := miner.NewRemoteAgent(e.BlockChain(), e.Engine())
|
||||
e.Miner().Register(agent)
|
||||
|
||||
return &PublicMinerAPI{e, agent}
|
||||
return &PublicMinerAPI{e}
|
||||
}
|
||||
|
||||
// Mining returns an indication if this node is currently mining.
|
||||
|
@@ -87,37 +83,6 @@ func (api *PublicMinerAPI) Mining() bool {
|
|||
return api.e.IsMining()
|
||||
}
|
||||
|
||||
// SubmitWork can be used by external miner to submit their POW solution. It returns an indication if the work was
|
||||
// accepted. Note, this is not an indication if the provided work was valid!
|
||||
func (api *PublicMinerAPI) SubmitWork(nonce types.BlockNonce, solution, digest common.Hash) bool {
|
||||
return api.agent.SubmitWork(nonce, digest, solution)
|
||||
}
|
||||
|
||||
// GetWork returns a work package for external miner. The work package consists of 3 strings
|
||||
// result[0], 32 bytes hex encoded current block header pow-hash
|
||||
// result[1], 32 bytes hex encoded seed hash used for DAG
|
||||
// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
|
||||
func (api *PublicMinerAPI) GetWork() ([3]string, error) {
|
||||
if !api.e.IsMining() {
|
||||
if err := api.e.StartMining(false); err != nil {
|
||||
return [3]string{}, err
|
||||
}
|
||||
}
|
||||
work, err := api.agent.GetWork()
|
||||
if err != nil {
|
||||
return work, fmt.Errorf("mining not ready: %v", err)
|
||||
}
|
||||
return work, nil
|
||||
}
|
||||
|
||||
// SubmitHashrate can be used for remote miners to submit their hash rate. This enables the node to report the combined
|
||||
// hash rate of all miners which submit work through this node. It accepts the miner hash rate and an identifier which
|
||||
// must be unique between nodes.
|
||||
func (api *PublicMinerAPI) SubmitHashrate(hashrate hexutil.Uint64, id common.Hash) bool {
|
||||
api.agent.SubmitHashrate(id, uint64(hashrate))
|
||||
return true
|
||||
}
|
||||
|
||||
// PrivateMinerAPI provides private RPC methods to control the miner.
|
||||
// These methods can be abused by external users and must be considered insecure for use by untrusted users.
|
||||
type PrivateMinerAPI struct {
|
||||
|
@@ -132,7 +97,8 @@ func NewPrivateMinerAPI(e *Ethereum) *PrivateMinerAPI {
|
|||
// Start the miner with the given number of threads. If threads is nil the number
|
||||
// of workers started is equal to the number of logical CPUs that are usable by
|
||||
// this process. If mining is already running, this method adjust the number of
|
||||
// threads allowed to use.
|
||||
// threads allowed to use and updates the minimum price required by the transaction
|
||||
// pool.
|
||||
func (api *PrivateMinerAPI) Start(threads *int) error {
|
||||
// Set the number of threads if the seal engine supports it
|
||||
if threads == nil {
|
||||
|
@@ -153,7 +119,6 @@ func (api *PrivateMinerAPI) Start(threads *int) error {
|
|||
api.e.lock.RLock()
|
||||
price := api.e.gasPrice
|
||||
api.e.lock.RUnlock()
|
||||
|
||||
api.e.txPool.SetGasPrice(price)
|
||||
return api.e.StartMining(true)
|
||||
}
|
||||
|
@@ -196,9 +161,14 @@ func (api *PrivateMinerAPI) SetEtherbase(etherbase common.Address) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
// SetRecommitInterval updates the interval for miner sealing work recommitting.
|
||||
func (api *PrivateMinerAPI) SetRecommitInterval(interval int) {
|
||||
api.e.Miner().SetRecommitInterval(time.Duration(interval) * time.Millisecond)
|
||||
}
|
||||
|
||||
// GetHashrate returns the current hashrate of the miner.
|
||||
func (api *PrivateMinerAPI) GetHashrate() uint64 {
|
||||
return uint64(api.e.miner.HashRate())
|
||||
return api.e.miner.HashRate()
|
||||
}
|
||||
|
||||
// PrivateAdminAPI is the collection of Ethereum full node-related APIs
|
||||
|
|
|
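The removed PublicMinerAPI work and hashrate methods are superseded by the ethash RPC methods this same commit registers further down in internal/web3ext (ethash_getWork, ethash_submitWork, ethash_submitHashRate). A hedged sketch of an external miner fetching work over RPC; the endpoint URL is a placeholder:

package main

import (
    "fmt"
    "log"

    "github.com/ethereum/go-ethereum/rpc"
)

func main() {
    client, err := rpc.Dial("http://127.0.0.1:8545") // hypothetical node endpoint
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()

    // result[0] = header pow-hash, result[1] = DAG seed hash, result[2] = target
    var work [3]string
    if err := client.Call(&work, "ethash_getWork"); err != nil {
        log.Fatal(err)
    }
    fmt.Println("pow-hash:", work[0])
}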
@@ -119,6 +119,9 @@ func (api *PrivateDebugAPI) TraceChain(ctx context.Context, start, end rpc.Block
|
|||
if to == nil {
|
||||
return nil, fmt.Errorf("end block #%d not found", end)
|
||||
}
|
||||
if from.Number().Cmp(to.Number()) >= 0 {
|
||||
return nil, fmt.Errorf("end block (#%d) needs to come after start block (#%d)", end, start)
|
||||
}
|
||||
return api.traceChain(ctx, from, to, config)
|
||||
}
|
||||
|
||||
|
@@ -297,7 +300,9 @@ func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Bl
|
|||
database.TrieDB().Reference(root, common.Hash{})
|
||||
}
|
||||
// Dereference all past tries we ourselves are done working with
|
||||
database.TrieDB().Dereference(proot)
|
||||
if proot != (common.Hash{}) {
|
||||
database.TrieDB().Dereference(proot)
|
||||
}
|
||||
proot = root
|
||||
|
||||
// TODO(karalabe): Do we need the preimages? Won't they accumulate too much?
|
||||
|
@@ -526,7 +531,9 @@ func (api *PrivateDebugAPI) computeStateDB(block *types.Block, reexec uint64) (*
|
|||
return nil, err
|
||||
}
|
||||
database.TrieDB().Reference(root, common.Hash{})
|
||||
database.TrieDB().Dereference(proot)
|
||||
if proot != (common.Hash{}) {
|
||||
database.TrieDB().Dereference(proot)
|
||||
}
|
||||
proot = root
|
||||
}
|
||||
nodes, imgs := database.TrieDB().Size()
|
||||
|
|
|
@@ -124,13 +124,13 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
|
|||
chainConfig: chainConfig,
|
||||
eventMux: ctx.EventMux,
|
||||
accountManager: ctx.AccountManager,
|
||||
engine: CreateConsensusEngine(ctx, &config.Ethash, chainConfig, chainDb),
|
||||
engine: CreateConsensusEngine(ctx, chainConfig, &config.Ethash, config.MinerNotify, chainDb),
|
||||
shutdownChan: make(chan bool),
|
||||
networkID: config.NetworkId,
|
||||
gasPrice: config.GasPrice,
|
||||
gasPrice: config.MinerGasPrice,
|
||||
etherbase: config.Etherbase,
|
||||
bloomRequests: make(chan chan *bloombits.Retrieval),
|
||||
bloomIndexer: NewBloomIndexer(chainDb, params.BloomBitsBlocks),
|
||||
bloomIndexer: NewBloomIndexer(chainDb, params.BloomBitsBlocks, bloomConfirms),
|
||||
}
|
||||
|
||||
log.Info("Initialising Ethereum protocol", "versions", ProtocolVersions, "network", config.NetworkId)
|
||||
|
@@ -138,7 +138,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
|
|||
if !config.SkipBcVersionCheck {
|
||||
bcVersion := rawdb.ReadDatabaseVersion(chainDb)
|
||||
if bcVersion != core.BlockChainVersion && bcVersion != 0 {
|
||||
return nil, fmt.Errorf("Blockchain DB version mismatch (%d / %d). Run geth upgradedb.\n", bcVersion, core.BlockChainVersion)
|
||||
return nil, fmt.Errorf("Blockchain DB version mismatch (%d / %d).\n", bcVersion, core.BlockChainVersion)
|
||||
}
|
||||
rawdb.WriteDatabaseVersion(chainDb, core.BlockChainVersion)
|
||||
}
|
||||
|
@@ -166,13 +166,14 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
|
|||
if eth.protocolManager, err = NewProtocolManager(eth.chainConfig, config.SyncMode, config.NetworkId, eth.eventMux, eth.txPool, eth.engine, eth.blockchain, chainDb); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
eth.miner = miner.New(eth, eth.chainConfig, eth.EventMux(), eth.engine)
|
||||
eth.miner.SetExtra(makeExtraData(config.ExtraData))
|
||||
|
||||
eth.miner = miner.New(eth, eth.chainConfig, eth.EventMux(), eth.engine, config.MinerRecommit)
|
||||
eth.miner.SetExtra(makeExtraData(config.MinerExtraData))
|
||||
|
||||
eth.APIBackend = &EthAPIBackend{eth, nil}
|
||||
gpoParams := config.GPO
|
||||
if gpoParams.Default == nil {
|
||||
gpoParams.Default = config.GasPrice
|
||||
gpoParams.Default = config.MinerGasPrice
|
||||
}
|
||||
eth.APIBackend.gpo = gasprice.NewOracle(eth.APIBackend, gpoParams)
|
||||
|
||||
|
@@ -209,7 +210,7 @@ func CreateDB(ctx *node.ServiceContext, config *Config, name string) (ethdb.Data
|
|||
}
|
||||
|
||||
// CreateConsensusEngine creates the required type of consensus engine instance for an Ethereum service
|
||||
func CreateConsensusEngine(ctx *node.ServiceContext, config *ethash.Config, chainConfig *params.ChainConfig, db ethdb.Database) consensus.Engine {
|
||||
func CreateConsensusEngine(ctx *node.ServiceContext, chainConfig *params.ChainConfig, config *ethash.Config, notify []string, db ethdb.Database) consensus.Engine {
|
||||
// If proof-of-authority is requested, set it up
|
||||
if chainConfig.Clique != nil {
|
||||
return clique.New(chainConfig.Clique, db)
|
||||
|
@@ -221,7 +222,7 @@ func CreateConsensusEngine(ctx *node.ServiceContext, config *ethash.Config, chai
|
|||
return ethash.NewFaker()
|
||||
case ethash.ModeTest:
|
||||
log.Warn("Ethash used in test mode")
|
||||
return ethash.NewTester()
|
||||
return ethash.NewTester(nil)
|
||||
case ethash.ModeShared:
|
||||
log.Warn("Ethash used in shared mode")
|
||||
return ethash.NewShared()
|
||||
|
@@ -233,7 +234,7 @@ func CreateConsensusEngine(ctx *node.ServiceContext, config *ethash.Config, chai
|
|||
DatasetDir: config.DatasetDir,
|
||||
DatasetsInMem: config.DatasetsInMem,
|
||||
DatasetsOnDisk: config.DatasetsOnDisk,
|
||||
})
|
||||
}, notify)
|
||||
engine.SetThreads(-1) // Disable CPU mining
|
||||
return engine
|
||||
}
|
||||
|
@@ -411,6 +412,7 @@ func (s *Ethereum) Start(srvr *p2p.Server) error {
|
|||
func (s *Ethereum) Stop() error {
|
||||
s.bloomIndexer.Close()
|
||||
s.blockchain.Stop()
|
||||
s.engine.Close()
|
||||
s.protocolManager.Stop()
|
||||
if s.lesServer != nil {
|
||||
s.lesServer.Stop()
|
||||
|
@@ -421,6 +423,5 @@ func (s *Ethereum) Stop() error {
|
|||
|
||||
s.chainDb.Close()
|
||||
close(s.shutdownChan)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
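The backend hunks above reorder CreateConsensusEngine's parameters (chain config before the ethash config) and thread the miner notification URLs through, while the miner is constructed with the new MinerRecommit interval and the renamed MinerExtraData/MinerGasPrice fields. A compile-only sketch of a caller against the reordered signature; the package and function names here are illustrative:

package sketch

import (
    "github.com/ethereum/go-ethereum/consensus"
    "github.com/ethereum/go-ethereum/eth"
    "github.com/ethereum/go-ethereum/ethdb"
    "github.com/ethereum/go-ethereum/node"
    "github.com/ethereum/go-ethereum/params"
)

// newEngine wires up a consensus engine with the v1.8.14 argument order:
// chainConfig now precedes the ethash config, and MinerNotify is passed so
// remote sealers can be notified of new work packages.
func newEngine(ctx *node.ServiceContext, chainConfig *params.ChainConfig, cfg *eth.Config, db ethdb.Database) consensus.Engine {
    return eth.CreateConsensusEngine(ctx, chainConfig, &cfg.Ethash, cfg.MinerNotify, db)
}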
@@ -17,6 +17,7 @@
|
|||
package eth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
|
@@ -92,30 +93,28 @@ const (
|
|||
// BloomIndexer implements a core.ChainIndexer, building up a rotated bloom bits index
|
||||
// for the Ethereum header bloom filters, permitting blazing fast filtering.
|
||||
type BloomIndexer struct {
|
||||
size uint64 // section size to generate bloombits for
|
||||
|
||||
db ethdb.Database // database instance to write index data and metadata into
|
||||
gen *bloombits.Generator // generator to rotate the bloom bits crating the bloom index
|
||||
|
||||
section uint64 // Section is the section number being processed currently
|
||||
head common.Hash // Head is the hash of the last header processed
|
||||
size uint64 // section size to generate bloombits for
|
||||
db ethdb.Database // database instance to write index data and metadata into
|
||||
gen *bloombits.Generator // generator to rotate the bloom bits crating the bloom index
|
||||
section uint64 // Section is the section number being processed currently
|
||||
head common.Hash // Head is the hash of the last header processed
|
||||
}
|
||||
|
||||
// NewBloomIndexer returns a chain indexer that generates bloom bits data for the
|
||||
// canonical chain for fast logs filtering.
|
||||
func NewBloomIndexer(db ethdb.Database, size uint64) *core.ChainIndexer {
|
||||
func NewBloomIndexer(db ethdb.Database, size, confReq uint64) *core.ChainIndexer {
|
||||
backend := &BloomIndexer{
|
||||
db: db,
|
||||
size: size,
|
||||
}
|
||||
table := ethdb.NewTable(db, string(rawdb.BloomBitsIndexPrefix))
|
||||
|
||||
return core.NewChainIndexer(db, table, backend, size, bloomConfirms, bloomThrottling, "bloombits")
|
||||
return core.NewChainIndexer(db, table, backend, size, confReq, bloomThrottling, "bloombits")
|
||||
}
|
||||
|
||||
// Reset implements core.ChainIndexerBackend, starting a new bloombits index
|
||||
// section.
|
||||
func (b *BloomIndexer) Reset(section uint64, lastSectionHead common.Hash) error {
|
||||
func (b *BloomIndexer) Reset(ctx context.Context, section uint64, lastSectionHead common.Hash) error {
|
||||
gen, err := bloombits.NewGenerator(uint(b.size))
|
||||
b.gen, b.section, b.head = gen, section, common.Hash{}
|
||||
return err
|
||||
|
@@ -123,16 +122,16 @@ func (b *BloomIndexer) Reset(section uint64, lastSectionHead common.Hash) error
|
|||
|
||||
// Process implements core.ChainIndexerBackend, adding a new header's bloom into
|
||||
// the index.
|
||||
func (b *BloomIndexer) Process(header *types.Header) {
|
||||
func (b *BloomIndexer) Process(ctx context.Context, header *types.Header) error {
|
||||
b.gen.AddBloom(uint(header.Number.Uint64()-b.section*b.size), header.Bloom)
|
||||
b.head = header.Hash()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Commit implements core.ChainIndexerBackend, finalizing the bloom section and
|
||||
// writing it out into the database.
|
||||
func (b *BloomIndexer) Commit() error {
|
||||
batch := b.db.NewBatch()
|
||||
|
||||
for i := 0; i < types.BloomBitLength; i++ {
|
||||
bits, err := b.gen.Bitset(uint(i))
|
||||
if err != nil {
|
||||
|
|
|
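The Reset and Process methods above now take a context.Context, and Process reports an error, matching the updated core.ChainIndexerBackend contract. A toy backend with the same method shapes, purely illustrative and not wired into a real ChainIndexer:

package sketch

import (
    "context"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
)

// countingIndexer is a toy indexer backend illustrating the context-aware
// Reset/Process signatures introduced by this update.
type countingIndexer struct {
    section uint64
    headers int
}

func (c *countingIndexer) Reset(ctx context.Context, section uint64, prevHead common.Hash) error {
    c.section, c.headers = section, 0
    return nil
}

func (c *countingIndexer) Process(ctx context.Context, header *types.Header) error {
    c.headers++
    return nil
}

func (c *countingIndexer) Commit() error { return nil }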
@@ -48,7 +48,8 @@ var DefaultConfig = Config{
|
|||
DatabaseCache: 768,
|
||||
TrieCache: 256,
|
||||
TrieTimeout: 60 * time.Minute,
|
||||
GasPrice: big.NewInt(18 * params.Shannon),
|
||||
MinerGasPrice: big.NewInt(18 * params.Shannon),
|
||||
MinerRecommit: 3 * time.Second,
|
||||
|
||||
TxPool: core.DefaultTxPoolConfig,
|
||||
GPO: gasprice.Config{
|
||||
|
@@ -95,10 +96,12 @@ type Config struct {
|
|||
TrieTimeout time.Duration
|
||||
|
||||
// Mining-related options
|
||||
Etherbase common.Address `toml:",omitempty"`
|
||||
MinerThreads int `toml:",omitempty"`
|
||||
ExtraData []byte `toml:",omitempty"`
|
||||
GasPrice *big.Int
|
||||
Etherbase common.Address `toml:",omitempty"`
|
||||
MinerThreads int `toml:",omitempty"`
|
||||
MinerNotify []string `toml:",omitempty"`
|
||||
MinerExtraData []byte `toml:",omitempty"`
|
||||
MinerGasPrice *big.Int
|
||||
MinerRecommit time.Duration
|
||||
|
||||
// Ethash options
|
||||
Ethash ethash.Config
|
||||
|
@@ -117,5 +120,5 @@ type Config struct {
|
|||
}
|
||||
|
||||
type configMarshaling struct {
|
||||
ExtraData hexutil.Bytes
|
||||
MinerExtraData hexutil.Bytes
|
||||
}
|
||||
|
|
|
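The config hunks rename the mining options (GasPrice and ExtraData become MinerGasPrice and MinerExtraData) and introduce MinerNotify and MinerRecommit. A hedged sketch of populating the renamed fields; the values are illustrative, not recommendations, and the notification URL is a placeholder:

package sketch

import (
    "math/big"
    "time"

    "github.com/ethereum/go-ethereum/eth"
    "github.com/ethereum/go-ethereum/params"
)

// newConfig shows the renamed mining fields on eth.Config after this update.
func newConfig() eth.Config {
    cfg := eth.DefaultConfig
    cfg.MinerGasPrice = big.NewInt(18 * params.Shannon)
    cfg.MinerExtraData = []byte("example-extra")         // was cfg.ExtraData before this update
    cfg.MinerNotify = []string{"http://127.0.0.1:8550"}  // hypothetical notification endpoint
    cfg.MinerRecommit = 3 * time.Second
    return cfg
}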
@@ -4,6 +4,7 @@ package eth
|
|||
|
||||
import (
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
|
@@ -15,20 +16,26 @@ import (
|
|||
|
||||
var _ = (*configMarshaling)(nil)
|
||||
|
||||
// MarshalTOML marshals as TOML.
|
||||
func (c Config) MarshalTOML() (interface{}, error) {
|
||||
type Config struct {
|
||||
Genesis *core.Genesis `toml:",omitempty"`
|
||||
NetworkId uint64
|
||||
SyncMode downloader.SyncMode
|
||||
NoPruning bool
|
||||
LightServ int `toml:",omitempty"`
|
||||
LightPeers int `toml:",omitempty"`
|
||||
SkipBcVersionCheck bool `toml:"-"`
|
||||
DatabaseHandles int `toml:"-"`
|
||||
DatabaseCache int
|
||||
TrieCache int
|
||||
TrieTimeout time.Duration
|
||||
Etherbase common.Address `toml:",omitempty"`
|
||||
MinerThreads int `toml:",omitempty"`
|
||||
ExtraData hexutil.Bytes `toml:",omitempty"`
|
||||
GasPrice *big.Int
|
||||
MinerNotify []string `toml:",omitempty"`
|
||||
MinerExtraData hexutil.Bytes `toml:",omitempty"`
|
||||
MinerGasPrice *big.Int
|
||||
MinerRecommit time.Duration
|
||||
Ethash ethash.Config
|
||||
TxPool core.TxPoolConfig
|
||||
GPO gasprice.Config
|
||||
|
@@ -39,15 +46,20 @@ func (c Config) MarshalTOML() (interface{}, error) {
|
|||
enc.Genesis = c.Genesis
|
||||
enc.NetworkId = c.NetworkId
|
||||
enc.SyncMode = c.SyncMode
|
||||
enc.NoPruning = c.NoPruning
|
||||
enc.LightServ = c.LightServ
|
||||
enc.LightPeers = c.LightPeers
|
||||
enc.SkipBcVersionCheck = c.SkipBcVersionCheck
|
||||
enc.DatabaseHandles = c.DatabaseHandles
|
||||
enc.DatabaseCache = c.DatabaseCache
|
||||
enc.TrieCache = c.TrieCache
|
||||
enc.TrieTimeout = c.TrieTimeout
|
||||
enc.Etherbase = c.Etherbase
|
||||
enc.MinerThreads = c.MinerThreads
|
||||
enc.ExtraData = c.ExtraData
|
||||
enc.GasPrice = c.GasPrice
|
||||
enc.MinerNotify = c.MinerNotify
|
||||
enc.MinerExtraData = c.MinerExtraData
|
||||
enc.MinerGasPrice = c.MinerGasPrice
|
||||
enc.MinerRecommit = c.MinerRecommit
|
||||
enc.Ethash = c.Ethash
|
||||
enc.TxPool = c.TxPool
|
||||
enc.GPO = c.GPO
|
||||
|
@@ -56,20 +68,26 @@ func (c Config) MarshalTOML() (interface{}, error) {
|
|||
return &enc, nil
|
||||
}
|
||||
|
||||
// UnmarshalTOML unmarshals from TOML.
|
||||
func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
|
||||
type Config struct {
|
||||
Genesis *core.Genesis `toml:",omitempty"`
|
||||
NetworkId *uint64
|
||||
SyncMode *downloader.SyncMode
|
||||
NoPruning *bool
|
||||
LightServ *int `toml:",omitempty"`
|
||||
LightPeers *int `toml:",omitempty"`
|
||||
SkipBcVersionCheck *bool `toml:"-"`
|
||||
DatabaseHandles *int `toml:"-"`
|
||||
DatabaseCache *int
|
||||
TrieCache *int
|
||||
TrieTimeout *time.Duration
|
||||
Etherbase *common.Address `toml:",omitempty"`
|
||||
MinerThreads *int `toml:",omitempty"`
|
||||
ExtraData *hexutil.Bytes `toml:",omitempty"`
|
||||
GasPrice *big.Int
|
||||
MinerNotify []string `toml:",omitempty"`
|
||||
MinerExtraData *hexutil.Bytes `toml:",omitempty"`
|
||||
MinerGasPrice *big.Int
|
||||
MinerRecommit *time.Duration
|
||||
Ethash *ethash.Config
|
||||
TxPool *core.TxPoolConfig
|
||||
GPO *gasprice.Config
|
||||
|
@@ -89,6 +107,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
|
|||
if dec.SyncMode != nil {
|
||||
c.SyncMode = *dec.SyncMode
|
||||
}
|
||||
if dec.NoPruning != nil {
|
||||
c.NoPruning = *dec.NoPruning
|
||||
}
|
||||
if dec.LightServ != nil {
|
||||
c.LightServ = *dec.LightServ
|
||||
}
|
||||
|
@@ -104,17 +125,29 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
|
|||
if dec.DatabaseCache != nil {
|
||||
c.DatabaseCache = *dec.DatabaseCache
|
||||
}
|
||||
if dec.TrieCache != nil {
|
||||
c.TrieCache = *dec.TrieCache
|
||||
}
|
||||
if dec.TrieTimeout != nil {
|
||||
c.TrieTimeout = *dec.TrieTimeout
|
||||
}
|
||||
if dec.Etherbase != nil {
|
||||
c.Etherbase = *dec.Etherbase
|
||||
}
|
||||
if dec.MinerThreads != nil {
|
||||
c.MinerThreads = *dec.MinerThreads
|
||||
}
|
||||
if dec.ExtraData != nil {
|
||||
c.ExtraData = *dec.ExtraData
|
||||
if dec.MinerNotify != nil {
|
||||
c.MinerNotify = dec.MinerNotify
|
||||
}
|
||||
if dec.GasPrice != nil {
|
||||
c.GasPrice = dec.GasPrice
|
||||
if dec.MinerExtraData != nil {
|
||||
c.MinerExtraData = *dec.MinerExtraData
|
||||
}
|
||||
if dec.MinerGasPrice != nil {
|
||||
c.MinerGasPrice = dec.MinerGasPrice
|
||||
}
|
||||
if dec.MinerRecommit != nil {
|
||||
c.MinerRecommit = *dec.MinerRecommit
|
||||
}
|
||||
if dec.Ethash != nil {
|
||||
c.Ethash = *dec.Ethash
|
||||
|
|
|
@@ -21,6 +21,7 @@ var Modules = map[string]string{
|
|||
"admin": Admin_JS,
|
||||
"chequebook": Chequebook_JS,
|
||||
"clique": Clique_JS,
|
||||
"ethash": Ethash_JS,
|
||||
"debug": Debug_JS,
|
||||
"eth": Eth_JS,
|
||||
"miner": Miner_JS,
|
||||
|
@@ -109,6 +110,34 @@ web3._extend({
|
|||
});
|
||||
`
|
||||
|
||||
const Ethash_JS = `
|
||||
web3._extend({
|
||||
property: 'ethash',
|
||||
methods: [
|
||||
new web3._extend.Method({
|
||||
name: 'getWork',
|
||||
call: 'ethash_getWork',
|
||||
params: 0
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'getHashrate',
|
||||
call: 'ethash_getHashrate',
|
||||
params: 0
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'submitWork',
|
||||
call: 'ethash_submitWork',
|
||||
params: 3,
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'submitHashRate',
|
||||
call: 'ethash_submitHashRate',
|
||||
params: 2,
|
||||
}),
|
||||
]
|
||||
});
|
||||
`
|
||||
|
||||
const Admin_JS = `
|
||||
web3._extend({
|
||||
property: 'admin',
|
||||
|
@@ -123,6 +152,16 @@ web3._extend({
|
|||
call: 'admin_removePeer',
|
||||
params: 1
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'addTrustedPeer',
|
||||
call: 'admin_addTrustedPeer',
|
||||
params: 1
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'removeTrustedPeer',
|
||||
call: 'admin_removeTrustedPeer',
|
||||
params: 1
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'exportChain',
|
||||
call: 'admin_exportChain',
|
||||
|
@@ -480,6 +519,11 @@ web3._extend({
|
|||
params: 1,
|
||||
inputFormatter: [web3._extend.utils.fromDecimal]
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'setRecommitInterval',
|
||||
call: 'miner_setRecommitInterval',
|
||||
params: 1,
|
||||
}),
|
||||
new web3._extend.Method({
|
||||
name: 'getHashrate',
|
||||
call: 'miner_getHashrate'
|
||||
|
|
|
@@ -33,7 +33,6 @@ import (
|
|||
"github.com/ethereum/go-ethereum/eth/downloader"
|
||||
"github.com/ethereum/go-ethereum/eth/filters"
|
||||
"github.com/ethereum/go-ethereum/eth/gasprice"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/internal/ethapi"
|
||||
"github.com/ethereum/go-ethereum/light"
|
||||
|
@@ -46,26 +45,24 @@ import (
|
|||
)
|
||||
|
||||
type LightEthereum struct {
|
||||
config *eth.Config
|
||||
lesCommons
|
||||
|
||||
odr *LesOdr
|
||||
relay *LesTxRelay
|
||||
chainConfig *params.ChainConfig
|
||||
// Channel for shutting down the service
|
||||
shutdownChan chan bool
|
||||
// Handlers
|
||||
peers *peerSet
|
||||
txPool *light.TxPool
|
||||
blockchain *light.LightChain
|
||||
protocolManager *ProtocolManager
|
||||
serverPool *serverPool
|
||||
reqDist *requestDistributor
|
||||
retriever *retrieveManager
|
||||
// DB interfaces
|
||||
chainDb ethdb.Database // Block chain database
|
||||
|
||||
bloomRequests chan chan *bloombits.Retrieval // Channel receiving bloom data retrieval requests
|
||||
bloomIndexer, chtIndexer, bloomTrieIndexer *core.ChainIndexer
|
||||
// Handlers
|
||||
peers *peerSet
|
||||
txPool *light.TxPool
|
||||
blockchain *light.LightChain
|
||||
serverPool *serverPool
|
||||
reqDist *requestDistributor
|
||||
retriever *retrieveManager
|
||||
|
||||
bloomRequests chan chan *bloombits.Retrieval // Channel receiving bloom data retrieval requests
|
||||
bloomIndexer *core.ChainIndexer
|
||||
|
||||
ApiBackend *LesApiBackend
|
||||
|
||||
|
@@ -94,30 +91,41 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
|
|||
quitSync := make(chan struct{})
|
||||
|
||||
leth := &LightEthereum{
|
||||
config: config,
|
||||
chainConfig: chainConfig,
|
||||
chainDb: chainDb,
|
||||
eventMux: ctx.EventMux,
|
||||
peers: peers,
|
||||
reqDist: newRequestDistributor(peers, quitSync),
|
||||
accountManager: ctx.AccountManager,
|
||||
engine: eth.CreateConsensusEngine(ctx, &config.Ethash, chainConfig, chainDb),
|
||||
shutdownChan: make(chan bool),
|
||||
networkId: config.NetworkId,
|
||||
bloomRequests: make(chan chan *bloombits.Retrieval),
|
||||
bloomIndexer: eth.NewBloomIndexer(chainDb, light.BloomTrieFrequency),
|
||||
chtIndexer: light.NewChtIndexer(chainDb, true),
|
||||
bloomTrieIndexer: light.NewBloomTrieIndexer(chainDb, true),
|
||||
lesCommons: lesCommons{
|
||||
chainDb: chainDb,
|
||||
config: config,
|
||||
},
|
||||
chainConfig: chainConfig,
|
||||
eventMux: ctx.EventMux,
|
||||
peers: peers,
|
||||
reqDist: newRequestDistributor(peers, quitSync),
|
||||
accountManager: ctx.AccountManager,
|
||||
engine: eth.CreateConsensusEngine(ctx, chainConfig, &config.Ethash, nil, chainDb),
|
||||
shutdownChan: make(chan bool),
|
||||
networkId: config.NetworkId,
|
||||
bloomRequests: make(chan chan *bloombits.Retrieval),
|
||||
bloomIndexer: eth.NewBloomIndexer(chainDb, light.BloomTrieFrequency, light.HelperTrieConfirmations),
|
||||
}
|
||||
|
||||
leth.relay = NewLesTxRelay(peers, leth.reqDist)
|
||||
leth.serverPool = newServerPool(chainDb, quitSync, &leth.wg)
|
||||
leth.retriever = newRetrieveManager(peers, leth.reqDist, leth.serverPool)
|
||||
leth.odr = NewLesOdr(chainDb, leth.chtIndexer, leth.bloomTrieIndexer, leth.bloomIndexer, leth.retriever)
|
||||
|
||||
leth.odr = NewLesOdr(chainDb, leth.retriever)
|
||||
leth.chtIndexer = light.NewChtIndexer(chainDb, true, leth.odr)
|
||||
leth.bloomTrieIndexer = light.NewBloomTrieIndexer(chainDb, true, leth.odr)
|
||||
leth.odr.SetIndexers(leth.chtIndexer, leth.bloomTrieIndexer, leth.bloomIndexer)
|
||||
|
||||
// Note: NewLightChain adds the trusted checkpoint so it needs an ODR with
|
||||
// indexers already set but not started yet
|
||||
if leth.blockchain, err = light.NewLightChain(leth.odr, leth.chainConfig, leth.engine); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Note: AddChildIndexer starts the update process for the child
|
||||
leth.bloomIndexer.AddChildIndexer(leth.bloomTrieIndexer)
|
||||
leth.chtIndexer.Start(leth.blockchain)
|
||||
leth.bloomIndexer.Start(leth.blockchain)
|
||||
|
||||
// Rewind the chain in case of an incompatible config upgrade.
|
||||
if compat, ok := genesisErr.(*params.ConfigCompatError); ok {
|
||||
log.Warn("Rewinding chain to upgrade configuration", "err", compat)
|
||||
|
@@ -126,13 +134,13 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
|
|||
}
|
||||
|
||||
leth.txPool = light.NewTxPool(leth.chainConfig, leth.blockchain, leth.relay)
|
||||
if leth.protocolManager, err = NewProtocolManager(leth.chainConfig, true, ClientProtocolVersions, config.NetworkId, leth.eventMux, leth.engine, leth.peers, leth.blockchain, nil, chainDb, leth.odr, leth.relay, leth.serverPool, quitSync, &leth.wg); err != nil {
|
||||
if leth.protocolManager, err = NewProtocolManager(leth.chainConfig, true, config.NetworkId, leth.eventMux, leth.engine, leth.peers, leth.blockchain, nil, chainDb, leth.odr, leth.relay, leth.serverPool, quitSync, &leth.wg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
leth.ApiBackend = &LesApiBackend{leth, nil}
|
||||
gpoParams := config.GPO
|
||||
if gpoParams.Default == nil {
|
||||
gpoParams.Default = config.GasPrice
|
||||
gpoParams.Default = config.MinerGasPrice
|
||||
}
|
||||
leth.ApiBackend.gpo = gasprice.NewOracle(leth.ApiBackend, gpoParams)
|
||||
return leth, nil
|
||||
|
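The client construction above now happens in two phases: the ODR is created without indexers, the CHT and bloom-trie indexers are created with a reference to it, and SetIndexers closes the loop before the light chain is built. A toy illustration of that two-phase wiring; the types here are invented stand-ins, not the les package's own:

package sketch

type indexer struct{ odr *odr }

type odr struct {
    cht, bloomTrie, bloom *indexer
}

func newOdr() *odr { return &odr{} }

func (o *odr) SetIndexers(cht, bloomTrie, bloom *indexer) {
    o.cht, o.bloomTrie, o.bloom = cht, bloomTrie, bloom
}

func wire() *odr {
    o := newOdr()           // phase 1: ODR without indexers
    cht := &indexer{odr: o} // phase 2: indexers hold a reference to the ODR
    bloomTrie := &indexer{odr: o}
    bloom := &indexer{odr: o}
    o.SetIndexers(cht, bloomTrie, bloom) // phase 3: close the loop
    return o
}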
@@ -208,14 +216,14 @@ func (s *LightEthereum) ResetWithGenesisBlock(gb *types.Block) {
|
|||
func (s *LightEthereum) BlockChain() *light.LightChain { return s.blockchain }
|
||||
func (s *LightEthereum) TxPool() *light.TxPool { return s.txPool }
|
||||
func (s *LightEthereum) Engine() consensus.Engine { return s.engine }
|
||||
func (s *LightEthereum) LesVersion() int { return int(s.protocolManager.SubProtocols[0].Version) }
|
||||
func (s *LightEthereum) LesVersion() int { return int(ClientProtocolVersions[0]) }
|
||||
func (s *LightEthereum) Downloader() *downloader.Downloader { return s.protocolManager.downloader }
|
||||
func (s *LightEthereum) EventMux() *event.TypeMux { return s.eventMux }
|
||||
|
||||
// Protocols implements node.Service, returning all the currently configured
|
||||
// network protocols to start.
|
||||
func (s *LightEthereum) Protocols() []p2p.Protocol {
|
||||
return s.protocolManager.SubProtocols
|
||||
return s.makeProtocols(ClientProtocolVersions)
|
||||
}
|
||||
|
||||
// Start implements node.Service, starting all internal goroutines needed by the
|
||||
|
@@ -235,18 +243,12 @@ func (s *LightEthereum) Start(srvr *p2p.Server) error {
|
|||
// Ethereum protocol.
|
||||
func (s *LightEthereum) Stop() error {
|
||||
s.odr.Stop()
|
||||
if s.bloomIndexer != nil {
|
||||
s.bloomIndexer.Close()
|
||||
}
|
||||
if s.chtIndexer != nil {
|
||||
s.chtIndexer.Close()
|
||||
}
|
||||
if s.bloomTrieIndexer != nil {
|
||||
s.bloomTrieIndexer.Close()
|
||||
}
|
||||
s.bloomIndexer.Close()
|
||||
s.chtIndexer.Close()
|
||||
s.blockchain.Stop()
|
||||
s.protocolManager.Stop()
|
||||
s.txPool.Stop()
|
||||
s.engine.Close()
|
||||
|
||||
s.eventMux.Stop()
|
||||
|
||||
|
|
|
@@ -0,0 +1,118 @@
|
|||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package les
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/light"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
)
|
||||
|
||||
// lesCommons contains fields needed by both server and client.
|
||||
type lesCommons struct {
|
||||
config *eth.Config
|
||||
chainDb ethdb.Database
|
||||
protocolManager *ProtocolManager
|
||||
chtIndexer, bloomTrieIndexer *core.ChainIndexer
|
||||
}
|
||||
|
||||
// NodeInfo represents a short summary of the Ethereum sub-protocol metadata
|
||||
// known about the host peer.
|
||||
type NodeInfo struct {
|
||||
Network uint64 `json:"network"` // Ethereum network ID (1=Frontier, 2=Morden, Ropsten=3, Rinkeby=4)
|
||||
Difficulty *big.Int `json:"difficulty"` // Total difficulty of the host's blockchain
|
||||
Genesis common.Hash `json:"genesis"` // SHA3 hash of the host's genesis block
|
||||
Config *params.ChainConfig `json:"config"` // Chain configuration for the fork rules
|
||||
Head common.Hash `json:"head"` // SHA3 hash of the host's best owned block
|
||||
CHT light.TrustedCheckpoint `json:"cht"` // Trused CHT checkpoint for fast catchup
|
||||
}
|
||||
|
||||
// makeProtocols creates protocol descriptors for the given LES versions.
|
||||
func (c *lesCommons) makeProtocols(versions []uint) []p2p.Protocol {
|
||||
protos := make([]p2p.Protocol, len(versions))
|
||||
for i, version := range versions {
|
||||
version := version
|
||||
protos[i] = p2p.Protocol{
|
||||
Name: "les",
|
||||
Version: version,
|
||||
Length: ProtocolLengths[version],
|
||||
NodeInfo: c.nodeInfo,
|
||||
Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
|
||||
return c.protocolManager.runPeer(version, p, rw)
|
||||
},
|
||||
PeerInfo: func(id discover.NodeID) interface{} {
|
||||
if p := c.protocolManager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
|
||||
return p.Info()
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
}
|
||||
return protos
|
||||
}
|
||||
|
||||
// nodeInfo retrieves some protocol metadata about the running host node.
|
||||
func (c *lesCommons) nodeInfo() interface{} {
|
||||
var cht light.TrustedCheckpoint
|
||||
sections, _, _ := c.chtIndexer.Sections()
|
||||
sections2, _, _ := c.bloomTrieIndexer.Sections()
|
||||
|
||||
if !c.protocolManager.lightSync {
|
||||
// convert to client section size if running in server mode
|
||||
sections /= light.CHTFrequencyClient / light.CHTFrequencyServer
|
||||
}
|
||||
|
||||
if sections2 < sections {
|
||||
sections = sections2
|
||||
}
|
||||
if sections > 0 {
|
||||
sectionIndex := sections - 1
|
||||
sectionHead := c.bloomTrieIndexer.SectionHead(sectionIndex)
|
||||
var chtRoot common.Hash
|
||||
if c.protocolManager.lightSync {
|
||||
chtRoot = light.GetChtRoot(c.chainDb, sectionIndex, sectionHead)
|
||||
} else {
|
||||
chtRoot = light.GetChtV2Root(c.chainDb, sectionIndex, sectionHead)
|
||||
}
|
||||
cht = light.TrustedCheckpoint{
|
||||
SectionIdx: sectionIndex,
|
||||
SectionHead: sectionHead,
|
||||
CHTRoot: chtRoot,
|
||||
BloomRoot: light.GetBloomTrieRoot(c.chainDb, sectionIndex, sectionHead),
|
||||
}
|
||||
}
|
||||
|
||||
chain := c.protocolManager.blockchain
|
||||
head := chain.CurrentHeader()
|
||||
hash := head.Hash()
|
||||
return &NodeInfo{
|
||||
Network: c.config.NetworkId,
|
||||
Difficulty: chain.GetTd(hash, head.Number.Uint64()),
|
||||
Genesis: chain.Genesis().Hash(),
|
||||
Config: chain.Config(),
|
||||
Head: chain.CurrentHeader().Hash(),
|
||||
CHT: cht,
|
||||
}
|
||||
}
|
|
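makeProtocols above copies the loop variable (version := version) before the Run and PeerInfo closures capture it; under the Go versions contemporary with this code, omitting that copy would make every protocol descriptor observe the last version in the slice. A small stand-alone illustration of the same capture pattern:

package sketch

func closuresWithCopy(versions []uint) []func() uint {
    out := make([]func() uint, 0, len(versions))
    for _, v := range versions {
        v := v // per-iteration copy, as in makeProtocols
        out = append(out, func() uint { return v })
    }
    return out
}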
@@ -20,14 +20,10 @@ package les
|
|||
|
||||
import (
|
||||
"container/list"
|
||||
"errors"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ErrNoPeers is returned if no peers capable of serving a queued request are available
|
||||
var ErrNoPeers = errors.New("no suitable peers available")
|
||||
|
||||
// requestDistributor implements a mechanism that distributes requests to
|
||||
// suitable peers, obeying flow control rules and prioritizing them in creation
|
||||
// order (even when a resend is necessary).
|
||||
|
|
|
@@ -0,0 +1,278 @@
|
|||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Package les implements the Light Ethereum Subprotocol.
|
||||
package les
|
||||
|
||||
import (
|
||||
"io"
|
||||
"math"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/mclock"
|
||||
"github.com/ethereum/go-ethereum/common/prque"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
// freeClientPool implements a client database that limits the connection time
|
||||
// of each client and manages accepting/rejecting incoming connections and even
|
||||
// kicking out some connected clients. The pool calculates recent usage time
|
||||
// for each known client (a value that increases linearly when the client is
|
||||
// connected and decreases exponentially when not connected). Clients with lower
|
||||
// recent usage are preferred, unknown nodes have the highest priority. Already
|
||||
// connected nodes receive a small bias in their favor in order to avoid accepting
|
||||
// and instantly kicking out clients.
|
||||
//
|
||||
// Note: the pool can use any string for client identification. Using signature
|
||||
// keys for that purpose would not make sense when being known has a negative
|
||||
// value for the client. Currently the LES protocol manager uses IP addresses
|
||||
// (without port address) to identify clients.
|
||||
type freeClientPool struct {
|
||||
db ethdb.Database
|
||||
lock sync.Mutex
|
||||
clock mclock.Clock
|
||||
closed bool
|
||||
|
||||
connectedLimit, totalLimit int
|
||||
|
||||
addressMap map[string]*freeClientPoolEntry
|
||||
connPool, disconnPool *prque.Prque
|
||||
startupTime mclock.AbsTime
|
||||
logOffsetAtStartup int64
|
||||
}
|
||||
|
||||
const (
|
||||
recentUsageExpTC = time.Hour // time constant of the exponential weighting window for "recent" server usage
|
||||
fixedPointMultiplier = 0x1000000 // constant to convert logarithms to fixed point format
|
||||
connectedBias = time.Minute // this bias is applied in favor of already connected clients in order to avoid kicking them out very soon
|
||||
)
|
||||
|
||||
// newFreeClientPool creates a new free client pool
|
||||
func newFreeClientPool(db ethdb.Database, connectedLimit, totalLimit int, clock mclock.Clock) *freeClientPool {
|
||||
pool := &freeClientPool{
|
||||
db: db,
|
||||
clock: clock,
|
||||
addressMap: make(map[string]*freeClientPoolEntry),
|
||||
connPool: prque.New(poolSetIndex),
|
||||
disconnPool: prque.New(poolSetIndex),
|
||||
connectedLimit: connectedLimit,
|
||||
totalLimit: totalLimit,
|
||||
}
|
||||
pool.loadFromDb()
|
||||
return pool
|
||||
}
|
||||
|
||||
func (f *freeClientPool) stop() {
|
||||
f.lock.Lock()
|
||||
f.closed = true
|
||||
f.saveToDb()
|
||||
f.lock.Unlock()
|
||||
}
|
||||
|
||||
// connect should be called after a successful handshake. If the connection was
|
||||
// rejected, there is no need to call disconnect.
|
||||
//
|
||||
// Note: the disconnectFn callback should not block.
|
||||
func (f *freeClientPool) connect(address string, disconnectFn func()) bool {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
|
||||
if f.closed {
|
||||
return false
|
||||
}
|
||||
e := f.addressMap[address]
|
||||
now := f.clock.Now()
|
||||
var recentUsage int64
|
||||
if e == nil {
|
||||
e = &freeClientPoolEntry{address: address, index: -1}
|
||||
f.addressMap[address] = e
|
||||
} else {
|
||||
if e.connected {
|
||||
log.Debug("Client already connected", "address", address)
|
||||
return false
|
||||
}
|
||||
recentUsage = int64(math.Exp(float64(e.logUsage-f.logOffset(now)) / fixedPointMultiplier))
|
||||
}
|
||||
e.linUsage = recentUsage - int64(now)
|
||||
// check whether (linUsage+connectedBias) is smaller than the highest entry in the connected pool
|
||||
if f.connPool.Size() == f.connectedLimit {
|
||||
i := f.connPool.PopItem().(*freeClientPoolEntry)
|
||||
if e.linUsage+int64(connectedBias)-i.linUsage < 0 {
|
||||
// kick it out and accept the new client
|
||||
f.connPool.Remove(i.index)
|
||||
f.calcLogUsage(i, now)
|
||||
i.connected = false
|
||||
f.disconnPool.Push(i, -i.logUsage)
|
||||
log.Debug("Client kicked out", "address", i.address)
|
||||
i.disconnectFn()
|
||||
} else {
|
||||
// keep the old client and reject the new one
|
||||
f.connPool.Push(i, i.linUsage)
|
||||
log.Debug("Client rejected", "address", address)
|
||||
return false
|
||||
}
|
||||
}
|
||||
f.disconnPool.Remove(e.index)
|
||||
e.connected = true
|
||||
e.disconnectFn = disconnectFn
|
||||
f.connPool.Push(e, e.linUsage)
|
||||
if f.connPool.Size()+f.disconnPool.Size() > f.totalLimit {
|
||||
f.disconnPool.Pop()
|
||||
}
|
||||
log.Debug("Client accepted", "address", address)
|
||||
return true
|
||||
}
|
||||
|
||||
// disconnect should be called when a connection is terminated. If the disconnection
|
||||
// was initiated by the pool itself using disconnectFn then calling disconnect is
|
||||
// not necessary but permitted.
|
||||
func (f *freeClientPool) disconnect(address string) {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
|
||||
if f.closed {
|
||||
return
|
||||
}
|
||||
e := f.addressMap[address]
|
||||
now := f.clock.Now()
|
||||
if !e.connected {
|
||||
log.Debug("Client already disconnected", "address", address)
|
||||
return
|
||||
}
|
||||
|
||||
f.connPool.Remove(e.index)
|
||||
f.calcLogUsage(e, now)
|
||||
e.connected = false
|
||||
f.disconnPool.Push(e, -e.logUsage)
|
||||
log.Debug("Client disconnected", "address", address)
|
||||
}
|
||||
|
||||
// logOffset calculates the time-dependent offset for the logarithmic
|
||||
// representation of recent usage
|
||||
func (f *freeClientPool) logOffset(now mclock.AbsTime) int64 {
|
||||
// Note: fixedPointMultiplier acts as a multiplier here; the reason for dividing the divisor
|
||||
// is to avoid int64 overflow. We assume that int64(recentUsageExpTC) >> fixedPointMultiplier.
|
||||
logDecay := int64((time.Duration(now - f.startupTime)) / (recentUsageExpTC / fixedPointMultiplier))
|
||||
return f.logOffsetAtStartup + logDecay
|
||||
}
|
||||
|
||||
// calcLogUsage converts recent usage from linear to logarithmic representation
|
||||
// when disconnecting a peer or closing the client pool
|
||||
func (f *freeClientPool) calcLogUsage(e *freeClientPoolEntry, now mclock.AbsTime) {
|
||||
dt := e.linUsage + int64(now)
|
||||
if dt < 1 {
|
||||
dt = 1
|
||||
}
|
||||
e.logUsage = int64(math.Log(float64(dt))*fixedPointMultiplier) + f.logOffset(now)
|
||||
}
|
||||
|
||||
// freeClientPoolStorage is the RLP representation of the pool's database storage
|
||||
type freeClientPoolStorage struct {
|
||||
LogOffset uint64
|
||||
List []*freeClientPoolEntry
|
||||
}
|
||||
|
||||
// loadFromDb restores pool status from the database storage
|
||||
// (automatically called at initialization)
|
||||
func (f *freeClientPool) loadFromDb() {
|
||||
enc, err := f.db.Get([]byte("freeClientPool"))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var storage freeClientPoolStorage
|
||||
err = rlp.DecodeBytes(enc, &storage)
|
||||
if err != nil {
|
||||
log.Error("Failed to decode client list", "err", err)
|
||||
return
|
||||
}
|
||||
f.logOffsetAtStartup = int64(storage.LogOffset)
|
||||
f.startupTime = f.clock.Now()
|
||||
for _, e := range storage.List {
|
||||
log.Debug("Loaded free client record", "address", e.address, "logUsage", e.logUsage)
|
||||
f.addressMap[e.address] = e
|
||||
f.disconnPool.Push(e, -e.logUsage)
|
||||
}
|
||||
}
|
||||
|
||||
// saveToDb saves pool status to the database storage
|
||||
// (automatically called during shutdown)
|
||||
func (f *freeClientPool) saveToDb() {
|
||||
now := f.clock.Now()
|
||||
storage := freeClientPoolStorage{
|
||||
LogOffset: uint64(f.logOffset(now)),
|
||||
List: make([]*freeClientPoolEntry, len(f.addressMap)),
|
||||
}
|
||||
i := 0
|
||||
for _, e := range f.addressMap {
|
||||
if e.connected {
|
||||
f.calcLogUsage(e, now)
|
||||
}
|
||||
storage.List[i] = e
|
||||
i++
|
||||
}
|
||||
enc, err := rlp.EncodeToBytes(storage)
|
||||
if err != nil {
|
||||
log.Error("Failed to encode client list", "err", err)
|
||||
} else {
|
||||
f.db.Put([]byte("freeClientPool"), enc)
|
||||
}
|
||||
}
|
||||
|
||||
// freeClientPoolEntry represents a client address known by the pool.
|
||||
// When connected, recent usage is calculated as linUsage + int64(clock.Now())
|
||||
// When disconnected, it is calculated as exp(logUsage - logOffset) where logOffset
|
||||
// also grows linearly with time while the server is running.
|
||||
// Conversion between linear and logarithmic representation happens when connecting
|
||||
// or disconnecting the node.
|
||||
//
|
||||
// Note: linUsage and logUsage are values used with constantly growing offsets so
|
||||
// even though they are close to each other at any time they may wrap around int64
|
||||
// limits over time. Comparison should be performed accordingly.
|
||||
type freeClientPoolEntry struct {
|
||||
address string
|
||||
connected bool
|
||||
disconnectFn func()
|
||||
linUsage, logUsage int64
|
||||
index int
|
||||
}
|
||||
|
||||
func (e *freeClientPoolEntry) EncodeRLP(w io.Writer) error {
|
||||
return rlp.Encode(w, []interface{}{e.address, uint64(e.logUsage)})
|
||||
}
|
||||
|
||||
func (e *freeClientPoolEntry) DecodeRLP(s *rlp.Stream) error {
|
||||
var entry struct {
|
||||
Address string
|
||||
LogUsage uint64
|
||||
}
|
||||
if err := s.Decode(&entry); err != nil {
|
||||
return err
|
||||
}
|
||||
e.address = entry.Address
|
||||
e.logUsage = int64(entry.LogUsage)
|
||||
e.connected = false
|
||||
e.index = -1
|
||||
return nil
|
||||
}
|
||||
|
||||
// poolSetIndex callback is used by both priority queues to set/update the index of
|
||||
// the element in the queue. Index is needed to remove elements other than the top one.
|
||||
func poolSetIndex(a interface{}, i int) {
|
||||
a.(*freeClientPoolEntry).index = i
|
||||
}
|
|
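The usage accounting in freeClientPool keeps a connected client's usage in linear form (linUsage + now) and folds it into a logarithmic form against a moving offset on disconnect, so stored values decay exponentially with the recentUsageExpTC time constant. A small re-derivation of that bookkeeping, with the constants copied from the code above and logOffsetAtStartup assumed to be zero:

package sketch

import (
    "math"
    "time"
)

const (
    fixedPointMultiplier = 0x1000000
    recentUsageExpTC     = time.Hour
)

// logOffset mirrors freeClientPool.logOffset with a zero startup offset.
func logOffset(sinceStartup time.Duration) int64 {
    return int64(sinceStartup / (recentUsageExpTC / fixedPointMultiplier))
}

// recentUsage recovers the exponentially decayed usage of a disconnected
// client from its stored logarithmic value.
func recentUsage(logUsage int64, sinceStartup time.Duration) int64 {
    return int64(math.Exp(float64(logUsage-logOffset(sinceStartup)) / fixedPointMultiplier))
}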
@@ -20,7 +20,6 @@ package les
|
|||
import (
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"net"
|
||||
|
@@ -28,6 +27,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/mclock"
|
||||
"github.com/ethereum/go-ethereum/consensus"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
|
@@ -39,7 +39,6 @@ import (
|
|||
"github.com/ethereum/go-ethereum/light"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/discv5"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
|
@@ -64,10 +63,6 @@ const (
|
|||
disableClientRemovePeer = false
|
||||
)
|
||||
|
||||
// errIncompatibleConfig is returned if the requested protocols and configs are
|
||||
// not compatible (low protocol version restrictions and high requirements).
|
||||
var errIncompatibleConfig = errors.New("incompatible configuration")
|
||||
|
||||
func errResp(code errCode, format string, v ...interface{}) error {
|
||||
return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
|
||||
}
|
||||
|
@@ -104,6 +99,7 @@ type ProtocolManager struct {
|
|||
odr *LesOdr
|
||||
server *LesServer
|
||||
serverPool *serverPool
|
||||
clientPool *freeClientPool
|
||||
lesTopic discv5.Topic
|
||||
reqDist *requestDistributor
|
||||
retriever *retrieveManager
|
||||
|
@@ -113,8 +109,6 @@ type ProtocolManager struct {
|
|||
peers *peerSet
|
||||
maxPeers int
|
||||
|
||||
SubProtocols []p2p.Protocol
|
||||
|
||||
eventMux *event.TypeMux
|
||||
|
||||
// channels for fetcher, syncer, txsyncLoop
|
||||
|
@@ -129,7 +123,7 @@ type ProtocolManager struct {
|
|||
|
||||
// NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
|
||||
// with the ethereum network.
|
||||
func NewProtocolManager(chainConfig *params.ChainConfig, lightSync bool, protocolVersions []uint, networkId uint64, mux *event.TypeMux, engine consensus.Engine, peers *peerSet, blockchain BlockChain, txpool txPool, chainDb ethdb.Database, odr *LesOdr, txrelay *LesTxRelay, serverPool *serverPool, quitSync chan struct{}, wg *sync.WaitGroup) (*ProtocolManager, error) {
|
||||
func NewProtocolManager(chainConfig *params.ChainConfig, lightSync bool, networkId uint64, mux *event.TypeMux, engine consensus.Engine, peers *peerSet, blockchain BlockChain, txpool txPool, chainDb ethdb.Database, odr *LesOdr, txrelay *LesTxRelay, serverPool *serverPool, quitSync chan struct{}, wg *sync.WaitGroup) (*ProtocolManager, error) {
|
||||
// Create the protocol manager with the base fields
|
||||
manager := &ProtocolManager{
|
||||
lightSync: lightSync,
|
||||
|
@@ -153,54 +147,6 @@ func NewProtocolManager(chainConfig *params.ChainConfig, lightSync bool, protoco
|
|||
manager.reqDist = odr.retriever.dist
|
||||
}
|
||||
|
||||
// Initiate a sub-protocol for every implemented version we can handle
|
||||
manager.SubProtocols = make([]p2p.Protocol, 0, len(protocolVersions))
|
||||
for _, version := range protocolVersions {
|
||||
// Compatible, initialize the sub-protocol
|
||||
version := version // Closure for the run
|
||||
manager.SubProtocols = append(manager.SubProtocols, p2p.Protocol{
|
||||
Name: "les",
|
||||
Version: version,
|
||||
Length: ProtocolLengths[version],
|
||||
Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
|
||||
var entry *poolEntry
|
||||
peer := manager.newPeer(int(version), networkId, p, rw)
|
||||
if manager.serverPool != nil {
|
||||
addr := p.RemoteAddr().(*net.TCPAddr)
|
||||
entry = manager.serverPool.connect(peer, addr.IP, uint16(addr.Port))
|
||||
}
|
||||
peer.poolEntry = entry
|
||||
select {
|
||||
case manager.newPeerCh <- peer:
|
||||
manager.wg.Add(1)
|
||||
defer manager.wg.Done()
|
||||
err := manager.handle(peer)
|
||||
if entry != nil {
|
||||
manager.serverPool.disconnect(entry)
|
||||
}
|
||||
return err
|
||||
case <-manager.quitSync:
|
||||
if entry != nil {
|
||||
manager.serverPool.disconnect(entry)
|
||||
}
|
||||
return p2p.DiscQuitting
|
||||
}
|
||||
},
|
||||
NodeInfo: func() interface{} {
|
||||
return manager.NodeInfo()
|
||||
},
|
||||
PeerInfo: func(id discover.NodeID) interface{} {
|
||||
if p := manager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
|
||||
return p.Info()
|
||||
}
|
||||
return nil
|
||||
},
|
||||
})
|
||||
}
|
||||
if len(manager.SubProtocols) == 0 {
|
||||
return nil, errIncompatibleConfig
|
||||
}
|
||||
|
||||
removePeer := manager.removePeer
|
||||
if disableClientRemovePeer {
|
||||
removePeer = func(id string) {}
|
||||
|
@@ -226,6 +172,7 @@ func (pm *ProtocolManager) Start(maxPeers int) {
|
|||
if pm.lightSync {
|
||||
go pm.syncer()
|
||||
} else {
|
||||
pm.clientPool = newFreeClientPool(pm.chainDb, maxPeers, 10000, mclock.System{})
|
||||
go func() {
|
||||
for range pm.newPeerCh {
|
||||
}
|
||||
|
@@ -243,6 +190,9 @@ func (pm *ProtocolManager) Stop() {
|
|||
pm.noMorePeers <- struct{}{}
|
||||
|
||||
close(pm.quitSync) // quits syncer, fetcher
|
||||
if pm.clientPool != nil {
|
||||
pm.clientPool.stop()
|
||||
}
|
||||
|
||||
// Stop downloader and make sure that all the running downloads are complete.
|
||||
pm.downloader.Terminate()
|
||||
|
@@ -259,6 +209,32 @@ func (pm *ProtocolManager) Stop() {
|
|||
log.Info("Light Ethereum protocol stopped")
|
||||
}
|
||||
|
||||
// runPeer is the p2p protocol run function for the given version.
|
||||
func (pm *ProtocolManager) runPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) error {
|
||||
var entry *poolEntry
|
||||
peer := pm.newPeer(int(version), pm.networkId, p, rw)
|
||||
if pm.serverPool != nil {
|
||||
addr := p.RemoteAddr().(*net.TCPAddr)
|
||||
entry = pm.serverPool.connect(peer, addr.IP, uint16(addr.Port))
|
||||
}
|
||||
peer.poolEntry = entry
|
||||
select {
|
||||
case pm.newPeerCh <- peer:
|
||||
pm.wg.Add(1)
|
||||
defer pm.wg.Done()
|
||||
err := pm.handle(peer)
|
||||
if entry != nil {
|
||||
pm.serverPool.disconnect(entry)
|
||||
}
|
||||
return err
|
||||
case <-pm.quitSync:
|
||||
if entry != nil {
|
||||
pm.serverPool.disconnect(entry)
|
||||
}
|
||||
return p2p.DiscQuitting
|
||||
}
|
||||
}
|
||||
|
||||
func (pm *ProtocolManager) newPeer(pv int, nv uint64, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
|
||||
return newPeer(pv, nv, p, newMeteredMsgWriter(rw))
|
||||
}
|
||||
|
@@ -267,7 +243,8 @@ func (pm *ProtocolManager) newPeer(pv int, nv uint64, p *p2p.Peer, rw p2p.MsgRea
|
|||
// this function terminates, the peer is disconnected.
|
||||
func (pm *ProtocolManager) handle(p *peer) error {
|
||||
// Ignore maxPeers if this is a trusted peer
|
||||
if pm.peers.Len() >= pm.maxPeers && !p.Peer.Info().Network.Trusted {
|
||||
// In server mode we try to check into the client pool after handshake
|
||||
if pm.lightSync && pm.peers.Len() >= pm.maxPeers && !p.Peer.Info().Network.Trusted {
|
||||
return p2p.DiscTooManyPeers
|
||||
}
|
||||
|
||||
|
@ -285,6 +262,19 @@ func (pm *ProtocolManager) handle(p *peer) error {
|
|||
p.Log().Debug("Light Ethereum handshake failed", "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if !pm.lightSync && !p.Peer.Info().Network.Trusted {
|
||||
addr, ok := p.RemoteAddr().(*net.TCPAddr)
|
||||
// test peer address is not a tcp address, don't use client pool if can not typecast
|
||||
if ok {
|
||||
id := addr.IP.String()
|
||||
if !pm.clientPool.connect(id, func() { go pm.removePeer(p.id) }) {
|
||||
return p2p.DiscTooManyPeers
|
||||
}
|
||||
defer pm.clientPool.disconnect(id)
|
||||
}
|
||||
}
|
||||
|
||||
if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
|
||||
rw.Init(p.version)
|
||||
}
|
||||
|
@ -1186,30 +1176,6 @@ func (pm *ProtocolManager) txStatus(hashes []common.Hash) []txStatus {
|
|||
return stats
|
||||
}
|
||||
|
||||
// NodeInfo represents a short summary of the Ethereum sub-protocol metadata
|
||||
// known about the host peer.
|
||||
type NodeInfo struct {
|
||||
Network uint64 `json:"network"` // Ethereum network ID (1=Frontier, 2=Morden, Ropsten=3, Rinkeby=4)
|
||||
Difficulty *big.Int `json:"difficulty"` // Total difficulty of the host's blockchain
|
||||
Genesis common.Hash `json:"genesis"` // SHA3 hash of the host's genesis block
|
||||
Config *params.ChainConfig `json:"config"` // Chain configuration for the fork rules
|
||||
Head common.Hash `json:"head"` // SHA3 hash of the host's best owned block
|
||||
}
|
||||
|
||||
// NodeInfo retrieves some protocol metadata about the running host node.
|
||||
func (self *ProtocolManager) NodeInfo() *NodeInfo {
|
||||
head := self.blockchain.CurrentHeader()
|
||||
hash := head.Hash()
|
||||
|
||||
return &NodeInfo{
|
||||
Network: self.networkId,
|
||||
Difficulty: self.blockchain.GetTd(hash, head.Number.Uint64()),
|
||||
Genesis: self.blockchain.Genesis().Hash(),
|
||||
Config: self.blockchain.Config(),
|
||||
Head: hash,
|
||||
}
|
||||
}
|
||||
|
||||
// downloaderPeerNotify implements peerSetNotify
|
||||
type downloaderPeerNotify ProtocolManager
|
||||
|
||||
|
@ -1241,7 +1207,7 @@ func (pc *peerConnection) RequestHeadersByHash(origin common.Hash, amount int, s
|
|||
}
|
||||
_, ok := <-pc.manager.reqDist.queue(rq)
|
||||
if !ok {
|
||||
return ErrNoPeers
|
||||
return light.ErrNoPeers
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -1265,7 +1231,7 @@ func (pc *peerConnection) RequestHeadersByNumber(origin uint64, amount int, skip
|
|||
}
|
||||
_, ok := <-pc.manager.reqDist.queue(rq)
|
||||
if !ok {
|
||||
return ErrNoPeers
|
||||
return light.ErrNoPeers
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@@ -33,14 +33,11 @@ type LesOdr struct {
stop chan struct{}
}

func NewLesOdr(db ethdb.Database, chtIndexer, bloomTrieIndexer, bloomIndexer *core.ChainIndexer, retriever *retrieveManager) *LesOdr {
func NewLesOdr(db ethdb.Database, retriever *retrieveManager) *LesOdr {
return &LesOdr{
db: db,
chtIndexer: chtIndexer,
bloomTrieIndexer: bloomTrieIndexer,
bloomIndexer: bloomIndexer,
retriever: retriever,
stop: make(chan struct{}),
db: db,
retriever: retriever,
stop: make(chan struct{}),
}
}

@@ -54,6 +51,13 @@ func (odr *LesOdr) Database() ethdb.Database {
return odr.db
}

// SetIndexers adds the necessary chain indexers to the ODR backend
func (odr *LesOdr) SetIndexers(chtIndexer, bloomTrieIndexer, bloomIndexer *core.ChainIndexer) {
odr.chtIndexer = chtIndexer
odr.bloomTrieIndexer = bloomTrieIndexer
odr.bloomIndexer = bloomIndexer
}

// ChtIndexer returns the CHT chain indexer
func (odr *LesOdr) ChtIndexer() *core.ChainIndexer {
return odr.chtIndexer
@@ -27,6 +27,7 @@ import (
"time"

"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/light"
)

var (

@@ -207,7 +208,7 @@ func (r *sentReq) stateRequesting() reqStateFn {
return r.stateNoMorePeers
}
// nothing to wait for, no more peers to ask, return with error
r.stop(ErrNoPeers)
r.stop(light.ErrNoPeers)
// no need to go to stopped state because waiting() already returned false
return nil
}
@@ -38,21 +38,19 @@ import (
)

type LesServer struct {
config *eth.Config
protocolManager *ProtocolManager
fcManager *flowcontrol.ClientManager // nil if our node is client only
fcCostStats *requestCostStats
defParams *flowcontrol.ServerParams
lesTopics []discv5.Topic
privateKey *ecdsa.PrivateKey
quitSync chan struct{}
lesCommons

chtIndexer, bloomTrieIndexer *core.ChainIndexer
fcManager *flowcontrol.ClientManager // nil if our node is client only
fcCostStats *requestCostStats
defParams *flowcontrol.ServerParams
lesTopics []discv5.Topic
privateKey *ecdsa.PrivateKey
quitSync chan struct{}
}

func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) {
quitSync := make(chan struct{})
pm, err := NewProtocolManager(eth.BlockChain().Config(), false, ServerProtocolVersions, config.NetworkId, eth.EventMux(), eth.Engine(), newPeerSet(), eth.BlockChain(), eth.TxPool(), eth.ChainDb(), nil, nil, nil, quitSync, new(sync.WaitGroup))
pm, err := NewProtocolManager(eth.BlockChain().Config(), false, config.NetworkId, eth.EventMux(), eth.Engine(), newPeerSet(), eth.BlockChain(), eth.TxPool(), eth.ChainDb(), nil, nil, nil, quitSync, new(sync.WaitGroup))
if err != nil {
return nil, err
}

@@ -63,13 +61,17 @@ func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) {
}

srv := &LesServer{
config: config,
protocolManager: pm,
quitSync: quitSync,
lesTopics: lesTopics,
chtIndexer: light.NewChtIndexer(eth.ChainDb(), false),
bloomTrieIndexer: light.NewBloomTrieIndexer(eth.ChainDb(), false),
lesCommons: lesCommons{
config: config,
chainDb: eth.ChainDb(),
chtIndexer: light.NewChtIndexer(eth.ChainDb(), false, nil),
bloomTrieIndexer: light.NewBloomTrieIndexer(eth.ChainDb(), false, nil),
protocolManager: pm,
},
quitSync: quitSync,
lesTopics: lesTopics,
}

logger := log.New()

chtV1SectionCount, _, _ := srv.chtIndexer.Sections() // indexer still uses LES/1 4k section size for backwards server compatibility

@@ -104,7 +106,7 @@ func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) {
}

func (s *LesServer) Protocols() []p2p.Protocol {
return s.protocolManager.SubProtocols
return s.makeProtocols(ServerProtocolVersions)
}

// Start starts the LES server
@@ -116,19 +116,19 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, engine consensus.
}

// addTrustedCheckpoint adds a trusted checkpoint to the blockchain
func (self *LightChain) addTrustedCheckpoint(cp trustedCheckpoint) {
func (self *LightChain) addTrustedCheckpoint(cp TrustedCheckpoint) {
if self.odr.ChtIndexer() != nil {
StoreChtRoot(self.chainDb, cp.sectionIdx, cp.sectionHead, cp.chtRoot)
self.odr.ChtIndexer().AddKnownSectionHead(cp.sectionIdx, cp.sectionHead)
StoreChtRoot(self.chainDb, cp.SectionIdx, cp.SectionHead, cp.CHTRoot)
self.odr.ChtIndexer().AddKnownSectionHead(cp.SectionIdx, cp.SectionHead)
}
if self.odr.BloomTrieIndexer() != nil {
StoreBloomTrieRoot(self.chainDb, cp.sectionIdx, cp.sectionHead, cp.bloomTrieRoot)
self.odr.BloomTrieIndexer().AddKnownSectionHead(cp.sectionIdx, cp.sectionHead)
StoreBloomTrieRoot(self.chainDb, cp.SectionIdx, cp.SectionHead, cp.BloomRoot)
self.odr.BloomTrieIndexer().AddKnownSectionHead(cp.SectionIdx, cp.SectionHead)
}
if self.odr.BloomIndexer() != nil {
self.odr.BloomIndexer().AddKnownSectionHead(cp.sectionIdx, cp.sectionHead)
self.odr.BloomIndexer().AddKnownSectionHead(cp.SectionIdx, cp.SectionHead)
}
log.Info("Added trusted checkpoint", "chain", cp.name, "block", (cp.sectionIdx+1)*CHTFrequencyClient-1, "hash", cp.sectionHead)
log.Info("Added trusted checkpoint", "chain", cp.name, "block", (cp.SectionIdx+1)*CHTFrequencyClient-1, "hash", cp.SectionHead)
}

func (self *LightChain) getProcInterrupt() bool {

@@ -464,22 +464,32 @@ func (self *LightChain) GetHeaderByNumberOdr(ctx context.Context, number uint64)
func (self *LightChain) Config() *params.ChainConfig { return self.hc.Config() }

func (self *LightChain) SyncCht(ctx context.Context) bool {
// If we don't have a CHT indexer, abort
if self.odr.ChtIndexer() == nil {
return false
}
headNum := self.CurrentHeader().Number.Uint64()
chtCount, _, _ := self.odr.ChtIndexer().Sections()
if headNum+1 < chtCount*CHTFrequencyClient {
num := chtCount*CHTFrequencyClient - 1
header, err := GetHeaderByNumber(ctx, self.odr, num)
if header != nil && err == nil {
self.mu.Lock()
if self.hc.CurrentHeader().Number.Uint64() < header.Number.Uint64() {
self.hc.SetCurrentHeader(header)
}
self.mu.Unlock()
return true
// Ensure the remote CHT head is ahead of us
head := self.CurrentHeader().Number.Uint64()
sections, _, _ := self.odr.ChtIndexer().Sections()

latest := sections*CHTFrequencyClient - 1
if clique := self.hc.Config().Clique; clique != nil {
latest -= latest % clique.Epoch // epoch snapshot for clique
}
if head >= latest {
return false
}
// Retrieve the latest useful header and update to it
if header, err := GetHeaderByNumber(ctx, self.odr, latest); header != nil && err == nil {
self.mu.Lock()
defer self.mu.Unlock()

// Ensure the chain didn't move past the latest block while retrieving it
if self.hc.CurrentHeader().Number.Uint64() < header.Number.Uint64() {
log.Info("Updated latest header based on CHT", "number", header.Number, "hash", header.Hash())
self.hc.SetCurrentHeader(header)
}
return true
}
return false
}
@@ -20,6 +20,7 @@ package light

import (
"context"
"errors"
"math/big"

"github.com/ethereum/go-ethereum/common"

@@ -33,6 +34,9 @@ import (
// service is not required.
var NoOdr = context.Background()

// ErrNoPeers is returned if no peers capable of serving a queued request are available
var ErrNoPeers = errors.New("no suitable peers available")

// OdrBackend is an interface to a backend service that handles ODR retrievals type
type OdrBackend interface {
Database() ethdb.Database
@@ -17,8 +17,10 @@
package light

import (
"context"
"encoding/binary"
"errors"
"fmt"
"math/big"
"time"

@@ -47,37 +49,38 @@ const (
HelperTrieProcessConfirmations = 256 // number of confirmations before a HelperTrie is generated
)

// trustedCheckpoint represents a set of post-processed trie roots (CHT and BloomTrie) associated with
// TrustedCheckpoint represents a set of post-processed trie roots (CHT and BloomTrie) associated with
// the appropriate section index and head hash. It is used to start light syncing from this checkpoint
// and avoid downloading the entire header chain while still being able to securely access old headers/logs.
type trustedCheckpoint struct {
name string
sectionIdx uint64
sectionHead, chtRoot, bloomTrieRoot common.Hash
type TrustedCheckpoint struct {
name string
SectionIdx uint64
SectionHead, CHTRoot, BloomRoot common.Hash
}

var (
mainnetCheckpoint = trustedCheckpoint{
name: "mainnet",
sectionIdx: 187,
sectionHead: common.HexToHash("e6baa034efa31562d71ff23676512dec6562c1ad0301e08843b907e81958c696"),
chtRoot: common.HexToHash("28001955219719cf06de1b08648969139d123a9835fc760547a1e4dabdabc15a"),
bloomTrieRoot: common.HexToHash("395ca2373fc662720ac6b58b3bbe71f68aa0f38b63b2d3553dd32ff3c51eebc4"),
}

ropstenCheckpoint = trustedCheckpoint{
name: "ropsten",
sectionIdx: 117,
sectionHead: common.HexToHash("9529b38631ae30783f56cbe4c3b9f07575b770ecba4f6e20a274b1e2f40fede1"),
chtRoot: common.HexToHash("6f48e9f101f1fac98e7d74fbbcc4fda138358271ffd974d40d2506f0308bb363"),
bloomTrieRoot: common.HexToHash("8242342e66e942c0cd893484e6736b9862ceb88b43ca344bb06a8285ac1b6d64"),
}
)

// trustedCheckpoints associates each known checkpoint with the genesis hash of the chain it belongs to
var trustedCheckpoints = map[common.Hash]trustedCheckpoint{
params.MainnetGenesisHash: mainnetCheckpoint,
params.TestnetGenesisHash: ropstenCheckpoint,
var trustedCheckpoints = map[common.Hash]TrustedCheckpoint{
params.MainnetGenesisHash: {
name: "mainnet",
SectionIdx: 187,
SectionHead: common.HexToHash("e6baa034efa31562d71ff23676512dec6562c1ad0301e08843b907e81958c696"),
CHTRoot: common.HexToHash("28001955219719cf06de1b08648969139d123a9835fc760547a1e4dabdabc15a"),
BloomRoot: common.HexToHash("395ca2373fc662720ac6b58b3bbe71f68aa0f38b63b2d3553dd32ff3c51eebc4"),
},
params.TestnetGenesisHash: {
name: "ropsten",
SectionIdx: 117,
SectionHead: common.HexToHash("9529b38631ae30783f56cbe4c3b9f07575b770ecba4f6e20a274b1e2f40fede1"),
CHTRoot: common.HexToHash("6f48e9f101f1fac98e7d74fbbcc4fda138358271ffd974d40d2506f0308bb363"),
BloomRoot: common.HexToHash("8242342e66e942c0cd893484e6736b9862ceb88b43ca344bb06a8285ac1b6d64"),
},
params.RinkebyGenesisHash: {
name: "rinkeby",
SectionIdx: 85,
SectionHead: common.HexToHash("92cfa67afc4ad8ab0dcbc6fa49efd14b5b19402442e7317e6bc879d85f89d64d"),
CHTRoot: common.HexToHash("2802ec92cd7a54a75bca96afdc666ae7b99e5d96cf8192dcfb09588812f51564"),
BloomRoot: common.HexToHash("ebefeb31a9a42866d8cf2d2477704b4c3d7c20d0e4e9b5aaa77f396e016a1263"),
},
}

var (
@@ -119,7 +122,8 @@ func StoreChtRoot(db ethdb.Database, sectionIdx uint64, sectionHead, root common

// ChtIndexerBackend implements core.ChainIndexerBackend
type ChtIndexerBackend struct {
diskdb ethdb.Database
diskdb, trieTable ethdb.Database
odr OdrBackend
triedb *trie.Database
section, sectionSize uint64
lastHash common.Hash

@@ -127,7 +131,7 @@ type ChtIndexerBackend struct {
}

// NewBloomTrieIndexer creates a BloomTrie chain indexer
func NewChtIndexer(db ethdb.Database, clientMode bool) *core.ChainIndexer {
func NewChtIndexer(db ethdb.Database, clientMode bool, odr OdrBackend) *core.ChainIndexer {
var sectionSize, confirmReq uint64
if clientMode {
sectionSize = CHTFrequencyClient

@@ -137,28 +141,64 @@ func NewChtIndexer(db ethdb.Database, clientMode bool) *core.ChainIndexer {
confirmReq = HelperTrieProcessConfirmations
}
idb := ethdb.NewTable(db, "chtIndex-")
trieTable := ethdb.NewTable(db, ChtTablePrefix)
backend := &ChtIndexerBackend{
diskdb: db,
triedb: trie.NewDatabase(ethdb.NewTable(db, ChtTablePrefix)),
odr: odr,
trieTable: trieTable,
triedb: trie.NewDatabase(trieTable),
sectionSize: sectionSize,
}
return core.NewChainIndexer(db, idb, backend, sectionSize, confirmReq, time.Millisecond*100, "cht")
}

// fetchMissingNodes tries to retrieve the last entry of the latest trusted CHT from the
// ODR backend in order to be able to add new entries and calculate subsequent root hashes
func (c *ChtIndexerBackend) fetchMissingNodes(ctx context.Context, section uint64, root common.Hash) error {
batch := c.trieTable.NewBatch()
r := &ChtRequest{ChtRoot: root, ChtNum: section - 1, BlockNum: section*c.sectionSize - 1}
for {
err := c.odr.Retrieve(ctx, r)
switch err {
case nil:
r.Proof.Store(batch)
return batch.Write()
case ErrNoPeers:
// if there are no peers to serve, retry later
select {
case <-ctx.Done():
return ctx.Err()
case <-time.After(time.Second * 10):
// stay in the loop and try again
}
default:
return err
}
}
}

// Reset implements core.ChainIndexerBackend
func (c *ChtIndexerBackend) Reset(section uint64, lastSectionHead common.Hash) error {
func (c *ChtIndexerBackend) Reset(ctx context.Context, section uint64, lastSectionHead common.Hash) error {
var root common.Hash
if section > 0 {
root = GetChtRoot(c.diskdb, section-1, lastSectionHead)
}
var err error
c.trie, err = trie.New(root, c.triedb)

if err != nil && c.odr != nil {
err = c.fetchMissingNodes(ctx, section, root)
if err == nil {
c.trie, err = trie.New(root, c.triedb)
}
}

c.section = section
return err
}

// Process implements core.ChainIndexerBackend
func (c *ChtIndexerBackend) Process(header *types.Header) {
func (c *ChtIndexerBackend) Process(ctx context.Context, header *types.Header) error {
hash, num := header.Hash(), header.Number.Uint64()
c.lastHash = hash

@@ -170,6 +210,7 @@ func (c *ChtIndexerBackend) Process(header *types.Header) {
binary.BigEndian.PutUint64(encNumber[:], num)
data, _ := rlp.EncodeToBytes(ChtNode{hash, td})
c.trie.Update(encNumber[:], data)
return nil
}

// Commit implements core.ChainIndexerBackend

@@ -181,16 +222,15 @@ func (c *ChtIndexerBackend) Commit() error {
c.triedb.Commit(root, false)

if ((c.section+1)*c.sectionSize)%CHTFrequencyClient == 0 {
log.Info("Storing CHT", "section", c.section*c.sectionSize/CHTFrequencyClient, "head", c.lastHash, "root", root)
log.Info("Storing CHT", "section", c.section*c.sectionSize/CHTFrequencyClient, "head", fmt.Sprintf("%064x", c.lastHash), "root", fmt.Sprintf("%064x", root))
}
StoreChtRoot(c.diskdb, c.section, c.lastHash, root)
return nil
}

const (
BloomTrieFrequency = 32768
ethBloomBitsSection = 4096
ethBloomBitsConfirmations = 256
BloomTrieFrequency = 32768
ethBloomBitsSection = 4096
)
||||
|
||||
var (
|
||||
|
@ -215,7 +255,8 @@ func StoreBloomTrieRoot(db ethdb.Database, sectionIdx uint64, sectionHead, root
|
|||
|
||||
// BloomTrieIndexerBackend implements core.ChainIndexerBackend
|
||||
type BloomTrieIndexerBackend struct {
|
||||
diskdb ethdb.Database
|
||||
diskdb, trieTable ethdb.Database
|
||||
odr OdrBackend
|
||||
triedb *trie.Database
|
||||
section, parentSectionSize, bloomTrieRatio uint64
|
||||
trie *trie.Trie
|
||||
|
@ -223,44 +264,98 @@ type BloomTrieIndexerBackend struct {
|
|||
}
|
||||
|
||||
// NewBloomTrieIndexer creates a BloomTrie chain indexer
|
||||
func NewBloomTrieIndexer(db ethdb.Database, clientMode bool) *core.ChainIndexer {
|
||||
func NewBloomTrieIndexer(db ethdb.Database, clientMode bool, odr OdrBackend) *core.ChainIndexer {
|
||||
trieTable := ethdb.NewTable(db, BloomTrieTablePrefix)
|
||||
backend := &BloomTrieIndexerBackend{
|
||||
diskdb: db,
|
||||
triedb: trie.NewDatabase(ethdb.NewTable(db, BloomTrieTablePrefix)),
|
||||
diskdb: db,
|
||||
odr: odr,
|
||||
trieTable: trieTable,
|
||||
triedb: trie.NewDatabase(trieTable),
|
||||
}
|
||||
idb := ethdb.NewTable(db, "bltIndex-")
|
||||
|
||||
var confirmReq uint64
|
||||
if clientMode {
|
||||
backend.parentSectionSize = BloomTrieFrequency
|
||||
confirmReq = HelperTrieConfirmations
|
||||
} else {
|
||||
backend.parentSectionSize = ethBloomBitsSection
|
||||
confirmReq = HelperTrieProcessConfirmations
|
||||
}
|
||||
backend.bloomTrieRatio = BloomTrieFrequency / backend.parentSectionSize
|
||||
backend.sectionHeads = make([]common.Hash, backend.bloomTrieRatio)
|
||||
return core.NewChainIndexer(db, idb, backend, BloomTrieFrequency, confirmReq-ethBloomBitsConfirmations, time.Millisecond*100, "bloomtrie")
|
||||
return core.NewChainIndexer(db, idb, backend, BloomTrieFrequency, 0, time.Millisecond*100, "bloomtrie")
|
||||
}
|
||||
|
||||
// fetchMissingNodes tries to retrieve the last entries of the latest trusted bloom trie from the
|
||||
// ODR backend in order to be able to add new entries and calculate subsequent root hashes
|
||||
func (b *BloomTrieIndexerBackend) fetchMissingNodes(ctx context.Context, section uint64, root common.Hash) error {
|
||||
indexCh := make(chan uint, types.BloomBitLength)
|
||||
type res struct {
|
||||
nodes *NodeSet
|
||||
err error
|
||||
}
|
||||
resCh := make(chan res, types.BloomBitLength)
|
||||
for i := 0; i < 20; i++ {
|
||||
go func() {
|
||||
for bitIndex := range indexCh {
|
||||
r := &BloomRequest{BloomTrieRoot: root, BloomTrieNum: section - 1, BitIdx: bitIndex, SectionIdxList: []uint64{section - 1}}
|
||||
for {
|
||||
if err := b.odr.Retrieve(ctx, r); err == ErrNoPeers {
|
||||
// if there are no peers to serve, retry later
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
resCh <- res{nil, ctx.Err()}
|
||||
return
|
||||
case <-time.After(time.Second * 10):
|
||||
// stay in the loop and try again
|
||||
}
|
||||
} else {
|
||||
resCh <- res{r.Proofs, err}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
for i := uint(0); i < types.BloomBitLength; i++ {
|
||||
indexCh <- i
|
||||
}
|
||||
close(indexCh)
|
||||
batch := b.trieTable.NewBatch()
|
||||
for i := uint(0); i < types.BloomBitLength; i++ {
|
||||
res := <-resCh
|
||||
if res.err != nil {
|
||||
return res.err
|
||||
}
|
||||
res.nodes.Store(batch)
|
||||
}
|
||||
return batch.Write()
|
||||
}
|
||||
|
||||
// Reset implements core.ChainIndexerBackend
|
||||
func (b *BloomTrieIndexerBackend) Reset(section uint64, lastSectionHead common.Hash) error {
|
||||
func (b *BloomTrieIndexerBackend) Reset(ctx context.Context, section uint64, lastSectionHead common.Hash) error {
|
||||
var root common.Hash
|
||||
if section > 0 {
|
||||
root = GetBloomTrieRoot(b.diskdb, section-1, lastSectionHead)
|
||||
}
|
||||
var err error
|
||||
b.trie, err = trie.New(root, b.triedb)
|
||||
if err != nil && b.odr != nil {
|
||||
err = b.fetchMissingNodes(ctx, section, root)
|
||||
if err == nil {
|
||||
b.trie, err = trie.New(root, b.triedb)
|
||||
}
|
||||
}
|
||||
b.section = section
|
||||
return err
|
||||
}
|
||||
|
||||
// Process implements core.ChainIndexerBackend
|
||||
func (b *BloomTrieIndexerBackend) Process(header *types.Header) {
|
||||
func (b *BloomTrieIndexerBackend) Process(ctx context.Context, header *types.Header) error {
|
||||
num := header.Number.Uint64() - b.section*BloomTrieFrequency
|
||||
if (num+1)%b.parentSectionSize == 0 {
|
||||
b.sectionHeads[num/b.parentSectionSize] = header.Hash()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Commit implements core.ChainIndexerBackend
|
||||
|
@ -300,7 +395,7 @@ func (b *BloomTrieIndexerBackend) Commit() error {
|
|||
b.triedb.Commit(root, false)
|
||||
|
||||
sectionHead := b.sectionHeads[b.bloomTrieRatio-1]
|
||||
log.Info("Storing bloom trie", "section", b.section, "head", sectionHead, "root", root, "compression", float64(compSize)/float64(decompSize))
|
||||
log.Info("Storing bloom trie", "section", b.section, "head", fmt.Sprintf("%064x", sectionHead), "root", fmt.Sprintf("%064x", root), "compression", float64(compSize)/float64(decompSize))
|
||||
StoreBloomTrieRoot(b.diskdb, b.section, sectionHead, root)
|
||||
|
||||
return nil
|
||||
|
|
|
@ -1,119 +0,0 @@
|
|||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package miner
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/ethereum/go-ethereum/consensus"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
)
|
||||
|
||||
type CpuAgent struct {
|
||||
mu sync.Mutex
|
||||
|
||||
workCh chan *Work
|
||||
stop chan struct{}
|
||||
quitCurrentOp chan struct{}
|
||||
returnCh chan<- *Result
|
||||
|
||||
chain consensus.ChainReader
|
||||
engine consensus.Engine
|
||||
|
||||
isMining int32 // isMining indicates whether the agent is currently mining
|
||||
}
|
||||
|
||||
func NewCpuAgent(chain consensus.ChainReader, engine consensus.Engine) *CpuAgent {
|
||||
miner := &CpuAgent{
|
||||
chain: chain,
|
||||
engine: engine,
|
||||
stop: make(chan struct{}, 1),
|
||||
workCh: make(chan *Work, 1),
|
||||
}
|
||||
return miner
|
||||
}
|
||||
|
||||
func (self *CpuAgent) Work() chan<- *Work { return self.workCh }
|
||||
func (self *CpuAgent) SetReturnCh(ch chan<- *Result) { self.returnCh = ch }
|
||||
|
||||
func (self *CpuAgent) Stop() {
|
||||
if !atomic.CompareAndSwapInt32(&self.isMining, 1, 0) {
|
||||
return // agent already stopped
|
||||
}
|
||||
self.stop <- struct{}{}
|
||||
done:
|
||||
// Empty work channel
|
||||
for {
|
||||
select {
|
||||
case <-self.workCh:
|
||||
default:
|
||||
break done
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (self *CpuAgent) Start() {
|
||||
if !atomic.CompareAndSwapInt32(&self.isMining, 0, 1) {
|
||||
return // agent already started
|
||||
}
|
||||
go self.update()
|
||||
}
|
||||
|
||||
func (self *CpuAgent) update() {
|
||||
out:
|
||||
for {
|
||||
select {
|
||||
case work := <-self.workCh:
|
||||
self.mu.Lock()
|
||||
if self.quitCurrentOp != nil {
|
||||
close(self.quitCurrentOp)
|
||||
}
|
||||
self.quitCurrentOp = make(chan struct{})
|
||||
go self.mine(work, self.quitCurrentOp)
|
||||
self.mu.Unlock()
|
||||
case <-self.stop:
|
||||
self.mu.Lock()
|
||||
if self.quitCurrentOp != nil {
|
||||
close(self.quitCurrentOp)
|
||||
self.quitCurrentOp = nil
|
||||
}
|
||||
self.mu.Unlock()
|
||||
break out
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (self *CpuAgent) mine(work *Work, stop <-chan struct{}) {
|
||||
if result, err := self.engine.Seal(self.chain, work.Block, stop); result != nil {
|
||||
log.Info("Successfully sealed new block", "number", result.Number(), "hash", result.Hash())
|
||||
self.returnCh <- &Result{work, result}
|
||||
} else {
|
||||
if err != nil {
|
||||
log.Warn("Block sealing failed", "err", err)
|
||||
}
|
||||
self.returnCh <- nil
|
||||
}
|
||||
}
|
||||
|
||||
func (self *CpuAgent) GetHashRate() int64 {
|
||||
if pow, ok := self.engine.(consensus.PoW); ok {
|
||||
return int64(pow.Hashrate())
|
||||
}
|
||||
return 0
|
||||
}
|
|
@ -20,15 +20,14 @@ package miner
|
|||
import (
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/consensus"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/eth/downloader"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
|
@ -36,36 +35,32 @@ import (
|
|||
|
||||
// Backend wraps all methods required for mining.
|
||||
type Backend interface {
|
||||
AccountManager() *accounts.Manager
|
||||
BlockChain() *core.BlockChain
|
||||
TxPool() *core.TxPool
|
||||
ChainDb() ethdb.Database
|
||||
}
|
||||
|
||||
// Miner creates blocks and searches for proof-of-work values.
|
||||
type Miner struct {
|
||||
mux *event.TypeMux
|
||||
|
||||
worker *worker
|
||||
|
||||
mux *event.TypeMux
|
||||
worker *worker
|
||||
coinbase common.Address
|
||||
mining int32
|
||||
eth Backend
|
||||
engine consensus.Engine
|
||||
exitCh chan struct{}
|
||||
|
||||
canStart int32 // can start indicates whether we can start the mining operation
|
||||
shouldStart int32 // should start indicates whether we should start after sync
|
||||
}
|
||||
|
||||
func New(eth Backend, config *params.ChainConfig, mux *event.TypeMux, engine consensus.Engine) *Miner {
|
||||
func New(eth Backend, config *params.ChainConfig, mux *event.TypeMux, engine consensus.Engine, recommit time.Duration) *Miner {
|
||||
miner := &Miner{
|
||||
eth: eth,
|
||||
mux: mux,
|
||||
engine: engine,
|
||||
worker: newWorker(config, engine, common.Address{}, eth, mux),
|
||||
exitCh: make(chan struct{}),
|
||||
worker: newWorker(config, engine, eth, mux, recommit),
|
||||
canStart: 1,
|
||||
}
|
||||
miner.Register(NewCpuAgent(eth.BlockChain(), engine))
|
||||
go miner.update()
|
||||
|
||||
return miner
|
||||
|
@ -77,28 +72,35 @@ func New(eth Backend, config *params.ChainConfig, mux *event.TypeMux, engine con
|
|||
// and halt your mining operation for as long as the DOS continues.
|
||||
func (self *Miner) update() {
|
||||
events := self.mux.Subscribe(downloader.StartEvent{}, downloader.DoneEvent{}, downloader.FailedEvent{})
|
||||
out:
|
||||
for ev := range events.Chan() {
|
||||
switch ev.Data.(type) {
|
||||
case downloader.StartEvent:
|
||||
atomic.StoreInt32(&self.canStart, 0)
|
||||
if self.Mining() {
|
||||
self.Stop()
|
||||
atomic.StoreInt32(&self.shouldStart, 1)
|
||||
log.Info("Mining aborted due to sync")
|
||||
}
|
||||
case downloader.DoneEvent, downloader.FailedEvent:
|
||||
shouldStart := atomic.LoadInt32(&self.shouldStart) == 1
|
||||
defer events.Unsubscribe()
|
||||
|
||||
atomic.StoreInt32(&self.canStart, 1)
|
||||
atomic.StoreInt32(&self.shouldStart, 0)
|
||||
if shouldStart {
|
||||
self.Start(self.coinbase)
|
||||
for {
|
||||
select {
|
||||
case ev := <-events.Chan():
|
||||
if ev == nil {
|
||||
return
|
||||
}
|
||||
// unsubscribe. we're only interested in this event once
|
||||
events.Unsubscribe()
|
||||
// stop immediately and ignore all further pending events
|
||||
break out
|
||||
switch ev.Data.(type) {
|
||||
case downloader.StartEvent:
|
||||
atomic.StoreInt32(&self.canStart, 0)
|
||||
if self.Mining() {
|
||||
self.Stop()
|
||||
atomic.StoreInt32(&self.shouldStart, 1)
|
||||
log.Info("Mining aborted due to sync")
|
||||
}
|
||||
case downloader.DoneEvent, downloader.FailedEvent:
|
||||
shouldStart := atomic.LoadInt32(&self.shouldStart) == 1
|
||||
|
||||
atomic.StoreInt32(&self.canStart, 1)
|
||||
atomic.StoreInt32(&self.shouldStart, 0)
|
||||
if shouldStart {
|
||||
self.Start(self.coinbase)
|
||||
}
|
||||
// stop immediately and ignore all further pending events
|
||||
return
|
||||
}
|
||||
case <-self.exitCh:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -111,47 +113,28 @@ func (self *Miner) Start(coinbase common.Address) {
|
|||
log.Info("Network syncing, will start miner afterwards")
|
||||
return
|
||||
}
|
||||
atomic.StoreInt32(&self.mining, 1)
|
||||
|
||||
log.Info("Starting mining operation")
|
||||
self.worker.start()
|
||||
self.worker.commitNewWork()
|
||||
}
|
||||
|
||||
func (self *Miner) Stop() {
|
||||
self.worker.stop()
|
||||
atomic.StoreInt32(&self.mining, 0)
|
||||
atomic.StoreInt32(&self.shouldStart, 0)
|
||||
}
|
||||
|
||||
func (self *Miner) Register(agent Agent) {
|
||||
if self.Mining() {
|
||||
agent.Start()
|
||||
}
|
||||
self.worker.register(agent)
|
||||
}
|
||||
|
||||
func (self *Miner) Unregister(agent Agent) {
|
||||
self.worker.unregister(agent)
|
||||
func (self *Miner) Close() {
|
||||
self.worker.close()
|
||||
close(self.exitCh)
|
||||
}
|
||||
|
||||
func (self *Miner) Mining() bool {
|
||||
return atomic.LoadInt32(&self.mining) > 0
|
||||
return self.worker.isRunning()
|
||||
}
|
||||
|
||||
func (self *Miner) HashRate() (tot int64) {
|
||||
func (self *Miner) HashRate() uint64 {
|
||||
if pow, ok := self.engine.(consensus.PoW); ok {
|
||||
tot += int64(pow.Hashrate())
|
||||
return uint64(pow.Hashrate())
|
||||
}
|
||||
// do we care this might race? is it worth we're rewriting some
|
||||
// aspects of the worker/locking up agents so we can get an accurate
|
||||
// hashrate?
|
||||
for agent := range self.worker.agents {
|
||||
if _, ok := agent.(*CpuAgent); !ok {
|
||||
tot += agent.GetHashRate()
|
||||
}
|
||||
}
|
||||
return
|
||||
return 0
|
||||
}
|
||||
|
||||
func (self *Miner) SetExtra(extra []byte) error {
|
||||
|
@ -162,6 +145,11 @@ func (self *Miner) SetExtra(extra []byte) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// SetRecommitInterval sets the interval for sealing work resubmitting.
|
||||
func (self *Miner) SetRecommitInterval(interval time.Duration) {
|
||||
self.worker.setRecommitInterval(interval)
|
||||
}
|
||||
|
||||
// Pending returns the currently pending block and associated state.
|
||||
func (self *Miner) Pending() (*types.Block, *state.StateDB) {
|
||||
return self.worker.pending()
|
||||
|
|
|
@ -1,202 +0,0 @@
|
|||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package miner
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"math/big"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/consensus"
|
||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
)
|
||||
|
||||
type hashrate struct {
|
||||
ping time.Time
|
||||
rate uint64
|
||||
}
|
||||
|
||||
type RemoteAgent struct {
|
||||
mu sync.Mutex
|
||||
|
||||
quitCh chan struct{}
|
||||
workCh chan *Work
|
||||
returnCh chan<- *Result
|
||||
|
||||
chain consensus.ChainReader
|
||||
engine consensus.Engine
|
||||
currentWork *Work
|
||||
work map[common.Hash]*Work
|
||||
|
||||
hashrateMu sync.RWMutex
|
||||
hashrate map[common.Hash]hashrate
|
||||
|
||||
running int32 // running indicates whether the agent is active. Call atomically
|
||||
}
|
||||
|
||||
func NewRemoteAgent(chain consensus.ChainReader, engine consensus.Engine) *RemoteAgent {
|
||||
return &RemoteAgent{
|
||||
chain: chain,
|
||||
engine: engine,
|
||||
work: make(map[common.Hash]*Work),
|
||||
hashrate: make(map[common.Hash]hashrate),
|
||||
}
|
||||
}
|
||||
|
||||
func (a *RemoteAgent) SubmitHashrate(id common.Hash, rate uint64) {
|
||||
a.hashrateMu.Lock()
|
||||
defer a.hashrateMu.Unlock()
|
||||
|
||||
a.hashrate[id] = hashrate{time.Now(), rate}
|
||||
}
|
||||
|
||||
func (a *RemoteAgent) Work() chan<- *Work {
|
||||
return a.workCh
|
||||
}
|
||||
|
||||
func (a *RemoteAgent) SetReturnCh(returnCh chan<- *Result) {
|
||||
a.returnCh = returnCh
|
||||
}
|
||||
|
||||
func (a *RemoteAgent) Start() {
|
||||
if !atomic.CompareAndSwapInt32(&a.running, 0, 1) {
|
||||
return
|
||||
}
|
||||
a.quitCh = make(chan struct{})
|
||||
a.workCh = make(chan *Work, 1)
|
||||
go a.loop(a.workCh, a.quitCh)
|
||||
}
|
||||
|
||||
func (a *RemoteAgent) Stop() {
|
||||
if !atomic.CompareAndSwapInt32(&a.running, 1, 0) {
|
||||
return
|
||||
}
|
||||
close(a.quitCh)
|
||||
close(a.workCh)
|
||||
}
|
||||
|
||||
// GetHashRate returns the accumulated hashrate of all identifier combined
|
||||
func (a *RemoteAgent) GetHashRate() (tot int64) {
|
||||
a.hashrateMu.RLock()
|
||||
defer a.hashrateMu.RUnlock()
|
||||
|
||||
// this could overflow
|
||||
for _, hashrate := range a.hashrate {
|
||||
tot += int64(hashrate.rate)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (a *RemoteAgent) GetWork() ([3]string, error) {
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
|
||||
var res [3]string
|
||||
|
||||
if a.currentWork != nil {
|
||||
block := a.currentWork.Block
|
||||
|
||||
res[0] = block.HashNoNonce().Hex()
|
||||
seedHash := ethash.SeedHash(block.NumberU64())
|
||||
res[1] = common.BytesToHash(seedHash).Hex()
|
||||
// Calculate the "target" to be returned to the external miner
|
||||
n := big.NewInt(1)
|
||||
n.Lsh(n, 255)
|
||||
n.Div(n, block.Difficulty())
|
||||
n.Lsh(n, 1)
|
||||
res[2] = common.BytesToHash(n.Bytes()).Hex()
|
||||
|
||||
a.work[block.HashNoNonce()] = a.currentWork
|
||||
return res, nil
|
||||
}
|
||||
return res, errors.New("No work available yet, don't panic.")
|
||||
}
|
||||
|
||||
// SubmitWork tries to inject a pow solution into the remote agent, returning
|
||||
// whether the solution was accepted or not (not can be both a bad pow as well as
|
||||
// any other error, like no work pending).
|
||||
func (a *RemoteAgent) SubmitWork(nonce types.BlockNonce, mixDigest, hash common.Hash) bool {
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
|
||||
// Make sure the work submitted is present
|
||||
work := a.work[hash]
|
||||
if work == nil {
|
||||
log.Info("Work submitted but none pending", "hash", hash)
|
||||
return false
|
||||
}
|
||||
// Make sure the Engine solutions is indeed valid
|
||||
result := work.Block.Header()
|
||||
result.Nonce = nonce
|
||||
result.MixDigest = mixDigest
|
||||
|
||||
if err := a.engine.VerifySeal(a.chain, result); err != nil {
|
||||
log.Warn("Invalid proof-of-work submitted", "hash", hash, "err", err)
|
||||
return false
|
||||
}
|
||||
block := work.Block.WithSeal(result)
|
||||
|
||||
// Solutions seems to be valid, return to the miner and notify acceptance
|
||||
a.returnCh <- &Result{work, block}
|
||||
delete(a.work, hash)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// loop monitors mining events on the work and quit channels, updating the internal
|
||||
// state of the remote miner until a termination is requested.
|
||||
//
|
||||
// Note, the reason the work and quit channels are passed as parameters is because
|
||||
// RemoteAgent.Start() constantly recreates these channels, so the loop code cannot
|
||||
// assume data stability in these member fields.
|
||||
func (a *RemoteAgent) loop(workCh chan *Work, quitCh chan struct{}) {
|
||||
ticker := time.NewTicker(5 * time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-quitCh:
|
||||
return
|
||||
case work := <-workCh:
|
||||
a.mu.Lock()
|
||||
a.currentWork = work
|
||||
a.mu.Unlock()
|
||||
case <-ticker.C:
|
||||
// cleanup
|
||||
a.mu.Lock()
|
||||
for hash, work := range a.work {
|
||||
if time.Since(work.createdAt) > 7*(12*time.Second) {
|
||||
delete(a.work, hash)
|
||||
}
|
||||
}
|
||||
a.mu.Unlock()
|
||||
|
||||
a.hashrateMu.Lock()
|
||||
for id, hashrate := range a.hashrate {
|
||||
if time.Since(hashrate.ping) > 10*time.Second {
|
||||
delete(a.hashrate, id)
|
||||
}
|
||||
}
|
||||
a.hashrateMu.Unlock()
|
||||
}
|
||||
}
|
||||
}
|
File diff suppressed because it is too large
@@ -67,6 +67,7 @@ func (msg *CallMsg) SetData(data []byte) { msg.msg.Data = common.CopyBytes
func (msg *CallMsg) SetTo(address *Address) {
if address == nil {
msg.msg.To = nil
return
}
msg.msg.To = &address.address
}
@@ -59,7 +59,7 @@ func (api *PrivateAdminAPI) AddPeer(url string) (bool, error) {
return true, nil
}

// RemovePeer disconnects from a a remote node if the connection exists
// RemovePeer disconnects from a remote node if the connection exists
func (api *PrivateAdminAPI) RemovePeer(url string) (bool, error) {
// Make sure the server is running, fail otherwise
server := api.node.Server()

@@ -75,6 +75,37 @@ func (api *PrivateAdminAPI) RemovePeer(url string) (bool, error) {
return true, nil
}

// AddTrustedPeer allows a remote node to always connect, even if slots are full
func (api *PrivateAdminAPI) AddTrustedPeer(url string) (bool, error) {
// Make sure the server is running, fail otherwise
server := api.node.Server()
if server == nil {
return false, ErrNodeStopped
}
node, err := discover.ParseNode(url)
if err != nil {
return false, fmt.Errorf("invalid enode: %v", err)
}
server.AddTrustedPeer(node)
return true, nil
}

// RemoveTrustedPeer removes a remote node from the trusted peer set, but it
// does not disconnect it automatically.
func (api *PrivateAdminAPI) RemoveTrustedPeer(url string) (bool, error) {
// Make sure the server is running, fail otherwise
server := api.node.Server()
if server == nil {
return false, ErrNodeStopped
}
node, err := discover.ParseNode(url)
if err != nil {
return false, fmt.Errorf("invalid enode: %v", err)
}
server.RemoveTrustedPeer(node)
return true, nil
}

// PeerEvents creates an RPC subscription which receives peer events from the
// node's p2p.Server
func (api *PrivateAdminAPI) PeerEvents(ctx context.Context) (*rpc.Subscription, error) {
@@ -160,7 +160,7 @@ func (tab *Table) ReadRandomNodes(buf []*Node) (n int) {

// Find all non-empty buckets and get a fresh slice of their entries.
var buckets [][]*Node
for _, b := range tab.buckets {
for _, b := range &tab.buckets {
if len(b.entries) > 0 {
buckets = append(buckets, b.entries[:])
}

@@ -508,7 +508,7 @@ func (tab *Table) copyLiveNodes() {
defer tab.mutex.Unlock()

now := time.Now()
for _, b := range tab.buckets {
for _, b := range &tab.buckets {
for _, n := range b.entries {
if now.Sub(n.addedAt) >= seedMinTableTime {
tab.db.updateNode(n)

@@ -524,7 +524,7 @@ func (tab *Table) closest(target common.Hash, nresults int) *nodesByDistance {
// obviously correct. I believe that tree-based buckets would make
// this easier to implement efficiently.
close := &nodesByDistance{target: target}
for _, b := range tab.buckets {
for _, b := range &tab.buckets {
for _, n := range b.entries {
close.push(n, nresults)
}

@@ -533,7 +533,7 @@ func (tab *Table) closest(target common.Hash, nresults int) *nodesByDistance {
}

func (tab *Table) len() (n int) {
for _, b := range tab.buckets {
for _, b := range &tab.buckets {
n += len(b.entries)
}
return n

@@ -1228,7 +1228,7 @@ func (net *Network) checkTopicRegister(data *topicRegister) (*pong, error) {
if rlpHash(data.Topics) != pongpkt.data.(*pong).TopicHash {
return nil, errors.New("topic hash mismatch")
}
if data.Idx < 0 || int(data.Idx) >= len(data.Topics) {
if int(data.Idx) < 0 || int(data.Idx) >= len(data.Topics) {
return nil, errors.New("topic index out of range")
}
return pongpkt.data.(*pong), nil

@@ -81,7 +81,7 @@ func (tab *Table) chooseBucketRefreshTarget() common.Hash {
if printTable {
fmt.Println()
}
for i, b := range tab.buckets {
for i, b := range &tab.buckets {
entries += len(b.entries)
if printTable {
for _, e := range b.entries {

@@ -93,7 +93,7 @@ func (tab *Table) chooseBucketRefreshTarget() common.Hash {
prefix := binary.BigEndian.Uint64(tab.self.sha[0:8])
dist := ^uint64(0)
entry := int(randUint(uint32(entries + 1)))
for _, b := range tab.buckets {
for _, b := range &tab.buckets {
if entry < len(b.entries) {
n := b.entries[entry]
dist = binary.BigEndian.Uint64(n.sha[0:8]) ^ prefix

@@ -121,7 +121,7 @@ func (tab *Table) readRandomNodes(buf []*Node) (n int) {
// TODO: tree-based buckets would help here
// Find all non-empty buckets and get a fresh slice of their entries.
var buckets [][]*Node
for _, b := range tab.buckets {
for _, b := range &tab.buckets {
if len(b.entries) > 0 {
buckets = append(buckets, b.entries[:])
}

@@ -175,7 +175,7 @@ func (tab *Table) closest(target common.Hash, nresults int) *nodesByDistance {
// obviously correct. I believe that tree-based buckets would make
// this easier to implement efficiently.
close := &nodesByDistance{target: target}
for _, b := range tab.buckets {
for _, b := range &tab.buckets {
for _, n := range b.entries {
close.push(n, nresults)
}
Some files were not shown because too many files have changed in this diff.