rebase to light-stable, light flags

Daniel Whitenack 2016-07-03 14:44:31 -05:00
parent 8ac7e83305
commit 994aaac679
404 changed files with 4124 additions and 158092 deletions

src/Godeps/Godeps.json (generated)

@ -11,9 +11,8 @@
"Rev": "593e01cfc4f3353585015321e01951d4a907d3ef"
},
{
"ImportPath": "github.com/codegangsta/cli",
"Comment": "v1.17.0-67-ge5bef42",
"Rev": "e5bef42c62aa7d25aba4880dc02b7624f01e9e19"
"ImportPath": "github.com/aristanetworks/goarista/atime",
"Rev": "41405b70e69314415c378d9456fd01075f2ad2f2"
},
{
"ImportPath": "github.com/ethereum/ethash",
@ -22,243 +21,238 @@
},
{
"ImportPath": "github.com/ethereum/go-ethereum/accounts",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/accounts/abi",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/accounts/abi/bind",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/cmd/utils",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/common",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/common/compiler",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/common/httpclient",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/common/mclock",
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/common/registrar",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/common/registrar/ethreg",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/core",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/core/state",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/core/types",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/core/vm",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/crypto",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/crypto/ecies",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/crypto/randentropy",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/crypto/secp256k1",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/crypto/sha3",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/eth",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/eth/downloader",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/eth/fetcher",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/eth/filters",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/eth/gasprice",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/ethapi",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/ethdb",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/event",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/event/filter",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/internal/debug",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/jsre",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"ImportPath": "github.com/ethereum/go-ethereum/internal/ethapi",
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/les",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/les/flowcontrol",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/light",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/logger",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/logger/glog",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/metrics",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/miner",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/node",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/p2p",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/p2p/discover",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/p2p/nat",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/params",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/pow",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/release",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/rlp",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/rpc",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/trie",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/ethereum/go-ethereum/whisper",
"Comment": "v1.0.1-898-g6b7ca77",
"Rev": "6b7ca77653ec3b39c0871b5128d7d14ceb2234d9"
},
{
"ImportPath": "github.com/fatih/color",
"Comment": "v0.1-17-g533cd7f",
"Rev": "533cd7fd8a85905f67a1753afb4deddc85ea174f"
"Comment": "v1.0.1-1001-g66560ee",
"Rev": "66560ee28d836e91ac50bf2d6236b7ee4c9ecadf"
},
{
"ImportPath": "github.com/golang/snappy",
@ -305,14 +299,6 @@
"Comment": "v1.0.1-4-g1fa385a",
"Rev": "1fa385a6f45828c83361136b45b1a21a12139493"
},
{
"ImportPath": "github.com/mattn/go-colorable",
"Rev": "9cbef7c35391cca05f15f8181dc0b18bc9736dbb"
},
{
"ImportPath": "github.com/mattn/go-isatty",
"Rev": "56b76bdf51f7708750eac80fa38b952bb9f32639"
},
{
"ImportPath": "github.com/microsoft/go-winio",
"Comment": "v0.3.5",
@ -323,10 +309,6 @@
"Comment": "v1.0-11-gc55201b",
"Rev": "c55201b036063326c5b1b89ccfe45a184973d073"
},
{
"ImportPath": "github.com/peterh/liner",
"Rev": "72909af234e0e355af10d0ace679446a6c5d7ec3"
},
{
"ImportPath": "github.com/pkg/errors",
"Comment": "v0.4.0",
@ -340,34 +322,6 @@
"ImportPath": "github.com/rjeczalik/notify",
"Rev": "5dd6205716539662f8f14ab513552b41eab69d5d"
},
{
"ImportPath": "github.com/robertkrimen/otto",
"Rev": "610cd3ae864b5f0977234663966e6108c63b0f8e"
},
{
"ImportPath": "github.com/robertkrimen/otto/ast",
"Rev": "610cd3ae864b5f0977234663966e6108c63b0f8e"
},
{
"ImportPath": "github.com/robertkrimen/otto/dbg",
"Rev": "610cd3ae864b5f0977234663966e6108c63b0f8e"
},
{
"ImportPath": "github.com/robertkrimen/otto/file",
"Rev": "610cd3ae864b5f0977234663966e6108c63b0f8e"
},
{
"ImportPath": "github.com/robertkrimen/otto/parser",
"Rev": "610cd3ae864b5f0977234663966e6108c63b0f8e"
},
{
"ImportPath": "github.com/robertkrimen/otto/registry",
"Rev": "610cd3ae864b5f0977234663966e6108c63b0f8e"
},
{
"ImportPath": "github.com/robertkrimen/otto/token",
"Rev": "610cd3ae864b5f0977234663966e6108c63b0f8e"
},
{
"ImportPath": "github.com/rs/cors",
"Rev": "3ca2b550f6a4333b63c845850f760a7d00412cd6"
@ -456,10 +410,6 @@
"ImportPath": "golang.org/x/net/websocket",
"Rev": "fb93926129b8ec0056f2f458b1f519654814edf0"
},
{
"ImportPath": "golang.org/x/sys/unix",
"Rev": "f64b50fbea64174967a8882830d621a18ee1548e"
},
{
"ImportPath": "golang.org/x/sys/windows",
"Rev": "f64b50fbea64174967a8882830d621a18ee1548e"
@ -542,14 +492,9 @@
"Rev": "8dcd6a7f4951f6ff3ee9cbb919a06d8925822e57"
},
{
"ImportPath": "gopkg.in/sourcemap.v1",
"Comment": "v1.0.2",
"Rev": "8cd1e2e0ddc78dcb4511bcb184d23171bed15c67"
},
{
"ImportPath": "gopkg.in/sourcemap.v1/base64vlq",
"Comment": "v1.0.2",
"Rev": "8cd1e2e0ddc78dcb4511bcb184d23171bed15c67"
"ImportPath": "gopkg.in/urfave/cli.v1",
"Comment": "v1.18.0",
"Rev": "1efa31f08b9333f1bd4882d61f9d668a70cd902e"
}
]
}


@ -24,7 +24,7 @@ func createAccount(password string) (string, string, error) {
if currentNode != nil {
w := true
keydir := datadir + "/keystore"
keydir := datadir + "/testnet/keystore"
accman := accounts.NewManager(keydir, scryptN, scryptP, accountSync)
// generate the account


@ -29,6 +29,14 @@ func TestAccountBindings(t *testing.T) {
t.Error("Test failed: could not create account")
}
// unlock the created account
err = unlockAccount(address, "badpassword", 3)
if err != nil {
fmt.Println(err)
t.Error("Test failed: could not unlock account")
}
time.Sleep(2 * time.Second)
// test to see if the account was injected in whisper
whisperInstance := (*accountSync)[0].(*whisper.Whisper)
identitySucess := whisperInstance.HasIdentity(crypto.ToECDSAPub(common.FromHex(pubkey)))
@ -50,13 +58,6 @@ func TestAccountBindings(t *testing.T) {
t.Error("Test failed: Could not post to whisper")
}
// unlock the created account
err = unlockAccount(address, "badpassword", 10)
if err != nil {
fmt.Println(err)
t.Error("Test failed: could not unlock account")
}
// clean up
err = os.RemoveAll(".ethereumtest")
if err != nil {


@ -5,7 +5,6 @@ import (
"fmt"
"runtime"
"github.com/codegangsta/cli"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/logger"
@ -14,6 +13,7 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/release"
"github.com/ethereum/go-ethereum/rlp"
"gopkg.in/urfave/cli.v1"
)
const (
@ -50,7 +50,8 @@ func MakeNode(inputDir string) *node.Node {
// TODO remove admin rpcapi flag
set := flag.NewFlagSet("test", 0)
set.Bool("shh", true, "whisper")
set.Bool("noeth", true, "disable eth")
set.Bool("light", true, "disable eth")
set.Bool("testnet", true, "light test network")
set.Bool("rpc", true, "enable rpc")
set.String("rpcaddr", "localhost", "host for RPC")
set.String("rpcport", "8545", "rpc port")

src/vendor/github.com/aristanetworks/goarista/COPYING (generated, vendored, new file)

@ -0,0 +1,177 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS


@ -0,0 +1,6 @@
// Copyright (C) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
// This file is intentionally empty.
// It's a workaround for https://github.com/golang/go/issues/15006


@ -0,0 +1,24 @@
// Copyright (C) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
// Package atime provides a fast monotonic clock source.
package atime
import (
_ "unsafe" // required to use //go:linkname
)
//go:noescape
//go:linkname nanotime runtime.nanotime
func nanotime() int64
// NanoTime returns the current time in nanoseconds from a monotonic clock.
// The time returned is based on some arbitrary platform-specific point in the
// past. The time returned is guaranteed to increase monotonically at a
// constant rate, unlike time.Now() from the Go standard library, which may
// slow down, speed up, jump forward or backward, due to NTP activity or leap
// seconds.
func NanoTime() uint64 {
return uint64(nanotime())
}


@ -1,21 +0,0 @@
Copyright (C) 2013 Jeremy Saenz
All Rights Reserved.
MIT LICENSE
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@ -28,15 +28,16 @@ import (
// ErrNoCode is returned by call and transact operations for which the requested
// recipient contract to operate on does not exist in the state db or does not
// have any code associated with it (i.e. suicided).
//
// Please note, this error string is part of the RPC API and is expected by the
// native contract bindings to signal this particular error. Do not change this
// as it will break all dependent code!
var ErrNoCode = errors.New("no contract code at given address")
// ContractCaller defines the methods needed to allow operating with contract on a read
// only basis.
type ContractCaller interface {
// HasCode checks if the contract at the given address has any code associated
// with it or not. This is needed to differentiate between contract internal
// errors and the local chain being out of sync.
HasCode(ctx context.Context, contract common.Address, pending bool) (bool, error)
// ContractCall executes an Ethereum contract call with the specified data as
// the input. The pending flag requests execution against the pending block, not
// the stable head of the chain.
@ -56,6 +57,11 @@ type ContractTransactor interface {
// execution of a transaction.
SuggestGasPrice(ctx context.Context) (*big.Int, error)
// HasCode checks if the contract at the given address has any code associated
// with it or not. This is needed to differentiate between contract internal
// errors and the local chain being out of sync.
HasCode(ctx context.Context, contract common.Address, pending bool) (bool, error)
// EstimateGasLimit tries to estimate the gas needed to execute a specific
// transaction based on the current pending state of the backend blockchain.
// There is no guarantee that this is the true gas limit requirement as other
@ -69,7 +75,38 @@ type ContractTransactor interface {
// ContractBackend defines the methods needed to allow operating with contract
// on a read-write basis.
//
// This interface is essentially the union of ContractCaller and ContractTransactor
// but due to a bug in the Go compiler (https://github.com/golang/go/issues/6977),
// we cannot simply list it as the two interfaces. The other solution is to add a
// third interface containing the common methods, but that convolutes the user API
// as it introduces yet another parameter to require for initialization.
type ContractBackend interface {
ContractCaller
ContractTransactor
// HasCode checks if the contract at the given address has any code associated
// with it or not. This is needed to differentiate between contract internal
// errors and the local chain being out of sync.
HasCode(ctx context.Context, contract common.Address, pending bool) (bool, error)
// ContractCall executes an Ethereum contract call with the specified data as
// the input. The pending flag requests execution against the pending block, not
// the stable head of the chain.
ContractCall(ctx context.Context, contract common.Address, data []byte, pending bool) ([]byte, error)
// PendingAccountNonce retrieves the current pending nonce associated with an
// account.
PendingAccountNonce(ctx context.Context, account common.Address) (uint64, error)
// SuggestGasPrice retrieves the currently suggested gas price to allow a timely
// execution of a transaction.
SuggestGasPrice(ctx context.Context) (*big.Int, error)
// EstimateGasLimit tries to estimate the gas needed to execute a specific
// transaction based on the current pending state of the backend blockchain.
// There is no guarantee that this is the true gas limit requirement as other
// transactions may be added or removed by miners, but it should provide a basis
// for setting a reasonable default.
EstimateGasLimit(ctx context.Context, sender common.Address, contract *common.Address, value *big.Int, data []byte) (*big.Int, error)
// SendTransaction injects the transaction into the pending pool for execution.
SendTransaction(ctx context.Context, tx *types.Transaction) error
}
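
The comment above refers to golang/go#6977: on the Go toolchains current at the time, two interfaces sharing a method name could not both be embedded in a third interface, which is why ContractBackend spells out the union by hand. A self-contained analogue of that workaround (illustrative names, not the bindings API):

package main

import "fmt"

type Caller interface {
	HasCode(addr string) bool
	Call(addr string) string
}

type Transactor interface {
	HasCode(addr string) bool
	Send(addr string) error
}

// Backend lists the union explicitly; embedding Caller and Transactor would
// not compile here because HasCode appears in both.
type Backend interface {
	HasCode(addr string) bool
	Call(addr string) string
	Send(addr string) error
}

type stub struct{}

func (stub) HasCode(addr string) bool { return true }
func (stub) Call(addr string) string  { return "0x" }
func (stub) Send(addr string) error   { return nil }

func main() {
	var b Backend = stub{}
	fmt.Println(b.HasCode("0xabc"), b.Call("0xabc"), b.Send("0xabc"))
}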


@ -20,6 +20,7 @@ import (
"errors"
"fmt"
"math/big"
"sync/atomic"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
@ -36,7 +37,7 @@ type SignerFn func(common.Address, *types.Transaction) (*types.Transaction, erro
type CallOpts struct {
Pending bool // Whether to operate on the pending state or the last known one
Ctx context.Context // unlike usual practice, this context can be left uninitialized (nil)
Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
}
// TransactOpts is the collection of authorization data required to create a
@ -50,7 +51,7 @@ type TransactOpts struct {
GasPrice *big.Int // Gas price to use for the transaction execution (nil = gas price oracle)
GasLimit *big.Int // Gas limit to set for the transaction execution (nil = estimate + 10%)
Ctx context.Context // unlike usual practice, this context can be left uninitialized (nil)
Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
}
// BoundContract is the base wrapper object that reflects a contract on the
@ -61,6 +62,9 @@ type BoundContract struct {
abi abi.ABI // Reflect based ABI to access the correct Ethereum methods
caller ContractCaller // Read interface to interact with the blockchain
transactor ContractTransactor // Write interface to interact with the blockchain
latestHasCode uint32 // Cached verification that the latest state contains code for this contract
pendingHasCode uint32 // Cached verification that the pending state contains code for this contract
}
// NewBoundContract creates a low level contract interface through which calls
@ -101,12 +105,25 @@ func (c *BoundContract) Call(opts *CallOpts, result interface{}, method string,
if opts == nil {
opts = new(CallOpts)
}
// Make sure we have a contract to operate on, and bail out otherwise
if (opts.Pending && atomic.LoadUint32(&c.pendingHasCode) == 0) || (!opts.Pending && atomic.LoadUint32(&c.latestHasCode) == 0) {
if code, err := c.caller.HasCode(opts.Context, c.address, opts.Pending); err != nil {
return err
} else if !code {
return ErrNoCode
}
if opts.Pending {
atomic.StoreUint32(&c.pendingHasCode, 1)
} else {
atomic.StoreUint32(&c.latestHasCode, 1)
}
}
// Pack the input, call and unpack the results
input, err := c.abi.Pack(method, params...)
if err != nil {
return err
}
output, err := c.caller.ContractCall(opts.Ctx, c.address, input, opts.Pending)
output, err := c.caller.ContractCall(opts.Context, c.address, input, opts.Pending)
if err != nil {
return err
}
@ -141,7 +158,7 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, i
}
nonce := uint64(0)
if opts.Nonce == nil {
nonce, err = c.transactor.PendingAccountNonce(opts.Ctx, opts.From)
nonce, err = c.transactor.PendingAccountNonce(opts.Context, opts.From)
if err != nil {
return nil, fmt.Errorf("failed to retrieve account nonce: %v", err)
}
@ -151,14 +168,24 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, i
// Figure out the gas allowance and gas price values
gasPrice := opts.GasPrice
if gasPrice == nil {
gasPrice, err = c.transactor.SuggestGasPrice(opts.Ctx)
gasPrice, err = c.transactor.SuggestGasPrice(opts.Context)
if err != nil {
return nil, fmt.Errorf("failed to suggest gas price: %v", err)
}
}
gasLimit := opts.GasLimit
if gasLimit == nil {
gasLimit, err = c.transactor.EstimateGasLimit(opts.Ctx, opts.From, contract, value, input)
// Gas estimation cannot succeed without code for method invocations
if contract != nil && atomic.LoadUint32(&c.pendingHasCode) == 0 {
if code, err := c.transactor.HasCode(opts.Context, c.address, true); err != nil {
return nil, err
} else if !code {
return nil, ErrNoCode
}
atomic.StoreUint32(&c.pendingHasCode, 1)
}
// If the contract surely has code (or code is not needed), estimate the transaction
gasLimit, err = c.transactor.EstimateGasLimit(opts.Context, opts.From, contract, value, input)
if err != nil {
return nil, fmt.Errorf("failed to exstimate gas needed: %v", err)
}
@ -177,7 +204,7 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, i
if err != nil {
return nil, err
}
if err := c.transactor.SendTransaction(opts.Ctx, signedTx); err != nil {
if err := c.transactor.SendTransaction(opts.Context, signedTx); err != nil {
return nil, err
}
return signedTx, nil
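
The latestHasCode/pendingHasCode fields added above implement a check-once cache: the first Call or transact pays for a HasCode round trip, later ones skip it. The pattern in isolation (illustrative names, not the bindings API):

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

var codeChecked uint32 // 0 = unknown, 1 = verified

// hasCode stands in for the HasCode backend round trip.
func hasCode() bool { return true }

func ensureCode() error {
	if atomic.LoadUint32(&codeChecked) == 0 {
		if !hasCode() {
			return errors.New("no contract code at given address")
		}
		atomic.StoreUint32(&codeChecked, 1) // cache the positive result
	}
	return nil
}

func main() {
	for i := 0; i < 3; i++ {
		if err := ensureCode(); err != nil {
			fmt.Println(err)
			return
		}
	}
	fmt.Println("code verified once, cached afterwards")
}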


@ -62,7 +62,7 @@ func (m Method) pack(method Method, args ...interface{}) ([]byte, error) {
// calculate the offset
offset := len(method.Inputs)*32 + len(variableInput)
// set the offset
ret = append(ret, packNum(reflect.ValueOf(offset), UintTy)...)
ret = append(ret, packNum(reflect.ValueOf(offset))...)
// Append the packed output to the variable input. The variable input
// will be appended at the end of the input.
variableInput = append(variableInput, packed...)


@ -61,54 +61,20 @@ func U256(n *big.Int) []byte {
return common.LeftPadBytes(common.U256(n).Bytes(), 32)
}
func S256(n *big.Int) []byte {
sint := common.S256(n)
ret := common.LeftPadBytes(sint.Bytes(), 32)
if sint.Cmp(common.Big0) < 0 {
for i, b := range ret {
if b == 0 {
ret[i] = 1
continue
}
break
}
}
return ret
}
// S256 will ensure signed 256bit on big nums
func U2U256(n uint64) []byte {
return U256(big.NewInt(int64(n)))
}
func S2S256(n int64) []byte {
return S256(big.NewInt(n))
}
// packNum packs the given number (using the reflect value) and will cast it to appropriate number representation
func packNum(value reflect.Value, to byte) []byte {
func packNum(value reflect.Value) []byte {
switch kind := value.Kind(); kind {
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
if to == UintTy {
return U2U256(value.Uint())
} else {
return S2S256(int64(value.Uint()))
}
return U2U256(value.Uint())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
if to == UintTy {
return U2U256(uint64(value.Int()))
} else {
return S2S256(value.Int())
}
return U2U256(uint64(value.Int()))
case reflect.Ptr:
// This only takes care of packing and casting. No type checking is done here. It should be done prior to using this function.
if to == UintTy {
return U256(value.Interface().(*big.Int))
} else {
return S256(value.Interface().(*big.Int))
}
return U256(value.Interface().(*big.Int))
}
return nil
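
With the signed branch removed, packNum always widens the value to an unsigned 256-bit integer and left-pads it to 32 bytes. A standalone illustration of that output format (leftPad32 is a stand-in for common.LeftPadBytes):

package main

import (
	"fmt"
	"math/big"
)

// leftPad32 mirrors common.LeftPadBytes(b, 32): zero bytes prepended up to 32.
func leftPad32(b []byte) []byte {
	if len(b) >= 32 {
		return b
	}
	out := make([]byte, 32)
	copy(out[32-len(b):], b)
	return out
}

func main() {
	packed := leftPad32(big.NewInt(255).Bytes())
	fmt.Printf("%d bytes, tail: %x\n", len(packed), packed[30:]) // 32 bytes, tail: 00ff
}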


@ -25,7 +25,7 @@ import (
// packBytesSlice packs the given bytes as [L, V] as the canonical representation
// bytes slice
func packBytesSlice(bytes []byte, l int) []byte {
len := packNum(reflect.ValueOf(l), UintTy)
len := packNum(reflect.ValueOf(l))
return append(len, common.RightPadBytes(bytes, (l+31)/32*32)...)
}
@ -34,7 +34,7 @@ func packBytesSlice(bytes []byte, l int) []byte {
func packElement(t Type, reflectValue reflect.Value) []byte {
switch t.T {
case IntTy, UintTy:
return packNum(reflectValue, t.T)
return packNum(reflectValue)
case StringTy:
return packBytesSlice([]byte(reflectValue.String()), reflectValue.Len())
case AddressTy:


@ -155,9 +155,21 @@ func (am *Manager) Sign(addr common.Address, hash []byte) (signature []byte, err
return crypto.Sign(hash, unlockedKey.PrivateKey)
}
// SignWithPassphrase signs hash if the private key matching the given address can be
// decrypted with the given passphrase.
func (am *Manager) SignWithPassphrase(addr common.Address, passphrase string, hash []byte) (signature []byte, err error) {
_, key, err := am.getDecryptedKey(Account{Address: addr}, passphrase)
if err != nil {
return nil, err
}
defer zeroKey(key.PrivateKey)
return crypto.Sign(hash, key.PrivateKey)
}
// Unlock unlocks the given account indefinitely.
func (am *Manager) Unlock(a Account, keyAuth string) error {
return am.TimedUnlock(a, keyAuth, 0)
func (am *Manager) Unlock(a Account, passphrase string) error {
return am.TimedUnlock(a, passphrase, 0)
}
// Lock removes the private key with the given address from memory.
@ -180,7 +192,6 @@ func (am *Manager) Lock(addr common.Address) error {
// shortens the active unlock timeout. If the address was previously unlocked
// indefinitely the timeout is not altered.
func (am *Manager) TimedUnlock(a Account, passphrase string, timeout time.Duration) error {
a, key, err := am.getDecryptedKey(a, passphrase)
if err != nil {
return err
@ -220,6 +231,7 @@ func (am *Manager) TimedUnlock(a Account, passphrase string, timeout time.Durati
}
func (am *Manager) syncAccounts(a string, key *Key) error {
for _, service := range *am.sync {
if whisperInstance, ok := service.(*whisper.Whisper); ok && key.WhisperEnabled {
err := whisperInstance.InjectIdentity(key.PrivateKey)


@ -20,9 +20,9 @@ import (
"fmt"
"strings"
"github.com/codegangsta/cli"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/rpc"
"gopkg.in/urfave/cli.v1"
)
// NewRemoteRPCClient returns a RPC client which connects to a running geth instance.


@ -24,7 +24,7 @@ import (
"path"
"strings"
"github.com/codegangsta/cli"
"gopkg.in/urfave/cli.v1"
)
// Custom type which is registered in the flags library which cli uses for


@ -30,7 +30,6 @@ import (
"strings"
"time"
"github.com/codegangsta/cli"
"github.com/ethereum/ethash"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
@ -54,6 +53,7 @@ import (
"github.com/ethereum/go-ethereum/release"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/whisper"
"gopkg.in/urfave/cli.v1"
)
func init() {
@ -319,7 +319,7 @@ var (
Name: "exec",
Usage: "Execute JavaScript statement (only in combination with console/attach)",
}
PreLoadJSFlag = cli.StringFlag{
PreloadJSFlag = cli.StringFlag{
Name: "preload",
Usage: "Comma separated list of JavaScript files to preload into the console",
}
@ -686,7 +686,7 @@ func MakeSystemNode(name, version string, relconf release.Config, extra []byte,
DataDir: MustMakeDataDir(ctx),
PrivateKey: MakeNodeKey(ctx),
Name: MakeNodeName(name, version, ctx),
NoDiscovery: ctx.GlobalBool(NoDiscoverFlag.Name),
NoDiscovery: ctx.GlobalBool(NoDiscoverFlag.Name) || ctx.GlobalBool(LightModeFlag.Name), // light client hack
BootstrapNodes: MakeBootstrapNodes(ctx),
ListenAddr: MakeListenAddress(ctx),
NAT: MakeNAT(ctx),
@ -745,7 +745,6 @@ func MakeSystemNode(name, version string, relconf release.Config, extra []byte,
SolcPath: ctx.GlobalString(SolcPathFlag.Name),
AutoDAG: ctx.GlobalBool(AutoDAGFlag.Name) || ctx.GlobalBool(MiningEnabledFlag.Name),
}
// Configure the Whisper service
shhEnable := ctx.GlobalBool(WhisperEnabledFlag.Name)
@ -873,7 +872,7 @@ func MustMakeChainConfig(ctx *cli.Context) *core.ChainConfig {
// MustMakeChainConfigFromDb reads the chain configuration from the given database.
func MustMakeChainConfigFromDb(ctx *cli.Context, db ethdb.Database) *core.ChainConfig {
genesis := core.GetBlock(db, core.GetCanonicalHash(db, 0))
genesis := core.GetBlock(db, core.GetCanonicalHash(db, 0), 0)
if genesis != nil {
// Existing genesis block, use stored config if available.
@ -893,15 +892,24 @@ func MustMakeChainConfigFromDb(ctx *cli.Context, db ethdb.Database) *core.ChainC
return &core.ChainConfig{HomesteadBlock: homesteadBlockNo}
}
func ChainDbName(ctx *cli.Context) string {
if ctx.GlobalBool(LightModeFlag.Name) {
return "lightchaindata"
} else {
return "chaindata"
}
}
// MakeChainDatabase open an LevelDB using the flags passed to the client and will hard crash if it fails.
func MakeChainDatabase(ctx *cli.Context) ethdb.Database {
var (
datadir = MustMakeDataDir(ctx)
cache = ctx.GlobalInt(CacheFlag.Name)
handles = MakeDatabaseHandles()
name = ChainDbName(ctx)
)
chainDb, err := ethdb.NewLDBDatabase(filepath.Join(datadir, "chaindata"), cache, handles)
chainDb, err := ethdb.NewLDBDatabase(filepath.Join(datadir, name), cache, handles)
if err != nil {
Fatalf("Could not open database: %v", err)
}
@ -931,3 +939,20 @@ func MakeChain(ctx *cli.Context) (chain *core.BlockChain, chainDb ethdb.Database
}
return chain, chainDb
}
// MakeConsolePreloads retrieves the absolute paths for the console JavaScript
// scripts to preload before starting.
func MakeConsolePreloads(ctx *cli.Context) []string {
// Skip preloading if there's nothing to preload
if ctx.GlobalString(PreloadJSFlag.Name) == "" {
return nil
}
// Otherwise resolve absolute paths and return them
preloads := []string{}
assets := ctx.GlobalString(JSpathFlag.Name)
for _, file := range strings.Split(ctx.GlobalString(PreloadJSFlag.Name), ",") {
preloads = append(preloads, common.AbsolutePath(assets, strings.TrimSpace(file)))
}
return preloads
}
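
A hedged standalone version of the preload resolution above, with the geth flag plumbing removed; resolvePreloads splits a comma-separated preload value and resolves relative entries against an assets directory the way common.AbsolutePath does (names here are illustrative):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func resolvePreloads(assets, preload string) []string {
	if preload == "" {
		return nil // nothing to preload
	}
	var out []string
	for _, file := range strings.Split(preload, ",") {
		file = strings.TrimSpace(file)
		if !filepath.IsAbs(file) {
			file = filepath.Join(assets, file)
		}
		out = append(out, file)
	}
	return out
}

func main() {
	fmt.Println(resolvePreloads("/opt/geth/js", "web3-extra.js, helpers.js"))
}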


@ -1,98 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package utils
import (
"fmt"
"strings"
"github.com/peterh/liner"
)
// Holds the stdin line reader.
// Only this reader may be used for input because it keeps
// an internal buffer.
var Stdin = newUserInputReader()
type userInputReader struct {
*liner.State
warned bool
supported bool
normalMode liner.ModeApplier
rawMode liner.ModeApplier
}
func newUserInputReader() *userInputReader {
r := new(userInputReader)
// Get the original mode before calling NewLiner.
// This is usually regular "cooked" mode where characters echo.
normalMode, _ := liner.TerminalMode()
// Turn on liner. It switches to raw mode.
r.State = liner.NewLiner()
rawMode, err := liner.TerminalMode()
if err != nil || !liner.TerminalSupported() {
r.supported = false
} else {
r.supported = true
r.normalMode = normalMode
r.rawMode = rawMode
// Switch back to normal mode while we're not prompting.
normalMode.ApplyMode()
}
return r
}
func (r *userInputReader) Prompt(prompt string) (string, error) {
if r.supported {
r.rawMode.ApplyMode()
defer r.normalMode.ApplyMode()
} else {
// liner tries to be smart about printing the prompt
// and doesn't print anything if input is redirected.
// Un-smart it by printing the prompt always.
fmt.Print(prompt)
prompt = ""
defer fmt.Println()
}
return r.State.Prompt(prompt)
}
func (r *userInputReader) PasswordPrompt(prompt string) (passwd string, err error) {
if r.supported {
r.rawMode.ApplyMode()
defer r.normalMode.ApplyMode()
return r.State.PasswordPrompt(prompt)
}
if !r.warned {
fmt.Println("!! Unsupported terminal, password will be echoed.")
r.warned = true
}
// Just as in Prompt, handle printing the prompt here instead of relying on liner.
fmt.Print(prompt)
passwd, err = r.State.Prompt("")
fmt.Println()
return passwd, err
}
func (r *userInputReader) ConfirmPrompt(prompt string) (bool, error) {
prompt = prompt + " [y/N] "
input, err := r.Prompt(prompt)
if len(input) > 0 && strings.ToUpper(input[:1]) == "Y" {
return true, nil
}
return false, err
}


@ -1,301 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package utils
import (
"encoding/json"
"fmt"
"time"
"github.com/ethereum/go-ethereum/jsre"
"github.com/ethereum/go-ethereum/rpc"
"github.com/robertkrimen/otto"
)
type Jeth struct {
re *jsre.JSRE
client rpc.Client
}
// NewJeth create a new backend for the JSRE console
func NewJeth(re *jsre.JSRE, client rpc.Client) *Jeth {
return &Jeth{re, client}
}
// err returns an error object for the given error code and message.
func (self *Jeth) err(call otto.FunctionCall, code int, msg string, id interface{}) (response otto.Value) {
m := rpc.JSONErrResponse{
Version: "2.0",
Id: id,
Error: rpc.JSONError{
Code: code,
Message: msg,
},
}
errObj, _ := json.Marshal(m.Error)
errRes, _ := json.Marshal(m)
call.Otto.Run("ret_error = " + string(errObj))
res, _ := call.Otto.Run("ret_response = " + string(errRes))
return res
}
// UnlockAccount asks the user for the password and than executes the jeth.UnlockAccount callback in the jsre.
// It will need the public address for the account to unlock as first argument.
// The second argument is an optional string with the password. If not given the user is prompted for the password.
// The third argument is an optional integer which specifies for how long the account will be unlocked (in seconds).
func (self *Jeth) UnlockAccount(call otto.FunctionCall) (response otto.Value) {
var account, passwd otto.Value
duration := otto.NullValue()
if !call.Argument(0).IsString() {
fmt.Println("first argument must be the account to unlock")
return otto.FalseValue()
}
account = call.Argument(0)
// if password is not given or as null value -> ask user for password
if call.Argument(1).IsUndefined() || call.Argument(1).IsNull() {
fmt.Printf("Unlock account %s\n", account)
if input, err := Stdin.PasswordPrompt("Passphrase: "); err != nil {
throwJSExeception(err.Error())
} else {
passwd, _ = otto.ToValue(input)
}
} else {
if !call.Argument(1).IsString() {
throwJSExeception("password must be a string")
}
passwd = call.Argument(1)
}
// third argument is the duration how long the account must be unlocked.
// verify that its a number.
if call.Argument(2).IsDefined() && !call.Argument(2).IsNull() {
if !call.Argument(2).IsNumber() {
throwJSExeception("unlock duration must be a number")
}
duration = call.Argument(2)
}
// jeth.unlockAccount will send the request to the backend.
if val, err := call.Otto.Call("jeth.unlockAccount", nil, account, passwd, duration); err == nil {
return val
} else {
throwJSExeception(err.Error())
}
return otto.FalseValue()
}
// NewAccount asks the user for the password and than executes the jeth.newAccount callback in the jsre
func (self *Jeth) NewAccount(call otto.FunctionCall) (response otto.Value) {
var passwd string
if len(call.ArgumentList) == 0 {
var err error
passwd, err = Stdin.PasswordPrompt("Passphrase: ")
if err != nil {
return otto.FalseValue()
}
passwd2, err := Stdin.PasswordPrompt("Repeat passphrase: ")
if err != nil {
return otto.FalseValue()
}
if passwd != passwd2 {
fmt.Println("Passphrases don't match")
return otto.FalseValue()
}
} else if len(call.ArgumentList) == 1 && call.Argument(0).IsString() {
passwd, _ = call.Argument(0).ToString()
} else {
fmt.Println("expected 0 or 1 string argument")
return otto.FalseValue()
}
ret, err := call.Otto.Call("jeth.newAccount", nil, passwd)
if err == nil {
return ret
}
fmt.Println(err)
return otto.FalseValue()
}
// Send will serialize the first argument, send it to the node and returns the response.
func (self *Jeth) Send(call otto.FunctionCall) (response otto.Value) {
// verify we got a batch request (array) or a single request (object)
ro := call.Argument(0).Object()
if ro == nil || (ro.Class() != "Array" && ro.Class() != "Object") {
throwJSExeception("Internal Error: request must be an object or array")
}
// convert otto vm arguments to go values by JSON serialising and parsing.
data, err := call.Otto.Call("JSON.stringify", nil, ro)
if err != nil {
throwJSExeception(err.Error())
}
jsonreq, _ := data.ToString()
// parse arguments to JSON rpc requests, either to an array (batch) or to a single request.
var reqs []rpc.JSONRequest
batch := true
if err = json.Unmarshal([]byte(jsonreq), &reqs); err != nil {
// single request?
reqs = make([]rpc.JSONRequest, 1)
if err = json.Unmarshal([]byte(jsonreq), &reqs[0]); err != nil {
throwJSExeception("invalid request")
}
batch = false
}
call.Otto.Set("response_len", len(reqs))
call.Otto.Run("var ret_response = new Array(response_len);")
for i, req := range reqs {
if err := self.client.Send(&req); err != nil {
return self.err(call, -32603, err.Error(), req.Id)
}
result := make(map[string]interface{})
if err = self.client.Recv(&result); err != nil {
return self.err(call, -32603, err.Error(), req.Id)
}
id, _ := result["id"]
jsonver, _ := result["jsonrpc"]
call.Otto.Set("ret_id", id)
call.Otto.Set("ret_jsonrpc", jsonver)
call.Otto.Set("response_idx", i)
// call was successful
if res, ok := result["result"]; ok {
payload, _ := json.Marshal(res)
call.Otto.Set("ret_result", string(payload))
response, err = call.Otto.Run(`
ret_response[response_idx] = { jsonrpc: ret_jsonrpc, id: ret_id, result: JSON.parse(ret_result) };
`)
continue
}
// request returned an error
if res, ok := result["error"]; ok {
payload, _ := json.Marshal(res)
call.Otto.Set("ret_result", string(payload))
response, err = call.Otto.Run(`
ret_response[response_idx] = { jsonrpc: ret_jsonrpc, id: ret_id, error: JSON.parse(ret_result) };
`)
continue
}
return self.err(call, -32603, fmt.Sprintf("Invalid response"), new(int64))
}
if !batch {
call.Otto.Run("ret_response = ret_response[0];")
}
// if a callback was given execute it.
if call.Argument(1).IsObject() {
call.Otto.Set("callback", call.Argument(1))
call.Otto.Run(`
if (Object.prototype.toString.call(callback) == '[object Function]') {
callback(null, ret_response);
}
`)
}
return
}
// throwJSExeception panics on an otto value, the Otto VM will then throw msg as a javascript error.
func throwJSExeception(msg interface{}) otto.Value {
p, _ := otto.ToValue(msg)
panic(p)
}
// Sleep will halt the console for arg[0] seconds.
func (self *Jeth) Sleep(call otto.FunctionCall) (response otto.Value) {
if len(call.ArgumentList) >= 1 {
if call.Argument(0).IsNumber() {
sleep, _ := call.Argument(0).ToInteger()
time.Sleep(time.Duration(sleep) * time.Second)
return otto.TrueValue()
}
}
return throwJSExeception("usage: sleep(<sleep in seconds>)")
}
// SleepBlocks will wait for a specified number of new blocks or max for a
// given of seconds. sleepBlocks(nBlocks[, maxSleep]).
func (self *Jeth) SleepBlocks(call otto.FunctionCall) (response otto.Value) {
nBlocks := int64(0)
maxSleep := int64(9999999999999999) // indefinitely
nArgs := len(call.ArgumentList)
if nArgs == 0 {
throwJSExeception("usage: sleepBlocks(<n blocks>[, max sleep in seconds])")
}
if nArgs >= 1 {
if call.Argument(0).IsNumber() {
nBlocks, _ = call.Argument(0).ToInteger()
} else {
throwJSExeception("expected number as first argument")
}
}
if nArgs >= 2 {
if call.Argument(1).IsNumber() {
maxSleep, _ = call.Argument(1).ToInteger()
} else {
throwJSExeception("expected number as second argument")
}
}
// go through the console, this will allow web3 to call the appropriate
// callbacks if a delayed response or notification is received.
currentBlockNr := func() int64 {
result, err := call.Otto.Run("eth.blockNumber")
if err != nil {
throwJSExeception(err.Error())
}
blockNr, err := result.ToInteger()
if err != nil {
throwJSExeception(err.Error())
}
return blockNr
}
targetBlockNr := currentBlockNr() + nBlocks
deadline := time.Now().Add(time.Duration(maxSleep) * time.Second)
for time.Now().Before(deadline) {
if currentBlockNr() >= targetBlockNr {
return otto.TrueValue()
}
time.Sleep(time.Second)
}
return otto.FalseValue()
}


@ -149,7 +149,6 @@ func (sol *Solidity) Compile(source string) (map[string]*Contract, error) {
compilerOptions := strings.Join(params, " ")
cmd := exec.Command(sol.solcPath, params...)
cmd.Dir = wd
cmd.Stdin = strings.NewReader(source)
cmd.Stderr = stderr


@ -0,0 +1,30 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// package mclock is a wrapper for a monotonic clock source
package mclock
import (
"time"
"github.com/aristanetworks/goarista/atime"
)
type AbsTime time.Duration // absolute monotonic time
func Now() AbsTime {
return AbsTime(atime.NanoTime())
}
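
A hedged usage sketch (not part of this commit): AbsTime has time.Duration as its underlying type, so offsets and comparisons work directly, for example in a monotonic deadline loop:

package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common/mclock"
)

func main() {
	deadline := mclock.Now() + mclock.AbsTime(50*time.Millisecond)
	for mclock.Now() < deadline {
		time.Sleep(10 * time.Millisecond) // poll until the monotonic deadline passes
	}
	fmt.Println("deadline reached")
}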


@ -1,72 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
"bytes"
"encoding/json"
"io/ioutil"
"net/http"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/rlp"
)
// DisableBadBlockReporting can be set to prevent blocks being reported.
var DisableBadBlockReporting = true
// ReportBlock reports the block to the block reporting tool found at
// badblocks.ethdev.com
func ReportBlock(block *types.Block, err error) {
if DisableBadBlockReporting {
return
}
const url = "https://badblocks.ethdev.com"
blockRlp, _ := rlp.EncodeToBytes(block)
data := map[string]interface{}{
"block": common.Bytes2Hex(blockRlp),
"errortype": err.Error(),
"hints": map[string]interface{}{
"receipts": "NYI",
"vmtrace": "NYI",
},
}
jsonStr, _ := json.Marshal(map[string]interface{}{"method": "eth_badBlock", "params": []interface{}{data}, "id": "1", "jsonrpc": "2.0"})
req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonStr))
req.Header.Set("Content-Type", "application/json")
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
glog.V(logger.Error).Infoln("POST err:", err)
return
}
defer resp.Body.Close()
if glog.V(logger.Debug) {
glog.Infoln("response Status:", resp.Status)
glog.Infoln("response Headers:", resp.Header)
body, _ := ioutil.ReadAll(resp.Body)
glog.Infoln("response Body:", string(body))
}
}
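For reference, the deleted helper above was a fire-and-forget JSON-RPC POST. A self-contained sketch of that request shape follows (the endpoint and payload here are placeholders for illustration, not anything this commit adds):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// postJSONRPC sends a single JSON-RPC 2.0 request and returns the HTTP response.
func postJSONRPC(url, method string, params ...interface{}) (*http.Response, error) {
	payload, err := json.Marshal(map[string]interface{}{
		"jsonrpc": "2.0",
		"id":      "1",
		"method":  method,
		"params":  params,
	})
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest("POST", url, bytes.NewBuffer(payload))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")
	return http.DefaultClient.Do(req)
}

func main() {
	resp, err := postJSONRPC("http://localhost:8545", "eth_badBlock", map[string]string{"block": "0x"})
	if err != nil {
		fmt.Println("POST err:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}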

View File

@ -72,7 +72,7 @@ func (v *BlockValidator) ValidateBlock(block *types.Block) error {
return &KnownBlockError{block.Number(), block.Hash()}
}
}
parent := v.bc.GetBlock(block.ParentHash())
parent := v.bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
if parent == nil {
return ParentError(block.ParentHash())
}
@ -292,7 +292,7 @@ func calcDifficultyHomestead(time, parentTime uint64, parentNumber, parentDiff *
// minimum difficulty can ever be (before exponential factor)
if x.Cmp(params.MinimumDifficulty) < 0 {
x = params.MinimumDifficulty
x.Set(params.MinimumDifficulty)
}
// for the exponential factor
@ -325,7 +325,7 @@ func calcDifficultyFrontier(time, parentTime uint64, parentNumber, parentDiff *b
diff.Sub(parentDiff, adjust)
}
if diff.Cmp(params.MinimumDifficulty) < 0 {
diff = params.MinimumDifficulty
diff.Set(params.MinimumDifficulty)
}
periodCount := new(big.Int).Add(parentNumber, common.Big1)
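Both difficulty hunks above replace a pointer assignment with big.Int's Set. The distinction matters: assigning `diff = params.MinimumDifficulty` makes diff alias the shared minimum-difficulty value, so later in-place arithmetic would silently mutate that package-level value, while Set copies the value into the existing big.Int. A standalone illustration (variable names are made up; params.MinimumDifficulty is only stood in for):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	minimum := big.NewInt(131072) // stand-in for params.MinimumDifficulty

	// Pointer assignment: diff and minimum now share one big.Int.
	diff := minimum
	diff.Add(diff, big.NewInt(1))
	fmt.Println("after aliasing:", minimum) // 131073 – the shared value was mutated

	// Value copy via Set: minimum is left untouched.
	minimum = big.NewInt(131072)
	diff = new(big.Int).SetInt64(100)
	diff.Set(minimum)
	diff.Add(diff, big.NewInt(1))
	fmt.Println("after Set:     ", minimum) // 131072
}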

View File

@ -54,9 +54,7 @@ var (
)
const (
headerCacheLimit = 512
bodyCacheLimit = 256
tdCacheLimit = 1024
blockCacheLimit = 256
maxFutureBlocks = 256
maxTimeFutureBlocks = 30
@ -151,7 +149,7 @@ func NewBlockChain(chainDb ethdb.Database, config *ChainConfig, pow pow.PoW, mux
}
// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
for hash, _ := range BadHashes {
if header := bc.GetHeader(hash); header != nil {
if header := bc.GetHeaderByHash(hash); header != nil {
glog.V(logger.Error).Infof("Found bad hash, rewinding chain to block #%d [%x…]", header.Number, header.ParentHash[:4])
bc.SetHead(header.Number.Uint64() - 1)
glog.V(logger.Error).Infoln("Chain rewind was successful, resuming normal operation")
@ -175,7 +173,7 @@ func (self *BlockChain) loadLastState() error {
// Corrupt or empty database, init from scratch
self.Reset()
} else {
if block := self.GetBlock(head); block != nil {
if block := self.GetBlockByHash(head); block != nil {
// Block found, set as the current head
self.currentBlock = block
} else {
@ -186,7 +184,7 @@ func (self *BlockChain) loadLastState() error {
// Restore the last known head header
currentHeader := self.currentBlock.Header()
if head := GetHeadHeaderHash(self.chainDb); head != (common.Hash{}) {
if header := self.GetHeader(head); header != nil {
if header := self.GetHeaderByHash(head); header != nil {
currentHeader = header
}
}
@ -194,16 +192,16 @@ func (self *BlockChain) loadLastState() error {
// Restore the last known head fast block
self.currentFastBlock = self.currentBlock
if head := GetHeadFastBlockHash(self.chainDb); head != (common.Hash{}) {
if block := self.GetBlock(head); block != nil {
if block := self.GetBlockByHash(head); block != nil {
self.currentFastBlock = block
}
}
// Issue a status log and return
headerTd := self.GetTd(self.hc.CurrentHeader().Hash())
blockTd := self.GetTd(self.currentBlock.Hash())
fastTd := self.GetTd(self.currentFastBlock.Hash())
headerTd := self.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
blockTd := self.GetTd(self.currentBlock.Hash(), self.currentBlock.NumberU64())
fastTd := self.GetTd(self.currentFastBlock.Hash(), self.currentFastBlock.NumberU64())
glog.V(logger.Info).Infof("Last header: #%d [%x…] TD=%v", self.hc.CurrentHeader().Number, self.hc.CurrentHeader().Hash().Bytes()[:4], headerTd)
glog.V(logger.Info).Infof("Last header: #%d [%x…] TD=%v", currentHeader.Number, currentHeader.Hash().Bytes()[:4], headerTd)
glog.V(logger.Info).Infof("Last block: #%d [%x…] TD=%v", self.currentBlock.Number(), self.currentBlock.Hash().Bytes()[:4], blockTd)
glog.V(logger.Info).Infof("Fast block: #%d [%x…] TD=%v", self.currentFastBlock.Number(), self.currentFastBlock.Hash().Bytes()[:4], fastTd)
@ -218,8 +216,8 @@ func (bc *BlockChain) SetHead(head uint64) {
bc.mu.Lock()
defer bc.mu.Unlock()
delFn := func(hash common.Hash) {
DeleteBody(bc.chainDb, hash)
delFn := func(hash common.Hash, num uint64) {
DeleteBody(bc.chainDb, hash, num)
}
bc.hc.SetHead(head, delFn)
@ -230,11 +228,12 @@ func (bc *BlockChain) SetHead(head uint64) {
bc.futureBlocks.Purge()
// Update all computed fields to the new head
if bc.currentBlock != nil && bc.hc.CurrentHeader().Number.Uint64() < bc.currentBlock.NumberU64() {
bc.currentBlock = bc.GetBlock(bc.hc.CurrentHeader().Hash())
currentHeader := bc.hc.CurrentHeader()
if bc.currentBlock != nil && currentHeader.Number.Uint64() < bc.currentBlock.NumberU64() {
bc.currentBlock = bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64())
}
if bc.currentFastBlock != nil && bc.hc.CurrentHeader().Number.Uint64() < bc.currentFastBlock.NumberU64() {
bc.currentFastBlock = bc.GetBlock(bc.hc.CurrentHeader().Hash())
if bc.currentFastBlock != nil && currentHeader.Number.Uint64() < bc.currentFastBlock.NumberU64() {
bc.currentFastBlock = bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64())
}
if bc.currentBlock == nil {
@ -257,7 +256,7 @@ func (bc *BlockChain) SetHead(head uint64) {
// irrelevant what the chain contents were prior.
func (self *BlockChain) FastSyncCommitHead(hash common.Hash) error {
// Make sure that both the block as well at its state trie exists
block := self.GetBlock(hash)
block := self.GetBlockByHash(hash)
if block == nil {
return fmt.Errorf("non existent block [%x…]", hash[:4])
}
@ -313,7 +312,7 @@ func (self *BlockChain) Status() (td *big.Int, currentBlock common.Hash, genesis
self.mu.RLock()
defer self.mu.RUnlock()
return self.GetTd(self.currentBlock.Hash()), self.currentBlock.Hash(), self.genesisBlock.Hash()
return self.GetTd(self.currentBlock.Hash(), self.currentBlock.NumberU64()), self.currentBlock.Hash(), self.genesisBlock.Hash()
}
// SetProcessor sets the processor required for making state modifications.
@ -367,7 +366,7 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) {
defer bc.mu.Unlock()
// Prepare the genesis block and reinitialise the chain
if err := bc.hc.WriteTd(genesis.Hash(), genesis.Difficulty()); err != nil {
if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
glog.Fatalf("failed to write genesis block TD: %v", err)
}
if err := WriteBlock(bc.chainDb, genesis); err != nil {
@ -457,7 +456,7 @@ func (self *BlockChain) GetBody(hash common.Hash) *types.Body {
body := cached.(*types.Body)
return body
}
body := GetBody(self.chainDb, hash)
body := GetBody(self.chainDb, hash, self.hc.GetBlockNumber(hash))
if body == nil {
return nil
}
@ -473,7 +472,7 @@ func (self *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
if cached, ok := self.bodyRLPCache.Get(hash); ok {
return cached.(rlp.RawValue)
}
body := GetBodyRLP(self.chainDb, hash)
body := GetBodyRLP(self.chainDb, hash, self.hc.GetBlockNumber(hash))
if len(body) == 0 {
return nil
}
@ -485,14 +484,14 @@ func (self *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
// HasBlock checks if a block is fully present in the database or not, caching
// it if present.
func (bc *BlockChain) HasBlock(hash common.Hash) bool {
return bc.GetBlock(hash) != nil
return bc.GetBlockByHash(hash) != nil
}
// HasBlockAndState checks if a block and associated state trie is fully present
// in the database or not, caching it if present.
func (bc *BlockChain) HasBlockAndState(hash common.Hash) bool {
// Check first that the block itself is known
block := bc.GetBlock(hash)
block := bc.GetBlockByHash(hash)
if block == nil {
return false
}
@ -501,13 +500,14 @@ func (bc *BlockChain) HasBlockAndState(hash common.Hash) bool {
return err == nil
}
// GetBlock retrieves a block from the database by hash, caching it if found.
func (self *BlockChain) GetBlock(hash common.Hash) *types.Block {
// GetBlock retrieves a block from the database by hash and number,
// caching it if found.
func (self *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
// Short circuit if the block's already in the cache, retrieve otherwise
if block, ok := self.blockCache.Get(hash); ok {
return block.(*types.Block)
}
block := GetBlock(self.chainDb, hash)
block := GetBlock(self.chainDb, hash, number)
if block == nil {
return nil
}
@ -516,6 +516,11 @@ func (self *BlockChain) GetBlock(hash common.Hash) *types.Block {
return block
}
// GetBlockByHash retrieves a block from the database by hash, caching it if found.
func (self *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
return self.GetBlock(hash, self.hc.GetBlockNumber(hash))
}
// GetBlockByNumber retrieves a block from the database by number, caching it
// (associated with its hash) if found.
func (self *BlockChain) GetBlockByNumber(number uint64) *types.Block {
@ -523,19 +528,21 @@ func (self *BlockChain) GetBlockByNumber(number uint64) *types.Block {
if hash == (common.Hash{}) {
return nil
}
return self.GetBlock(hash)
return self.GetBlock(hash, number)
}
// [deprecated by eth/62]
// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
func (self *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
number := self.hc.GetBlockNumber(hash)
for i := 0; i < n; i++ {
block := self.GetBlock(hash)
block := self.GetBlock(hash, number)
if block == nil {
break
}
blocks = append(blocks, block)
hash = block.ParentHash()
number--
}
return
}
@ -546,7 +553,7 @@ func (self *BlockChain) GetUnclesInChain(block *types.Block, length int) []*type
uncles := []*types.Header{}
for i := 0; block != nil && i < length; i++ {
uncles = append(uncles, block.Uncles()...)
block = self.GetBlock(block.ParentHash())
block = self.GetBlock(block.ParentHash(), block.NumberU64()-1)
}
return uncles
}
@ -596,20 +603,52 @@ func (self *BlockChain) Rollback(chain []common.Hash) {
for i := len(chain) - 1; i >= 0; i-- {
hash := chain[i]
if self.hc.CurrentHeader().Hash() == hash {
self.hc.SetCurrentHeader(self.GetHeader(self.hc.CurrentHeader().ParentHash))
currentHeader := self.hc.CurrentHeader()
if currentHeader.Hash() == hash {
self.hc.SetCurrentHeader(self.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
}
if self.currentFastBlock.Hash() == hash {
self.currentFastBlock = self.GetBlock(self.currentFastBlock.ParentHash())
self.currentFastBlock = self.GetBlock(self.currentFastBlock.ParentHash(), self.currentFastBlock.NumberU64()-1)
WriteHeadFastBlockHash(self.chainDb, self.currentFastBlock.Hash())
}
if self.currentBlock.Hash() == hash {
self.currentBlock = self.GetBlock(self.currentBlock.ParentHash())
self.currentBlock = self.GetBlock(self.currentBlock.ParentHash(), self.currentBlock.NumberU64()-1)
WriteHeadBlockHash(self.chainDb, self.currentBlock.Hash())
}
}
}
// SetReceiptsData computes all the non-consensus fields of the receipts
func SetReceiptsData(block *types.Block, receipts types.Receipts) {
transactions, logIndex := block.Transactions(), uint(0)
for j := 0; j < len(receipts); j++ {
// The transaction hash can be retrieved from the transaction itself
receipts[j].TxHash = transactions[j].Hash()
// The contract address can be derived from the transaction itself
if MessageCreatesContract(transactions[j]) {
from, _ := transactions[j].From()
receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce())
}
// The used gas can be calculated based on previous receipts
if j == 0 {
receipts[j].GasUsed = new(big.Int).Set(receipts[j].CumulativeGasUsed)
} else {
receipts[j].GasUsed = new(big.Int).Sub(receipts[j].CumulativeGasUsed, receipts[j-1].CumulativeGasUsed)
}
// The derived log fields can simply be set from the block and transaction
for k := 0; k < len(receipts[j].Logs); k++ {
receipts[j].Logs[k].BlockNumber = block.NumberU64()
receipts[j].Logs[k].BlockHash = block.Hash()
receipts[j].Logs[k].TxHash = receipts[j].TxHash
receipts[j].Logs[k].TxIndex = uint(j)
receipts[j].Logs[k].Index = logIndex
logIndex++
}
}
}
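The gas bookkeeping in SetReceiptsData is just a difference of cumulative values. A minimal sketch of that step in isolation (the perTxGas helper is illustrative, not part of the diff):

package main

import (
	"fmt"
	"math/big"
)

// perTxGas recovers per-transaction gas usage from the cumulative gas stored
// in consecutive receipts of one block.
func perTxGas(cumulative []*big.Int) []*big.Int {
	out := make([]*big.Int, len(cumulative))
	for i, c := range cumulative {
		if i == 0 {
			out[i] = new(big.Int).Set(c)
		} else {
			out[i] = new(big.Int).Sub(c, cumulative[i-1])
		}
	}
	return out
}

func main() {
	cumulative := []*big.Int{big.NewInt(21000), big.NewInt(63000), big.NewInt(100000)}
	for i, gas := range perTxGas(cumulative) {
		fmt.Printf("tx %d used %v gas\n", i, gas)
	}
}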
// InsertReceiptChain attempts to complete an already existing header chain with
// transaction and receipt data.
func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
@ -651,40 +690,15 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
continue
}
// Compute all the non-consensus fields of the receipts
transactions, logIndex := block.Transactions(), uint(0)
for j := 0; j < len(receipts); j++ {
// The transaction hash can be retrieved from the transaction itself
receipts[j].TxHash = transactions[j].Hash()
// The contract address can be derived from the transaction itself
if MessageCreatesContract(transactions[j]) {
from, _ := transactions[j].From()
receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce())
}
// The used gas can be calculated based on previous receipts
if j == 0 {
receipts[j].GasUsed = new(big.Int).Set(receipts[j].CumulativeGasUsed)
} else {
receipts[j].GasUsed = new(big.Int).Sub(receipts[j].CumulativeGasUsed, receipts[j-1].CumulativeGasUsed)
}
// The derived log fields can simply be set from the block and transaction
for k := 0; k < len(receipts[j].Logs); k++ {
receipts[j].Logs[k].BlockNumber = block.NumberU64()
receipts[j].Logs[k].BlockHash = block.Hash()
receipts[j].Logs[k].TxHash = receipts[j].TxHash
receipts[j].Logs[k].TxIndex = uint(j)
receipts[j].Logs[k].Index = logIndex
logIndex++
}
}
SetReceiptsData(block, receipts)
// Write all the data out into the database
if err := WriteBody(self.chainDb, block.Hash(), block.Body()); err != nil {
if err := WriteBody(self.chainDb, block.Hash(), block.NumberU64(), block.Body()); err != nil {
errs[index] = fmt.Errorf("failed to write block body: %v", err)
atomic.AddInt32(&failed, 1)
glog.Fatal(errs[index])
return
}
if err := WriteBlockReceipts(self.chainDb, block.Hash(), receipts); err != nil {
if err := WriteBlockReceipts(self.chainDb, block.Hash(), block.NumberU64(), receipts); err != nil {
errs[index] = fmt.Errorf("failed to write block receipts: %v", err)
atomic.AddInt32(&failed, 1)
glog.Fatal(errs[index])
@ -737,7 +751,7 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
// Update the head fast sync block if better
self.mu.Lock()
head := blockChain[len(errs)-1]
if self.GetTd(self.currentFastBlock.Hash()).Cmp(self.GetTd(head.Hash())) < 0 {
if self.GetTd(self.currentFastBlock.Hash(), self.currentFastBlock.NumberU64()).Cmp(self.GetTd(head.Hash(), head.NumberU64())) < 0 {
if err := WriteHeadFastBlockHash(self.chainDb, head.Hash()); err != nil {
glog.Fatalf("failed to update head fast block hash: %v", err)
}
@ -759,12 +773,12 @@ func (self *BlockChain) WriteBlock(block *types.Block) (status WriteStatus, err
defer self.wg.Done()
// Calculate the total difficulty of the block
ptd := self.GetTd(block.ParentHash())
ptd := self.GetTd(block.ParentHash(), block.NumberU64()-1)
if ptd == nil {
return NonStatTy, ParentError(block.ParentHash())
}
localTd := self.GetTd(self.currentBlock.Hash())
localTd := self.GetTd(self.currentBlock.Hash(), self.currentBlock.NumberU64())
externTd := new(big.Int).Add(block.Difficulty(), ptd)
// Make sure no inconsistent state is leaked during insertion
@ -788,7 +802,7 @@ func (self *BlockChain) WriteBlock(block *types.Block) (status WriteStatus, err
status = SideStatTy
}
// Irrelevant of the canonical status, write the block itself to the database
if err := self.hc.WriteTd(block.Hash(), externTd); err != nil {
if err := self.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
glog.Fatalf("failed to write block total difficulty: %v", err)
}
if err := WriteBlock(self.chainDb, block); err != nil {
@ -819,6 +833,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
tstart = time.Now()
nonceChecked = make([]bool, len(chain))
statedb *state.StateDB
)
// Start the parallel nonce verifier.
@ -885,7 +900,11 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
// Create a new statedb using the parent block and report an
// error if it fails.
statedb, err := state.New(self.GetBlock(block.ParentHash()).Root(), self.chainDb)
if statedb == nil {
statedb, err = state.New(self.GetBlock(block.ParentHash(), block.NumberU64()-1).Root(), self.chainDb)
} else {
err = statedb.Reset(chain[i-1].Root())
}
if err != nil {
reportBlock(block, err)
return i, err
@ -897,7 +916,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
return i, err
}
// Validate the state using the default validator
err = self.Validator().ValidateState(block, self.GetBlock(block.ParentHash()), statedb, receipts, usedGas)
err = self.Validator().ValidateState(block, self.GetBlock(block.ParentHash(), block.NumberU64()-1), statedb, receipts, usedGas)
if err != nil {
reportBlock(block, err)
return i, err
@ -911,7 +930,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
// coalesce logs for later processing
coalescedLogs = append(coalescedLogs, logs...)
if err := WriteBlockReceipts(self.chainDb, block.Hash(), receipts); err != nil {
if err := WriteBlockReceipts(self.chainDb, block.Hash(), block.NumberU64(), receipts); err != nil {
return i, err
}
@ -981,7 +1000,7 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
// These logs are later announced as deleted.
collectLogs = func(h common.Hash) {
// Coalesce logs
receipts := GetBlockReceipts(self.chainDb, h)
receipts := GetBlockReceipts(self.chainDb, h, self.hc.GetBlockNumber(h))
for _, receipt := range receipts {
deletedLogs = append(deletedLogs, receipt.Logs...)
@ -993,7 +1012,7 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
// first reduce whoever is higher bound
if oldBlock.NumberU64() > newBlock.NumberU64() {
// reduce old chain
for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = self.GetBlock(oldBlock.ParentHash()) {
for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = self.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
oldChain = append(oldChain, oldBlock)
deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
@ -1001,7 +1020,7 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
}
} else {
// reduce new chain and append new chain blocks for inserting later on
for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = self.GetBlock(newBlock.ParentHash()) {
for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = self.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
newChain = append(newChain, newBlock)
}
}
@ -1024,7 +1043,7 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
collectLogs(oldBlock.Hash())
oldBlock, newBlock = self.GetBlock(oldBlock.ParentHash()), self.GetBlock(newBlock.ParentHash())
oldBlock, newBlock = self.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), self.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
if oldBlock == nil {
return fmt.Errorf("Invalid old chain")
}
@ -1047,7 +1066,7 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
if err := WriteTransactions(self.chainDb, block); err != nil {
return err
}
receipts := GetBlockReceipts(self.chainDb, block.Hash())
receipts := GetBlockReceipts(self.chainDb, block.Hash(), block.NumberU64())
// write receipts
if err := WriteReceipts(self.chainDb, receipts); err != nil {
return err
@ -1117,15 +1136,12 @@ func (self *BlockChain) update() {
}
}
// reportBlock reports the given block and error using the canonical block
// reporting tool. Reporting the block to the service is handled in a separate
// goroutine.
// reportBlock logs a bad block error.
func reportBlock(block *types.Block, err error) {
if glog.V(logger.Error) {
glog.Errorf("Bad block #%v (%s)\n", block.Number(), block.Hash().Hex())
glog.Errorf(" %v", err)
}
go ReportBlock(block, err)
}
// InsertHeaderChain attempts to insert the given header chain in to the local
@ -1185,15 +1201,27 @@ func (self *BlockChain) CurrentHeader() *types.Header {
}
// GetTd retrieves a block's total difficulty in the canonical chain from the
// database by hash, caching it if found.
func (self *BlockChain) GetTd(hash common.Hash) *big.Int {
return self.hc.GetTd(hash)
// database by hash and number, caching it if found.
func (self *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
return self.hc.GetTd(hash, number)
}
// GetHeader retrieves a block header from the database by hash, caching it if
// GetTdByHash retrieves a block's total difficulty in the canonical chain from the
// database by hash, caching it if found.
func (self *BlockChain) GetTdByHash(hash common.Hash) *big.Int {
return self.hc.GetTdByHash(hash)
}
// GetHeader retrieves a block header from the database by hash and number,
// caching it if found.
func (self *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
return self.hc.GetHeader(hash, number)
}
// GetHeaderByHash retrieves a block header from the database by hash, caching it if
// found.
func (self *BlockChain) GetHeader(hash common.Hash) *types.Header {
return self.hc.GetHeader(hash)
func (self *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
return self.hc.GetHeaderByHash(hash)
}
// HasHeader checks if a block header is present in the database or not, caching

View File

@ -36,34 +36,72 @@ var (
headBlockKey = []byte("LastBlock")
headFastKey = []byte("LastFast")
blockPrefix = []byte("block-")
blockNumPrefix = []byte("block-num-")
headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
tdSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + tdSuffix -> td
numSuffix = []byte("n") // headerPrefix + num (uint64 big endian) + numSuffix -> hash
blockHashPrefix = []byte("H") // blockHashPrefix + hash -> num (uint64 big endian)
bodyPrefix = []byte("b") // bodyPrefix + num (uint64 big endian) + hash -> block body
blockReceiptsPrefix = []byte("r") // blockReceiptsPrefix + num (uint64 big endian) + hash -> block receipts
headerSuffix = []byte("-header")
bodySuffix = []byte("-body")
tdSuffix = []byte("-td")
txMetaSuffix = []byte{0x01}
receiptsPrefix = []byte("receipts-")
blockReceiptsPrefix = []byte("receipts-block-")
txMetaSuffix = []byte{0x01}
receiptsPrefix = []byte("receipts-")
mipmapPre = []byte("mipmap-log-bloom-")
MIPMapLevels = []uint64{1000000, 500000, 100000, 50000, 1000}
blockHashPrefix = []byte("block-hash-") // [deprecated by the header/block split, remove eventually]
configPrefix = []byte("ethereum-config-") // config prefix for the db
// used by old (non-sequential keys) db, now only used for conversion
oldBlockPrefix = []byte("block-")
oldHeaderSuffix = []byte("-header")
oldTdSuffix = []byte("-td") // headerPrefix + num (uint64 big endian) + hash + tdSuffix -> td
oldBodySuffix = []byte("-body")
oldBlockNumPrefix = []byte("block-num-")
oldBlockReceiptsPrefix = []byte("receipts-block-")
oldBlockHashPrefix = []byte("block-hash-") // [deprecated by the header/block split, remove eventually]
)
// encodeBlockNumber encodes a block number as big endian uint64
func encodeBlockNumber(number uint64) []byte {
enc := make([]byte, 8)
binary.BigEndian.PutUint64(enc, number)
return enc
}
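With the prefixes above, every header-related key starts with the big-endian block number, so entries for consecutive blocks sort next to each other in the backing key/value store. A sketch of how the keys compose (the headerKey helper and sample hash are illustrative; only the prefixes and encodeBlockNumber come from the diff):

package main

import (
	"encoding/binary"
	"fmt"
)

var (
	headerPrefix = []byte("h")
	tdSuffix     = []byte("t")
	numSuffix    = []byte("n")
)

func encodeBlockNumber(number uint64) []byte {
	enc := make([]byte, 8)
	binary.BigEndian.PutUint64(enc, number)
	return enc
}

// headerKey = headerPrefix + num (uint64 big endian) + hash
func headerKey(number uint64, hash []byte) []byte {
	key := make([]byte, 0, len(headerPrefix)+8+len(hash))
	key = append(key, headerPrefix...)
	key = append(key, encodeBlockNumber(number)...)
	return append(key, hash...)
}

func main() {
	hash := []byte{0xde, 0xad, 0xbe, 0xef} // stand-in for a 32-byte block hash

	fmt.Printf("header: %x\n", headerKey(4242, hash))
	fmt.Printf("td:     %x\n", append(headerKey(4242, hash), tdSuffix...))
	fmt.Printf("canon:  %x\n", append(append(headerPrefix, encodeBlockNumber(4242)...), numSuffix...))
}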
// GetCanonicalHash retrieves a hash assigned to a canonical block number.
func GetCanonicalHash(db ethdb.Database, number uint64) common.Hash {
data, _ := db.Get(append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...))
data, _ := db.Get(append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...))
if len(data) == 0 {
return common.Hash{}
data, _ = db.Get(append(oldBlockNumPrefix, big.NewInt(int64(number)).Bytes()...))
if len(data) == 0 {
return common.Hash{}
}
}
return common.BytesToHash(data)
}
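GetCanonicalHash, like the other accessors below, follows the same migration pattern: look up the new sequential key first and only fall back to the legacy key if nothing is found, so databases written by pre-split clients stay readable. The pattern in isolation (the in-memory kv type and keys are made up for illustration):

package main

import "fmt"

// kv is a toy stand-in for ethdb.Database, just enough to show the fallback.
type kv map[string][]byte

func (db kv) Get(key []byte) ([]byte, error) { return db[string(key)], nil }

// getWithFallback reads the new-format key and falls back to the old one.
func getWithFallback(db kv, newKey, oldKey []byte) []byte {
	data, _ := db.Get(newKey)
	if len(data) == 0 {
		data, _ = db.Get(oldKey)
	}
	return data
}

func main() {
	db := kv{"block-num-42": []byte("legacy payload")} // only the old key exists
	fmt.Printf("%s\n", getWithFallback(db, []byte("h:42:n"), []byte("block-num-42")))
}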
// missingNumber is returned by GetBlockNumber if no header with the
// given block hash has been stored in the database
const missingNumber = uint64(0xffffffffffffffff)
// GetBlockNumber returns the block number assigned to a block hash
// if the corresponding header is present in the database
func GetBlockNumber(db ethdb.Database, hash common.Hash) uint64 {
data, _ := db.Get(append(blockHashPrefix, hash.Bytes()...))
if len(data) != 8 {
data, _ := db.Get(append(append(oldBlockPrefix, hash.Bytes()...), oldHeaderSuffix...))
if len(data) == 0 {
return missingNumber
}
header := new(types.Header)
if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
glog.Fatalf("failed to decode block header: %v", err)
}
return header.Number.Uint64()
}
return binary.BigEndian.Uint64(data)
}
// GetHeadHeaderHash retrieves the hash of the current canonical head block's
// header. The difference between this and GetHeadBlockHash is that whereas the
// last block hash is only updated upon a full block import, the last header
@ -100,15 +138,18 @@ func GetHeadFastBlockHash(db ethdb.Database) common.Hash {
// GetHeaderRLP retrieves a block header in its raw RLP database encoding, or nil
// if the header's not found.
func GetHeaderRLP(db ethdb.Database, hash common.Hash) rlp.RawValue {
data, _ := db.Get(append(append(blockPrefix, hash[:]...), headerSuffix...))
func GetHeaderRLP(db ethdb.Database, hash common.Hash, number uint64) rlp.RawValue {
data, _ := db.Get(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...))
if len(data) == 0 {
data, _ = db.Get(append(append(oldBlockPrefix, hash.Bytes()...), oldHeaderSuffix...))
}
return data
}
// GetHeader retrieves the block header corresponding to the hash, nil if none
// found.
func GetHeader(db ethdb.Database, hash common.Hash) *types.Header {
data := GetHeaderRLP(db, hash)
func GetHeader(db ethdb.Database, hash common.Hash, number uint64) *types.Header {
data := GetHeaderRLP(db, hash, number)
if len(data) == 0 {
return nil
}
@ -121,15 +162,18 @@ func GetHeader(db ethdb.Database, hash common.Hash) *types.Header {
}
// GetBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
func GetBodyRLP(db ethdb.Database, hash common.Hash) rlp.RawValue {
data, _ := db.Get(append(append(blockPrefix, hash[:]...), bodySuffix...))
func GetBodyRLP(db ethdb.Database, hash common.Hash, number uint64) rlp.RawValue {
data, _ := db.Get(append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...))
if len(data) == 0 {
data, _ = db.Get(append(append(oldBlockPrefix, hash.Bytes()...), oldBodySuffix...))
}
return data
}
// GetBody retrieves the block body (transactions, uncles) corresponding to the
// hash, nil if none found.
func GetBody(db ethdb.Database, hash common.Hash) *types.Body {
data := GetBodyRLP(db, hash)
func GetBody(db ethdb.Database, hash common.Hash, number uint64) *types.Body {
data := GetBodyRLP(db, hash, number)
if len(data) == 0 {
return nil
}
@ -143,10 +187,13 @@ func GetBody(db ethdb.Database, hash common.Hash) *types.Body {
// GetTd retrieves a block's total difficulty corresponding to the hash, nil if
// none found.
func GetTd(db ethdb.Database, hash common.Hash) *big.Int {
data, _ := db.Get(append(append(blockPrefix, hash.Bytes()...), tdSuffix...))
func GetTd(db ethdb.Database, hash common.Hash, number uint64) *big.Int {
data, _ := db.Get(append(append(append(headerPrefix, encodeBlockNumber(number)...), hash[:]...), tdSuffix...))
if len(data) == 0 {
return nil
data, _ = db.Get(append(append(oldBlockPrefix, hash.Bytes()...), oldTdSuffix...))
if len(data) == 0 {
return nil
}
}
td := new(big.Int)
if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
@ -158,13 +205,13 @@ func GetTd(db ethdb.Database, hash common.Hash) *big.Int {
// GetBlock retrieves an entire block corresponding to the hash, assembling it
// back from the stored header and body.
func GetBlock(db ethdb.Database, hash common.Hash) *types.Block {
func GetBlock(db ethdb.Database, hash common.Hash, number uint64) *types.Block {
// Retrieve the block header and body contents
header := GetHeader(db, hash)
header := GetHeader(db, hash, number)
if header == nil {
return nil
}
body := GetBody(db, hash)
body := GetBody(db, hash, number)
if body == nil {
return nil
}
@ -174,10 +221,13 @@ func GetBlock(db ethdb.Database, hash common.Hash) *types.Block {
// GetBlockReceipts retrieves the receipts generated by the transactions included
// in a block given by its hash.
func GetBlockReceipts(db ethdb.Database, hash common.Hash) types.Receipts {
data, _ := db.Get(append(blockReceiptsPrefix, hash[:]...))
func GetBlockReceipts(db ethdb.Database, hash common.Hash, number uint64) types.Receipts {
data, _ := db.Get(append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash[:]...))
if len(data) == 0 {
return nil
data, _ = db.Get(append(oldBlockReceiptsPrefix, hash.Bytes()...))
if len(data) == 0 {
return nil
}
}
storageReceipts := []*types.ReceiptForStorage{}
if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
@ -235,10 +285,9 @@ func GetReceipt(db ethdb.Database, txHash common.Hash) *types.Receipt {
// WriteCanonicalHash stores the canonical hash for the given block number.
func WriteCanonicalHash(db ethdb.Database, hash common.Hash, number uint64) error {
key := append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...)
key := append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...)
if err := db.Put(key, hash.Bytes()); err != nil {
glog.Fatalf("failed to store number to hash mapping into database: %v", err)
return err
}
return nil
}
@ -247,7 +296,6 @@ func WriteCanonicalHash(db ethdb.Database, hash common.Hash, number uint64) erro
func WriteHeadHeaderHash(db ethdb.Database, hash common.Hash) error {
if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
glog.Fatalf("failed to store last header's hash into database: %v", err)
return err
}
return nil
}
@ -256,7 +304,6 @@ func WriteHeadHeaderHash(db ethdb.Database, hash common.Hash) error {
func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error {
if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
glog.Fatalf("failed to store last block's hash into database: %v", err)
return err
}
return nil
}
@ -265,7 +312,6 @@ func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error {
func WriteHeadFastBlockHash(db ethdb.Database, hash common.Hash) error {
if err := db.Put(headFastKey, hash.Bytes()); err != nil {
glog.Fatalf("failed to store last fast block's hash into database: %v", err)
return err
}
return nil
}
@ -276,45 +322,49 @@ func WriteHeader(db ethdb.Database, header *types.Header) error {
if err != nil {
return err
}
key := append(append(blockPrefix, header.Hash().Bytes()...), headerSuffix...)
hash := header.Hash().Bytes()
num := header.Number.Uint64()
encNum := encodeBlockNumber(num)
key := append(blockHashPrefix, hash...)
if err := db.Put(key, encNum); err != nil {
glog.Fatalf("failed to store hash to number mapping into database: %v", err)
}
key = append(append(headerPrefix, encNum...), hash...)
if err := db.Put(key, data); err != nil {
glog.Fatalf("failed to store header into database: %v", err)
return err
}
glog.V(logger.Debug).Infof("stored header #%v [%x…]", header.Number, header.Hash().Bytes()[:4])
glog.V(logger.Debug).Infof("stored header #%v [%x…]", header.Number, hash[:4])
return nil
}
// WriteBody serializes the body of a block into the database.
func WriteBody(db ethdb.Database, hash common.Hash, body *types.Body) error {
func WriteBody(db ethdb.Database, hash common.Hash, number uint64, body *types.Body) error {
data, err := rlp.EncodeToBytes(body)
if err != nil {
return err
}
return WriteBodyRLP(db, hash, data)
return WriteBodyRLP(db, hash, number, data)
}
// WriteBodyRLP writes a serialized body of a block into the database.
func WriteBodyRLP(db ethdb.Database, hash common.Hash, rlp rlp.RawValue) error {
key := append(append(blockPrefix, hash.Bytes()...), bodySuffix...)
func WriteBodyRLP(db ethdb.Database, hash common.Hash, number uint64, rlp rlp.RawValue) error {
key := append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
if err := db.Put(key, rlp); err != nil {
glog.Fatalf("failed to store block body into database: %v", err)
return err
}
glog.V(logger.Debug).Infof("stored block body [%x…]", hash.Bytes()[:4])
return nil
}
// WriteTd serializes the total difficulty of a block into the database.
func WriteTd(db ethdb.Database, hash common.Hash, td *big.Int) error {
func WriteTd(db ethdb.Database, hash common.Hash, number uint64, td *big.Int) error {
data, err := rlp.EncodeToBytes(td)
if err != nil {
return err
}
key := append(append(blockPrefix, hash.Bytes()...), tdSuffix...)
key := append(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...), tdSuffix...)
if err := db.Put(key, data); err != nil {
glog.Fatalf("failed to store block total difficulty into database: %v", err)
return err
}
glog.V(logger.Debug).Infof("stored block total difficulty [%x…]: %v", hash.Bytes()[:4], td)
return nil
@ -323,7 +373,7 @@ func WriteTd(db ethdb.Database, hash common.Hash, td *big.Int) error {
// WriteBlock serializes a block into the database, header and body separately.
func WriteBlock(db ethdb.Database, block *types.Block) error {
// Store the body first to retain database consistency
if err := WriteBody(db, block.Hash(), block.Body()); err != nil {
if err := WriteBody(db, block.Hash(), block.NumberU64(), block.Body()); err != nil {
return err
}
// Store the header too, signaling full block ownership
@ -336,7 +386,7 @@ func WriteBlock(db ethdb.Database, block *types.Block) error {
// WriteBlockReceipts stores all the transaction receipts belonging to a block
// as a single receipt slice. This is used during chain reorganisations for
// rescheduling dropped transactions.
func WriteBlockReceipts(db ethdb.Database, hash common.Hash, receipts types.Receipts) error {
func WriteBlockReceipts(db ethdb.Database, hash common.Hash, number uint64, receipts types.Receipts) error {
// Convert the receipts into their storage form and serialize them
storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
for i, receipt := range receipts {
@ -347,9 +397,9 @@ func WriteBlockReceipts(db ethdb.Database, hash common.Hash, receipts types.Rece
return err
}
// Store the flattened receipt slice
if err := db.Put(append(blockReceiptsPrefix, hash.Bytes()...), bytes); err != nil {
key := append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
if err := db.Put(key, bytes); err != nil {
glog.Fatalf("failed to store block receipts into database: %v", err)
return err
}
glog.V(logger.Debug).Infof("stored block receipts [%x…]", hash.Bytes()[:4])
return nil
@ -393,11 +443,20 @@ func WriteTransactions(db ethdb.Database, block *types.Block) error {
// Write the scheduled data into the database
if err := batch.Write(); err != nil {
glog.Fatalf("failed to store transactions into database: %v", err)
return err
}
return nil
}
// WriteReceipt stores a single transaction receipt into the database.
func WriteReceipt(db ethdb.Database, receipt *types.Receipt) error {
storageReceipt := (*types.ReceiptForStorage)(receipt)
data, err := rlp.EncodeToBytes(storageReceipt)
if err != nil {
return err
}
return db.Put(append(receiptsPrefix, receipt.TxHash.Bytes()...), data)
}
// WriteReceipts stores a batch of transaction receipts into the database.
func WriteReceipts(db ethdb.Database, receipts types.Receipts) error {
batch := db.NewBatch()
@ -416,42 +475,42 @@ func WriteReceipts(db ethdb.Database, receipts types.Receipts) error {
// Write the scheduled data into the database
if err := batch.Write(); err != nil {
glog.Fatalf("failed to store receipts into database: %v", err)
return err
}
return nil
}
// DeleteCanonicalHash removes the number to hash canonical mapping.
func DeleteCanonicalHash(db ethdb.Database, number uint64) {
db.Delete(append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...))
db.Delete(append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...))
}
// DeleteHeader removes all block header data associated with a hash.
func DeleteHeader(db ethdb.Database, hash common.Hash) {
db.Delete(append(append(blockPrefix, hash.Bytes()...), headerSuffix...))
func DeleteHeader(db ethdb.Database, hash common.Hash, number uint64) {
db.Delete(append(blockHashPrefix, hash.Bytes()...))
db.Delete(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...))
}
// DeleteBody removes all block body data associated with a hash.
func DeleteBody(db ethdb.Database, hash common.Hash) {
db.Delete(append(append(blockPrefix, hash.Bytes()...), bodySuffix...))
func DeleteBody(db ethdb.Database, hash common.Hash, number uint64) {
db.Delete(append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...))
}
// DeleteTd removes all block total difficulty data associated with a hash.
func DeleteTd(db ethdb.Database, hash common.Hash) {
db.Delete(append(append(blockPrefix, hash.Bytes()...), tdSuffix...))
func DeleteTd(db ethdb.Database, hash common.Hash, number uint64) {
db.Delete(append(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...), tdSuffix...))
}
// DeleteBlock removes all block data associated with a hash.
func DeleteBlock(db ethdb.Database, hash common.Hash) {
DeleteBlockReceipts(db, hash)
DeleteHeader(db, hash)
DeleteBody(db, hash)
DeleteTd(db, hash)
func DeleteBlock(db ethdb.Database, hash common.Hash, number uint64) {
DeleteBlockReceipts(db, hash, number)
DeleteHeader(db, hash, number)
DeleteBody(db, hash, number)
DeleteTd(db, hash, number)
}
// DeleteBlockReceipts removes all receipt data associated with a block hash.
func DeleteBlockReceipts(db ethdb.Database, hash common.Hash) {
db.Delete(append(blockReceiptsPrefix, hash.Bytes()...))
func DeleteBlockReceipts(db ethdb.Database, hash common.Hash, number uint64) {
db.Delete(append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...))
}
// DeleteTransaction removes all transaction data associated with a hash.
@ -471,7 +530,7 @@ func DeleteReceipt(db ethdb.Database, hash common.Hash) {
// access the old combined block representation. It will be dropped after the
// network transitions to eth/63.
func GetBlockByHashOld(db ethdb.Database, hash common.Hash) *types.Block {
data, _ := db.Get(append(blockHashPrefix, hash[:]...))
data, _ := db.Get(append(oldBlockHashPrefix, hash[:]...))
if len(data) == 0 {
return nil
}

View File

@ -88,7 +88,7 @@ func WriteGenesisBlock(chainDb ethdb.Database, reader io.Reader) (*types.Block,
Root: root,
}, nil, nil, nil)
if block := GetBlock(chainDb, block.Hash()); block != nil {
if block := GetBlock(chainDb, block.Hash(), block.NumberU64()); block != nil {
glog.V(logger.Info).Infoln("Genesis block already in chain. Writing canonical number")
err := WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
if err != nil {
@ -100,13 +100,13 @@ func WriteGenesisBlock(chainDb ethdb.Database, reader io.Reader) (*types.Block,
if err := stateBatch.Write(); err != nil {
return nil, fmt.Errorf("cannot write state: %v", err)
}
if err := WriteTd(chainDb, block.Hash(), difficulty); err != nil {
if err := WriteTd(chainDb, block.Hash(), block.NumberU64(), difficulty); err != nil {
return nil, err
}
if err := WriteBlock(chainDb, block); err != nil {
return nil, err
}
if err := WriteBlockReceipts(chainDb, block.Hash(), nil); err != nil {
if err := WriteBlockReceipts(chainDb, block.Hash(), block.NumberU64(), nil); err != nil {
return nil, err
}
if err := WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()); err != nil {

View File

@ -35,6 +35,12 @@ import (
"github.com/hashicorp/golang-lru"
)
const (
headerCacheLimit = 512
tdCacheLimit = 1024
numberCacheLimit = 2048
)
// HeaderChain implements the basic block header chain logic that is shared by
// core.BlockChain and light.LightChain. It is not usable in itself, only as
// a part of either structure.
@ -51,6 +57,7 @@ type HeaderChain struct {
headerCache *lru.Cache // Cache for the most recent block headers
tdCache *lru.Cache // Cache for the most recent block total difficulties
numberCache *lru.Cache // Cache for the most recent block numbers
procInterrupt func() bool
@ -68,6 +75,7 @@ type getHeaderValidatorFn func() HeaderValidator
func NewHeaderChain(chainDb ethdb.Database, config *ChainConfig, getValidator getHeaderValidatorFn, procInterrupt func() bool) (*HeaderChain, error) {
headerCache, _ := lru.New(headerCacheLimit)
tdCache, _ := lru.New(tdCacheLimit)
numberCache, _ := lru.New(numberCacheLimit)
// Seed a fast but crypto originating random generator
seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
@ -80,6 +88,7 @@ func NewHeaderChain(chainDb ethdb.Database, config *ChainConfig, getValidator ge
chainDb: chainDb,
headerCache: headerCache,
tdCache: tdCache,
numberCache: numberCache,
procInterrupt: procInterrupt,
rand: mrand.New(mrand.NewSource(seed.Int64())),
getValidator: getValidator,
@ -97,7 +106,7 @@ func NewHeaderChain(chainDb ethdb.Database, config *ChainConfig, getValidator ge
hc.currentHeader = hc.genesisHeader
if head := GetHeadBlockHash(chainDb); head != (common.Hash{}) {
if chead := hc.GetHeader(head); chead != nil {
if chead := hc.GetHeaderByHash(head); chead != nil {
hc.currentHeader = chead
}
}
@ -106,6 +115,19 @@ func NewHeaderChain(chainDb ethdb.Database, config *ChainConfig, getValidator ge
return hc, nil
}
// GetBlockNumber retrieves the block number belonging to the given hash
// from the cache or database
func (hc *HeaderChain) GetBlockNumber(hash common.Hash) uint64 {
if cached, ok := hc.numberCache.Get(hash); ok {
return cached.(uint64)
}
number := GetBlockNumber(hc.chainDb, hash)
if number != missingNumber {
hc.numberCache.Add(hash, number)
}
return number
}
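The number cache keeps the hash-to-number translation cheap, since every by-hash accessor now needs it before it can build a sequential key. A usage sketch of the underlying LRU (github.com/hashicorp/golang-lru is the dependency already used for the header and TD caches; the lookup closure below is illustrative):

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	numberCache, _ := lru.New(2048)

	// lookup memoises an expensive hash→number resolution in the LRU cache.
	lookup := func(hash string) uint64 {
		if cached, ok := numberCache.Get(hash); ok {
			return cached.(uint64)
		}
		number := uint64(len(hash)) // stand-in for the database read
		numberCache.Add(hash, number)
		return number
	}

	fmt.Println(lookup("0xabcdef"), lookup("0xabcdef")) // second call hits the cache
}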
// WriteHeader writes a header into the local chain, given that its parent is
// already known. If the total difficulty of the newly inserted header becomes
// greater than the current known TD, the canonical chain is re-routed.
@ -122,11 +144,11 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
number = header.Number.Uint64()
)
// Calculate the total difficulty of the header
ptd := hc.GetTd(header.ParentHash)
ptd := hc.GetTd(header.ParentHash, number-1)
if ptd == nil {
return NonStatTy, ParentError(header.ParentHash)
}
localTd := hc.GetTd(hc.currentHeaderHash)
localTd := hc.GetTd(hc.currentHeaderHash, hc.currentHeader.Number.Uint64())
externTd := new(big.Int).Add(header.Difficulty, ptd)
// If the total difficulty is higher than our known, add it to the canonical chain
@ -134,21 +156,25 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
if externTd.Cmp(localTd) > 0 || (externTd.Cmp(localTd) == 0 && mrand.Float64() < 0.5) {
// Delete any canonical number assignments above the new head
for i := number + 1; GetCanonicalHash(hc.chainDb, i) != (common.Hash{}); i++ {
for i := number + 1; ; i++ {
hash := GetCanonicalHash(hc.chainDb, i)
if hash == (common.Hash{}) {
break
}
DeleteCanonicalHash(hc.chainDb, i)
}
// Overwrite any stale canonical number assignments
var (
headHash = header.ParentHash
headHeader = hc.GetHeader(headHash)
headNumber = headHeader.Number.Uint64()
headNumber = header.Number.Uint64() - 1
headHeader = hc.GetHeader(headHash, headNumber)
)
for GetCanonicalHash(hc.chainDb, headNumber) != headHash {
WriteCanonicalHash(hc.chainDb, headHash, headNumber)
headHash = headHeader.ParentHash
headHeader = hc.GetHeader(headHash)
headNumber = headHeader.Number.Uint64()
headNumber = headHeader.Number.Uint64() - 1
headHeader = hc.GetHeader(headHash, headNumber)
}
// Extend the canonical chain with the new header
if err := WriteCanonicalHash(hc.chainDb, hash, number); err != nil {
@ -164,13 +190,14 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
status = SideStatTy
}
// Irrelevant of the canonical status, write the header itself to the database
if err := hc.WriteTd(hash, externTd); err != nil {
if err := hc.WriteTd(hash, number, externTd); err != nil {
glog.Fatalf("failed to write header total difficulty: %v", err)
}
if err := WriteHeader(hc.chainDb, header); err != nil {
glog.Fatalf("failed to write header contents: %v", err)
}
hc.headerCache.Add(hash, header)
hc.numberCache.Add(hash, number)
return
}
@ -239,7 +266,7 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, checkFreq int, w
var err error
if index == 0 {
err = hc.getValidator().ValidateHeader(header, hc.GetHeader(header.ParentHash), checkPow)
err = hc.getValidator().ValidateHeader(header, hc.GetHeader(header.ParentHash, header.Number.Uint64()-1), checkPow)
} else {
err = hc.getValidator().ValidateHeader(header, chain[index-1], checkPow)
}
@ -300,7 +327,7 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, checkFreq int, w
// hash, fetching towards the genesis block.
func (hc *HeaderChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
// Get the origin header from which to fetch
header := hc.GetHeader(hash)
header := hc.GetHeaderByHash(hash)
if header == nil {
return nil
}
@ -308,7 +335,7 @@ func (hc *HeaderChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []co
chain := make([]common.Hash, 0, max)
for i := uint64(0); i < max; i++ {
next := header.ParentHash
if header = hc.GetHeader(next); header == nil {
if header = hc.GetHeader(next, header.Number.Uint64()-1); header == nil {
break
}
chain = append(chain, next)
@ -320,13 +347,13 @@ func (hc *HeaderChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []co
}
// GetTd retrieves a block's total difficulty in the canonical chain from the
// database by hash, caching it if found.
func (hc *HeaderChain) GetTd(hash common.Hash) *big.Int {
// database by hash and number, caching it if found.
func (hc *HeaderChain) GetTd(hash common.Hash, number uint64) *big.Int {
// Short circuit if the td's already in the cache, retrieve otherwise
if cached, ok := hc.tdCache.Get(hash); ok {
return cached.(*big.Int)
}
td := GetTd(hc.chainDb, hash)
td := GetTd(hc.chainDb, hash, number)
if td == nil {
return nil
}
@ -335,24 +362,30 @@ func (hc *HeaderChain) GetTd(hash common.Hash) *big.Int {
return td
}
// GetTdByHash retrieves a block's total difficulty in the canonical chain from the
// database by hash, caching it if found.
func (hc *HeaderChain) GetTdByHash(hash common.Hash) *big.Int {
return hc.GetTd(hash, hc.GetBlockNumber(hash))
}
// WriteTd stores a block's total difficulty into the database, also caching it
// along the way.
func (hc *HeaderChain) WriteTd(hash common.Hash, td *big.Int) error {
if err := WriteTd(hc.chainDb, hash, td); err != nil {
func (hc *HeaderChain) WriteTd(hash common.Hash, number uint64, td *big.Int) error {
if err := WriteTd(hc.chainDb, hash, number, td); err != nil {
return err
}
hc.tdCache.Add(hash, new(big.Int).Set(td))
return nil
}
// GetHeader retrieves a block header from the database by hash, caching it if
// found.
func (hc *HeaderChain) GetHeader(hash common.Hash) *types.Header {
// GetHeader retrieves a block header from the database by hash and number,
// caching it if found.
func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header {
// Short circuit if the header's already in the cache, retrieve otherwise
if header, ok := hc.headerCache.Get(hash); ok {
return header.(*types.Header)
}
header := GetHeader(hc.chainDb, hash)
header := GetHeader(hc.chainDb, hash, number)
if header == nil {
return nil
}
@ -361,10 +394,16 @@ func (hc *HeaderChain) GetHeader(hash common.Hash) *types.Header {
return header
}
// GetHeaderByHash retrieves a block header from the database by hash, caching it if
// found.
func (hc *HeaderChain) GetHeaderByHash(hash common.Hash) *types.Header {
return hc.GetHeader(hash, hc.GetBlockNumber(hash))
}
// HasHeader checks if a block header is present in the database or not, caching
// it if present.
func (hc *HeaderChain) HasHeader(hash common.Hash) bool {
return hc.GetHeader(hash) != nil
return hc.GetHeaderByHash(hash) != nil
}
// GetHeaderByNumber retrieves a block header from the database by number,
@ -374,7 +413,7 @@ func (hc *HeaderChain) GetHeaderByNumber(number uint64) *types.Header {
if hash == (common.Hash{}) {
return nil
}
return hc.GetHeader(hash)
return hc.GetHeader(hash, number)
}
// CurrentHeader retrieves the current head header of the canonical chain. The
@ -394,7 +433,7 @@ func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
// DeleteCallback is a callback function that is called by SetHead before
// each header is deleted.
type DeleteCallback func(common.Hash)
type DeleteCallback func(common.Hash, uint64)
// SetHead rewinds the local chain to a new head. Everything above the new head
// will be deleted and the new one set.
@ -406,12 +445,13 @@ func (hc *HeaderChain) SetHead(head uint64, delFn DeleteCallback) {
for hc.currentHeader != nil && hc.currentHeader.Number.Uint64() > head {
hash := hc.currentHeader.Hash()
num := hc.currentHeader.Number.Uint64()
if delFn != nil {
delFn(hash)
delFn(hash, num)
}
DeleteHeader(hc.chainDb, hash)
DeleteTd(hc.chainDb, hash)
hc.currentHeader = hc.GetHeader(hc.currentHeader.ParentHash)
DeleteHeader(hc.chainDb, hash, num)
DeleteTd(hc.chainDb, hash, num)
hc.currentHeader = hc.GetHeader(hc.currentHeader.ParentHash, hc.currentHeader.Number.Uint64()-1)
}
// Roll back the canonical chain numbering
for i := height; i > head; i-- {
@ -420,6 +460,7 @@ func (hc *HeaderChain) SetHead(head uint64, delFn DeleteCallback) {
// Clear out any stale content from the caches
hc.headerCache.Purge()
hc.tdCache.Purge()
hc.numberCache.Purge()
if hc.currentHeader == nil {
hc.currentHeader = hc.genesisHeader

View File

@ -68,6 +68,28 @@ func New(root common.Hash, db ethdb.Database) (*StateDB, error) {
}, nil
}
// Reset clears out all ephemeral state objects from the state db, but keeps
// the underlying state trie to avoid reloading data for the next operations.
func (self *StateDB) Reset(root common.Hash) error {
var (
err error
tr = self.trie
)
if self.trie.Hash() != root {
if tr, err = trie.NewSecure(root, self.db); err != nil {
return err
}
}
*self = StateDB{
db: self.db,
trie: tr,
stateObjects: make(map[string]*StateObject),
refund: new(big.Int),
logs: make(map[common.Hash]vm.Logs),
}
return nil
}
func (self *StateDB) StartRecord(thash, bhash common.Hash, ti int) {
self.thash = thash
self.bhash = bhash
@ -127,7 +149,7 @@ func (self *StateDB) GetNonce(addr common.Address) uint64 {
return stateObject.nonce
}
return 0
return StartingNonce
}
func (self *StateDB) GetCode(addr common.Address) []byte {

View File

@ -368,6 +368,9 @@ func (self *TxPool) AddTransactions(txs []*types.Transaction) {
// GetTransaction returns a transaction if it is contained in the pool
// and nil otherwise.
func (tp *TxPool) GetTransaction(hash common.Hash) *types.Transaction {
tp.mu.RLock()
defer tp.mu.RUnlock()
// check the txs first
if tx, ok := tp.pending[hash]; ok {
return tx
@ -421,12 +424,18 @@ func (self *TxPool) RemoveTransactions(txs types.Transactions) {
self.mu.Lock()
defer self.mu.Unlock()
for _, tx := range txs {
self.RemoveTx(tx.Hash())
self.removeTx(tx.Hash())
}
}
// RemoveTx removes the transaction with the given hash from the pool.
func (pool *TxPool) RemoveTx(hash common.Hash) {
pool.mu.Lock()
defer pool.mu.Unlock()
pool.removeTx(hash)
}
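The RemoveTx/removeTx split is the usual Go trick for avoiding recursive mutex acquisition: the exported method takes the lock, the unexported one assumes it is already held, so batch operations such as RemoveTransactions lock once. A compact, self-contained version of the pattern (types and names are invented for illustration):

package main

import (
	"fmt"
	"sync"
)

type pool struct {
	mu    sync.Mutex
	items map[string]struct{}
}

// Remove is the exported, self-locking entry point.
func (p *pool) Remove(key string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.remove(key)
}

// RemoveAll takes the lock once and reuses the unexported helper per item.
func (p *pool) RemoveAll(keys []string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	for _, key := range keys {
		p.remove(key) // lock already held by the caller
	}
}

// remove assumes p.mu is held.
func (p *pool) remove(key string) { delete(p.items, key) }

func main() {
	p := &pool{items: map[string]struct{}{"a": {}, "b": {}, "c": {}}}
	p.Remove("a")
	p.RemoveAll([]string{"b", "c"})
	fmt.Println("remaining:", len(p.items))
}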
func (pool *TxPool) removeTx(hash common.Hash) {
// delete from pending pool
delete(pool.pending, hash)
// delete from queue

View File

@ -150,8 +150,10 @@ type Block struct {
// of the chain up to and including the block.
td *big.Int
// ReceivedAt is used by package eth to track block propagation time.
ReceivedAt time.Time
// These fields are used by package eth to track
// inter-peer block relay.
ReceivedAt time.Time
ReceivedFrom interface{}
}
// DeprecatedTd is an old relic for extracting the TD of a block. It is in the

View File

@ -73,7 +73,7 @@ type Environment interface {
DelegateCall(me ContractRef, addr common.Address, data []byte, gas, price *big.Int) ([]byte, error)
// Create a new contract
Create(me ContractRef, data []byte, gas, price, value *big.Int) ([]byte, common.Address, error)
StructLogs() []StructLog
}

View File

@ -30,7 +30,7 @@ import (
// to query for information.
func GetHashFn(ref common.Hash, chain *BlockChain) func(n uint64) common.Hash {
return func(n uint64) common.Hash {
for block := chain.GetBlock(ref); block != nil; block = chain.GetBlock(block.ParentHash()) {
for block := chain.GetBlockByHash(ref); block != nil; block = chain.GetBlock(block.ParentHash(), block.NumberU64()-1) {
if block.NumberU64() == n {
return block.Hash()
}

View File

@ -1,18 +1,18 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of go-ethereum.
// This file is part of the go-ethereum library.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package eth
@ -32,8 +32,8 @@ import (
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/gasprice"
"github.com/ethereum/go-ethereum/ethapi"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/miner"
"github.com/ethereum/go-ethereum/rlp"
@ -43,13 +43,12 @@ import (
// PublicFullEthereumAPI provides an API to access Ethereum full node-related
// information.
type PublicFullEthereumAPI struct {
e *FullNodeService
gpo *gasprice.GasPriceOracle
e *FullNodeService
}
// NewPublicFullEthereumAPI creates a new Ethereum protocol API for full nodes.
func NewPublicFullEthereumAPI(e *FullNodeService, gpo *gasprice.GasPriceOracle) *PublicFullEthereumAPI {
return &PublicFullEthereumAPI{e, gpo}
func NewPublicFullEthereumAPI(e *FullNodeService) *PublicFullEthereumAPI {
return &PublicFullEthereumAPI{e}
}
// Etherbase is the address that mining rewards will be sent to
@ -57,7 +56,7 @@ func (s *PublicFullEthereumAPI) Etherbase() (common.Address, error) {
return s.e.Etherbase()
}
// see Etherbase
// Coinbase is the address that mining rewards will be sent to (alias for Etherbase)
func (s *PublicFullEthereumAPI) Coinbase() (common.Address, error) {
return s.Etherbase()
}
@ -97,18 +96,17 @@ func (s *PublicMinerAPI) SubmitWork(nonce rpc.HexNumber, solution, digest common
// result[0], 32 bytes hex encoded current block header pow-hash
// result[1], 32 bytes hex encoded seed hash used for DAG
// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
func (s *PublicMinerAPI) GetWork() ([]string, error) {
func (s *PublicMinerAPI) GetWork() (work [3]string, err error) {
if !s.e.IsMining() {
if err := s.e.StartMining(0, ""); err != nil {
return nil, err
return work, err
}
}
if work, err := s.agent.GetWork(); err == nil {
return work[:], nil
} else {
glog.Infof("%v\n", err)
if work, err = s.agent.GetWork(); err == nil {
return
}
return nil, fmt.Errorf("mining not ready")
glog.V(logger.Debug).Infof("%v", err)
return work, fmt.Errorf("mining not ready")
}
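For context, the boundary in result[2] is just 2^256 divided by the block difficulty. A small stand-alone sketch, with placeholder values, of how an external miner might derive and use it:

package main

import (
	"fmt"
	"math/big"
)

// boundaryFromDifficulty reproduces the "target" encoding described above:
// boundary = 2^256 / difficulty, rendered as a 32-byte hex string.
func boundaryFromDifficulty(difficulty *big.Int) string {
	boundary := new(big.Int).Div(new(big.Int).Lsh(big.NewInt(1), 256), difficulty)
	return fmt.Sprintf("0x%064x", boundary)
}

func main() {
	// work[0] = header pow-hash, work[1] = DAG seed hash, work[2] = boundary;
	// the first two are placeholders here, only the boundary is computed.
	work := [3]string{"0x<pow-hash>", "0x<seed-hash>", boundaryFromDifficulty(big.NewInt(131072))}
	fmt.Println(work[2]) // a valid solution hashes to a value <= this boundary
}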
// SubmitHashrate can be used for remote miners to submit their hash rate. This enables the node to report the combined
@ -298,41 +296,6 @@ func (api *PublicFullDebugAPI) DumpBlock(number uint64) (state.World, error) {
return stateDb.RawDump(), nil
}
// GetBlockRlp retrieves the RLP encoded form of a single block.
func (api *PublicFullDebugAPI) GetBlockRlp(number uint64) (string, error) {
block := api.eth.BlockChain().GetBlockByNumber(number)
if block == nil {
return "", fmt.Errorf("block #%d not found", number)
}
encoded, err := rlp.EncodeToBytes(block)
if err != nil {
return "", err
}
return fmt.Sprintf("%x", encoded), nil
}
// PrintBlock retrieves a block and returns its pretty printed form.
func (api *PublicFullDebugAPI) PrintBlock(number uint64) (string, error) {
block := api.eth.BlockChain().GetBlockByNumber(number)
if block == nil {
return "", fmt.Errorf("block #%d not found", number)
}
return fmt.Sprintf("%s", block), nil
}
// SeedHash retrieves the seed hash of a block.
func (api *PublicFullDebugAPI) SeedHash(number uint64) (string, error) {
block := api.eth.BlockChain().GetBlockByNumber(number)
if block == nil {
return "", fmt.Errorf("block #%d not found", number)
}
hash, err := ethash.GetSeedHash(number)
if err != nil {
return "", err
}
return fmt.Sprintf("0x%x", hash), nil
}
// PrivateFullDebugAPI is the collection of Ethereum full node APIs exposed over
// the private debugging endpoint.
type PrivateFullDebugAPI struct {
@ -346,7 +309,7 @@ func NewPrivateFullDebugAPI(config *core.ChainConfig, eth *FullNodeService) *Pri
return &PrivateFullDebugAPI{config: config, eth: eth}
}
// BlockTraceResults is the returned value when replaying a block to check for
// BlockTraceResult is the returned value when replaying a block to check for
// consensus results and full VM trace logs for all included transactions.
type BlockTraceResult struct {
Validated bool `json:"validated"`
@ -400,7 +363,7 @@ func (api *PrivateFullDebugAPI) TraceBlockByNumber(number uint64, config *vm.Con
// TraceBlockByHash processes the block by hash.
func (api *PrivateFullDebugAPI) TraceBlockByHash(hash common.Hash, config *vm.Config) BlockTraceResult {
// Fetch the block that we aim to reprocess
block := api.eth.BlockChain().GetBlock(hash)
block := api.eth.BlockChain().GetBlockByHash(hash)
if block == nil {
return BlockTraceResult{Error: fmt.Sprintf("block #%x not found", hash)}
}
@ -440,10 +403,10 @@ func (api *PrivateFullDebugAPI) traceBlock(block *types.Block, config *vm.Config
config.Debug = true // make sure debug is set.
config.Logger.Collector = collector
if err := core.ValidateHeader(api.config, blockchain.AuxValidator(), block.Header(), blockchain.GetHeader(block.ParentHash()), true, false); err != nil {
if err := core.ValidateHeader(api.config, blockchain.AuxValidator(), block.Header(), blockchain.GetHeader(block.ParentHash(), block.NumberU64()-1), true, false); err != nil {
return false, collector.traces, err
}
statedb, err := state.New(blockchain.GetBlock(block.ParentHash()).Root(), api.eth.ChainDb())
statedb, err := state.New(blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1).Root(), api.eth.ChainDb())
if err != nil {
return false, collector.traces, err
}
@ -452,21 +415,12 @@ func (api *PrivateFullDebugAPI) traceBlock(block *types.Block, config *vm.Config
if err != nil {
return false, collector.traces, err
}
if err := validator.ValidateState(block, blockchain.GetBlock(block.ParentHash()), statedb, receipts, usedGas); err != nil {
if err := validator.ValidateState(block, blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1), statedb, receipts, usedGas); err != nil {
return false, collector.traces, err
}
return true, collector.traces, nil
}
// VmLoggerOptions are the options used for debugging transactions and capturing
// specific data.
type VmLoggerOptions struct {
DisableMemory bool // disable memory capture
DisableStack bool // disable stack capture
DisableStorage bool // disable storage capture
FullStorage bool // show full storage (slow)
}
// callmsg is the message type used for call transactions.
type callmsg struct {
addr common.Address
@ -498,25 +452,25 @@ func formatError(err error) string {
// TraceTransaction returns the structured logs created during the execution of EVM
// and returns them as a JSON object.
func (s *PrivateFullDebugAPI) TraceTransaction(txHash common.Hash, logger *vm.LogConfig) (*ethapi.ExecutionResult, error) {
func (api *PrivateFullDebugAPI) TraceTransaction(txHash common.Hash, logger *vm.LogConfig) (*ethapi.ExecutionResult, error) {
if logger == nil {
logger = new(vm.LogConfig)
}
// Retrieve the tx from the chain and the containing block
tx, blockHash, _, txIndex := core.GetTransaction(s.eth.ChainDb(), txHash)
tx, blockHash, _, txIndex := core.GetTransaction(api.eth.ChainDb(), txHash)
if tx == nil {
return nil, fmt.Errorf("transaction %x not found", txHash)
}
block := s.eth.BlockChain().GetBlock(blockHash)
block := api.eth.BlockChain().GetBlockByHash(blockHash)
if block == nil {
return nil, fmt.Errorf("block %x not found", blockHash)
}
// Create the state database to mutate and eventually trace
parent := s.eth.BlockChain().GetBlock(block.ParentHash())
parent := api.eth.BlockChain().GetBlock(block.ParentHash(), block.NumberU64()-1)
if parent == nil {
return nil, fmt.Errorf("block parent %x not found", block.ParentHash())
}
stateDb, err := state.New(parent.Root(), s.eth.ChainDb())
stateDb, err := state.New(parent.Root(), api.eth.ChainDb())
if err != nil {
return nil, err
}
@ -537,7 +491,7 @@ func (s *PrivateFullDebugAPI) TraceTransaction(txHash common.Hash, logger *vm.Lo
}
// Mutate the state if we haven't reached the tracing transaction yet
if uint64(idx) < txIndex {
vmenv := core.NewEnv(stateDb, s.config, s.eth.BlockChain(), msg, block.Header(), vm.Config{})
vmenv := core.NewEnv(stateDb, api.config, api.eth.BlockChain(), msg, block.Header(), vm.Config{})
_, _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas()))
if err != nil {
return nil, fmt.Errorf("mutation failed: %v", err)
@ -545,7 +499,7 @@ func (s *PrivateFullDebugAPI) TraceTransaction(txHash common.Hash, logger *vm.Lo
continue
}
// Otherwise trace the transaction and return
vmenv := core.NewEnv(stateDb, s.config, s.eth.BlockChain(), msg, block.Header(), vm.Config{Debug: true, Logger: *logger})
vmenv := core.NewEnv(stateDb, api.config, api.eth.BlockChain(), msg, block.Header(), vm.Config{Debug: true, Logger: *logger})
ret, gas, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas()))
if err != nil {
return nil, fmt.Errorf("tracing failed: %v", err)
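The loop above follows a replay pattern: every transaction before the requested index is applied only to advance the state, and the target transaction alone runs under a debug VM. A toy sketch of that control flow (the types here are illustrative, not the real core types):

package main

import "fmt"

type tx struct{ index int }

// apply stands in for message execution: earlier transactions only mutate the
// state, the target transaction runs with tracing enabled.
func apply(t tx, trace bool) {
	if trace {
		fmt.Printf("tracing tx %d with a debug VM\n", t.index)
		return
	}
	fmt.Printf("replaying tx %d to advance the state\n", t.index)
}

func main() {
	block := []tx{{0}, {1}, {2}, {3}}
	target := 2 // index of the transaction being traced
	for i, t := range block {
		apply(t, i == target)
		if i == target {
			break // nothing after the target needs to run
		}
	}
}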

View File

@ -21,26 +21,23 @@ import (
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/compiler"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/gasprice"
"github.com/ethereum/go-ethereum/ethapi"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/internal/ethapi"
rpc "github.com/ethereum/go-ethereum/rpc"
"golang.org/x/net/context"
)
// EthApiBackend implements ethapi.Backend for full nodes
type EthApiBackend struct {
eth *FullNodeService
gpo *gasprice.GasPriceOracle
SolcPath string
solc *compiler.Solidity
eth *FullNodeService
gpo *gasprice.GasPriceOracle
}
func (b *EthApiBackend) SetHead(number uint64) {
@ -73,44 +70,35 @@ func (b *EthApiBackend) BlockByNumber(ctx context.Context, blockNr rpc.BlockNumb
return b.eth.blockchain.GetBlockByNumber(uint64(blockNr)), nil
}
func (b *EthApiBackend) StateByNumber(blockNr rpc.BlockNumber) (ethapi.State, error) {
func (b *EthApiBackend) StateAndHeaderByNumber(blockNr rpc.BlockNumber) (ethapi.State, *types.Header, error) {
// Pending state is only known by the miner
if blockNr == rpc.PendingBlockNumber {
_, state := b.eth.miner.Pending()
return &EthApiState{state}, nil
block, state := b.eth.miner.Pending()
return EthApiState{state}, block.Header(), nil
}
// Otherwise resolve the block number and return its state
header := b.HeaderByNumber(blockNr)
if header == nil {
return nil, nil
return nil, nil, nil
}
stateDb, err := state.New(header.Root, b.eth.chainDb)
return &EthApiState{stateDb}, err
return EthApiState{stateDb}, header, err
}
func (b *EthApiBackend) GetBlock(ctx context.Context, blockHash common.Hash) (*types.Block, error) {
return b.eth.blockchain.GetBlock(blockHash), nil
}
func (b *EthApiBackend) GetState(header *types.Header) (ethapi.State, error) {
stateDb, err := state.New(header.Root, b.eth.chainDb)
return &EthApiState{stateDb}, err
return b.eth.blockchain.GetBlockByHash(blockHash), nil
}
func (b *EthApiBackend) GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) {
return core.GetBlockReceipts(b.eth.chainDb, blockHash), nil
return core.GetBlockReceipts(b.eth.chainDb, blockHash, core.GetBlockNumber(b.eth.chainDb, blockHash)), nil
}
func (b *EthApiBackend) GetTd(blockHash common.Hash) *big.Int {
return b.eth.blockchain.GetTd(blockHash)
return b.eth.blockchain.GetTdByHash(blockHash)
}
func (b *EthApiBackend) GetVMEnv(ctx context.Context, msg core.Message, header *types.Header) (vm.Environment, func() error, error) {
stateDb, err := state.New(header.Root, b.eth.chainDb)
if err != nil {
return nil, nil, err
}
stateDb = stateDb.Copy()
func (b *EthApiBackend) GetVMEnv(ctx context.Context, msg core.Message, state ethapi.State, header *types.Header) (vm.Environment, func() error, error) {
stateDb := state.(EthApiState).state.Copy()
addr, _ := msg.From()
from := stateDb.GetOrNewStateObject(addr)
from.SetBalance(common.MaxBig)
@ -119,48 +107,55 @@ func (b *EthApiBackend) GetVMEnv(ctx context.Context, msg core.Message, header *
}
func (b *EthApiBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error {
b.eth.txMu.Lock()
defer b.eth.txMu.Unlock()
b.eth.txPool.SetLocal(signedTx)
return b.eth.txPool.Add(signedTx)
}
func (b *EthApiBackend) RemoveTx(txHash common.Hash) {
b.eth.txMu.Lock()
defer b.eth.txMu.Unlock()
b.eth.txPool.RemoveTx(txHash)
}
func (b *EthApiBackend) GetPoolTransactions() types.Transactions {
b.eth.txMu.Lock()
defer b.eth.txMu.Unlock()
return b.eth.txPool.GetTransactions()
}
func (b *EthApiBackend) GetPoolTransaction(txHash common.Hash) *types.Transaction {
b.eth.txMu.Lock()
defer b.eth.txMu.Unlock()
return b.eth.txPool.GetTransaction(txHash)
}
func (b *EthApiBackend) GetPoolNonce(ctx context.Context, addr common.Address) (uint64, error) {
b.eth.txMu.Lock()
defer b.eth.txMu.Unlock()
return b.eth.txPool.State().GetNonce(addr), nil
}
func (b *EthApiBackend) Stats() (pending int, queued int) {
b.eth.txMu.Lock()
defer b.eth.txMu.Unlock()
return b.eth.txPool.Stats()
}
func (b *EthApiBackend) TxPoolContent() (map[common.Address]map[uint64][]*types.Transaction, map[common.Address]map[uint64][]*types.Transaction) {
b.eth.txMu.Lock()
defer b.eth.txMu.Unlock()
return b.eth.TxPool().Content()
}
func (b *EthApiBackend) Solc() (*compiler.Solidity, error) {
var err error
if b.solc == nil {
b.solc, err = compiler.New(b.SolcPath)
}
return b.solc, err
}
func (b *EthApiBackend) SetSolc(solcPath string) (*compiler.Solidity, error) {
b.SolcPath = solcPath
b.solc = nil
return b.Solc()
}
func (b *EthApiBackend) Downloader() *downloader.Downloader {
return b.eth.Downloader()
}
@ -189,18 +184,18 @@ type EthApiState struct {
state *state.StateDB
}
func (s *EthApiState) GetBalance(ctx context.Context, addr common.Address) (*big.Int, error) {
func (s EthApiState) GetBalance(ctx context.Context, addr common.Address) (*big.Int, error) {
return s.state.GetBalance(addr), nil
}
func (s *EthApiState) GetCode(ctx context.Context, addr common.Address) ([]byte, error) {
func (s EthApiState) GetCode(ctx context.Context, addr common.Address) ([]byte, error) {
return s.state.GetCode(addr), nil
}
func (s *EthApiState) GetState(ctx context.Context, a common.Address, b common.Hash) (common.Hash, error) {
func (s EthApiState) GetState(ctx context.Context, a common.Address, b common.Hash) (common.Hash, error) {
return s.state.GetState(a, b), nil
}
func (s *EthApiState) GetNonce(ctx context.Context, addr common.Address) (uint64, error) {
func (s EthApiState) GetNonce(ctx context.Context, addr common.Address) (uint64, error) {
return s.state.GetNonce(addr), nil
}

View File

@ -18,7 +18,6 @@
package eth
import (
"bytes"
"errors"
"fmt"
"math/big"
@ -26,11 +25,13 @@ import (
"path/filepath"
"regexp"
"strings"
"sync"
"time"
"github.com/ethereum/ethash"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/compiler"
"github.com/ethereum/go-ethereum/common/httpclient"
"github.com/ethereum/go-ethereum/common/registrar/ethreg"
"github.com/ethereum/go-ethereum/core"
@ -39,15 +40,14 @@ import (
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/filters"
"github.com/ethereum/go-ethereum/eth/gasprice"
"github.com/ethereum/go-ethereum/ethapi"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/miner"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
)
@ -67,11 +67,11 @@ var (
type Config struct {
ChainConfig *core.ChainConfig // chain configuration
NetworkId int // Network ID to use for selecting peers to connect to
Genesis string // Genesis JSON to seed the chain database with
FastSync bool // Enables the state download based fast synchronisation algorithm
LightMode bool // Running in light client mode
LightServ int // Maximum percentage of time allowed for serving LES requests
NetworkId int // Network ID to use for selecting peers to connect to
Genesis string // Genesis JSON to seed the chain database with
FastSync bool // Enables the state download based fast synchronisation algorithm
LightMode bool // Running in light client mode
LightServ int // Maximum percentage of time allowed for serving LES requests
LightPeers int // Maximum number of LES client peers
BlockChainVersion int
@ -116,9 +116,11 @@ type LesServer interface {
type FullNodeService struct {
chainConfig *core.ChainConfig
// Channel for shutting down the service
shutdownChan chan bool
shutdownChan chan bool // Channel for shutting down the ethereum
stopDbUpgrade func() // stop chain db sequential key upgrade
// Handlers
txPool *core.TxPool
txMu sync.Mutex
blockchain *core.BlockChain
protocolManager *ProtocolManager
ls LesServer
@ -131,7 +133,7 @@ type FullNodeService struct {
httpclient *httpclient.HTTPClient
accountManager *accounts.Manager
apiBackend *EthApiBackend
ApiBackend *EthApiBackend
miner *miner.Miner
Mining bool
@ -139,6 +141,8 @@ type FullNodeService struct {
AutoDAG bool
autodagquit chan bool
etherbase common.Address
solcPath string
solc *compiler.Solidity
NatSpec bool
PowTest bool
@ -153,10 +157,11 @@ func (s *FullNodeService) AddLesServer(ls LesServer) {
// New creates a new FullNodeService object (including the
// initialisation of the common Ethereum object)
func New(ctx *node.ServiceContext, config *Config) (*FullNodeService, error) {
chainDb, dappDb, err := CreateDBs(ctx, config)
chainDb, dappDb, err := CreateDBs(ctx, config, "chaindata")
if err != nil {
return nil, err
}
stopDbUpgrade := upgradeSequentialKeys(chainDb)
if err := SetupGenesisBlock(&chainDb, config); err != nil {
return nil, err
}
@ -166,19 +171,21 @@ func New(ctx *node.ServiceContext, config *Config) (*FullNodeService, error) {
}
eth := &FullNodeService{
chainDb: chainDb,
dappDb: dappDb,
eventMux: ctx.EventMux,
accountManager: config.AccountManager,
pow: pow,
shutdownChan: make(chan bool),
httpclient: httpclient.New(config.DocRoot),
netVersionId: config.NetworkId,
NatSpec: config.NatSpec,
PowTest: config.PowTest,
etherbase: config.Etherbase,
MinerThreads: config.MinerThreads,
AutoDAG: config.AutoDAG,
chainDb: chainDb,
dappDb: dappDb,
eventMux: ctx.EventMux,
accountManager: config.AccountManager,
pow: pow,
shutdownChan: make(chan bool),
stopDbUpgrade: stopDbUpgrade,
httpclient: httpclient.New(config.DocRoot),
netVersionId: config.NetworkId,
NatSpec: config.NatSpec,
PowTest: config.PowTest,
etherbase: config.Etherbase,
MinerThreads: config.MinerThreads,
AutoDAG: config.AutoDAG,
solcPath: config.SolcPath,
}
if err := upgradeChainDatabase(chainDb); err != nil {
@ -200,7 +207,7 @@ func New(ctx *node.ServiceContext, config *Config) (*FullNodeService, error) {
// load the genesis block or write a new one if no genesis
// block is present in the database.
genesis := core.GetBlock(chainDb, core.GetCanonicalHash(chainDb, 0))
genesis := core.GetBlock(chainDb, core.GetCanonicalHash(chainDb, 0), 0)
if genesis == nil {
genesis, err = core.WriteDefaultGenesisBlock(chainDb)
if err != nil {
@ -244,15 +251,15 @@ func New(ctx *node.ServiceContext, config *Config) (*FullNodeService, error) {
GpobaseCorrectionFactor: config.GpobaseCorrectionFactor,
}
gpo := gasprice.NewGasPriceOracle(eth.blockchain, chainDb, eth.eventMux, gpoParams)
eth.apiBackend = &EthApiBackend{eth, gpo, config.SolcPath, nil}
eth.ApiBackend = &EthApiBackend{eth, gpo}
return eth, nil
}
// CreateDBs creates the chain and dapp databases for an Ethereum service
func CreateDBs(ctx *node.ServiceContext, config *Config) (chainDb, dappDb ethdb.Database, err error) {
func CreateDBs(ctx *node.ServiceContext, config *Config, name string) (chainDb, dappDb ethdb.Database, err error) {
// Open the chain database and perform any upgrades needed
chainDb, err = ctx.OpenDatabase("chaindata", config.DatabaseCache, config.DatabaseHandles)
chainDb, err = ctx.OpenDatabase(name, config.DatabaseCache, config.DatabaseHandles)
if err != nil {
return nil, nil, err
}
@ -285,7 +292,7 @@ func SetupGenesisBlock(chainDb *ethdb.Database, config *Config) error {
*chainDb = config.TestGenesisState
}
if config.TestGenesisBlock != nil {
core.WriteTd(*chainDb, config.TestGenesisBlock.Hash(), config.TestGenesisBlock.Difficulty())
core.WriteTd(*chainDb, config.TestGenesisBlock.Hash(), config.TestGenesisBlock.NumberU64(), config.TestGenesisBlock.Difficulty())
core.WriteBlock(*chainDb, config.TestGenesisBlock)
core.WriteCanonicalHash(*chainDb, config.TestGenesisBlock.Hash(), config.TestGenesisBlock.NumberU64())
core.WriteHeadBlockHash(*chainDb, config.TestGenesisBlock.Hash())
@ -311,11 +318,11 @@ func CreatePoW(config *Config) (*ethash.Ethash, error) {
// APIs returns the collection of RPC services the ethereum package offers.
// NOTE, some of these services probably need to be moved to somewhere else.
func (s *FullNodeService) APIs() []rpc.API {
return append(ethapi.GetAPIs(s.apiBackend), []rpc.API{
return append(ethapi.GetAPIs(s.ApiBackend, &s.solcPath, &s.solc), []rpc.API{
{
Namespace: "eth",
Version: "1.0",
Service: NewPublicFullEthereumAPI(s, s.apiBackend.gpo),
Service: NewPublicFullEthereumAPI(s),
Public: true,
}, {
Namespace: "eth",
@ -335,7 +342,7 @@ func (s *FullNodeService) APIs() []rpc.API {
}, {
Namespace: "eth",
Version: "1.0",
Service: filters.NewPublicFilterAPI(s.chainDb, s.eventMux),
Service: filters.NewPublicFilterAPI(s.ApiBackend),
Public: true,
}, {
Namespace: "admin",
@ -428,6 +435,9 @@ func (s *FullNodeService) Start(srvr *p2p.Server) error {
// Stop implements node.Service, terminating all internal goroutines used by the
// Ethereum protocol.
func (s *FullNodeService) Stop() error {
if s.stopDbUpgrade != nil {
s.stopDbUpgrade()
}
s.blockchain.Stop()
s.protocolManager.Stop()
if s.ls != nil {
@ -527,104 +537,3 @@ func dagFiles(epoch uint64) (string, string) {
dag := fmt.Sprintf("full-R%d-%x", ethashRevision, seedHash[:8])
return dag, "full-R" + dag
}
// upgradeChainDatabase ensures that the chain database stores blocks split into
// separate header and body entries.
func upgradeChainDatabase(db ethdb.Database) error {
// Short circuit if the head block is stored already as separate header and body
data, err := db.Get([]byte("LastBlock"))
if err != nil {
return nil
}
head := common.BytesToHash(data)
if block := core.GetBlockByHashOld(db, head); block == nil {
return nil
}
// At least some of the database is still the old format, upgrade (skip the head block!)
glog.V(logger.Info).Info("Old database detected, upgrading...")
if db, ok := db.(*ethdb.LDBDatabase); ok {
blockPrefix := []byte("block-hash-")
for it := db.NewIterator(); it.Next(); {
// Skip anything other than a combined block
if !bytes.HasPrefix(it.Key(), blockPrefix) {
continue
}
// Skip the head block (merge last to signal upgrade completion)
if bytes.HasSuffix(it.Key(), head.Bytes()) {
continue
}
// Load the block, split and serialize (order!)
block := core.GetBlockByHashOld(db, common.BytesToHash(bytes.TrimPrefix(it.Key(), blockPrefix)))
if err := core.WriteTd(db, block.Hash(), block.DeprecatedTd()); err != nil {
return err
}
if err := core.WriteBody(db, block.Hash(), block.Body()); err != nil {
return err
}
if err := core.WriteHeader(db, block.Header()); err != nil {
return err
}
if err := db.Delete(it.Key()); err != nil {
return err
}
}
// Lastly, upgrade the head block, disabling the upgrade mechanism
current := core.GetBlockByHashOld(db, head)
if err := core.WriteTd(db, current.Hash(), current.DeprecatedTd()); err != nil {
return err
}
if err := core.WriteBody(db, current.Hash(), current.Body()); err != nil {
return err
}
if err := core.WriteHeader(db, current.Header()); err != nil {
return err
}
}
return nil
}
func addMipmapBloomBins(db ethdb.Database) (err error) {
const mipmapVersion uint = 2
// check if the version is set. We ignore the data for now since there's
// only one version, making it safe to ignore
var data []byte
data, _ = db.Get([]byte("setting-mipmap-version"))
if len(data) > 0 {
var version uint
if err := rlp.DecodeBytes(data, &version); err == nil && version == mipmapVersion {
return nil
}
}
defer func() {
if err == nil {
var val []byte
val, err = rlp.EncodeToBytes(mipmapVersion)
if err == nil {
err = db.Put([]byte("setting-mipmap-version"), val)
}
return
}
}()
latestBlock := core.GetBlock(db, core.GetHeadBlockHash(db))
if latestBlock == nil { // clean database
return
}
tstart := time.Now()
glog.V(logger.Info).Infoln("upgrading db log bloom bins")
for i := uint64(0); i <= latestBlock.NumberU64(); i++ {
hash := core.GetCanonicalHash(db, i)
if (hash == common.Hash{}) {
return fmt.Errorf("chain db corrupted. Could not find block %d.", i)
}
core.WriteMipmapBloom(db, i, core.GetBlockReceipts(db, hash))
}
glog.V(logger.Info).Infoln("upgrade completed in", time.Since(tstart))
return nil
}

View File

@ -0,0 +1,74 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package eth
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/rlp"
)
const (
// The Ethereum main network genesis block.
defaultGenesisHash = "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"
badBlocksURL = "https://badblocks.ethdev.com"
)
var EnableBadBlockReporting = false
func sendBadBlockReport(block *types.Block, err error) {
if !EnableBadBlockReporting {
return
}
var (
blockRLP, _ = rlp.EncodeToBytes(block)
params = map[string]interface{}{
"block": common.Bytes2Hex(blockRLP),
"blockHash": block.Hash().Hex(),
"errortype": err.Error(),
"client": "go",
}
)
if !block.ReceivedAt.IsZero() {
params["receivedAt"] = block.ReceivedAt.UTC().String()
}
if p, ok := block.ReceivedFrom.(*peer); ok {
params["receivedFrom"] = map[string]interface{}{
"enode": fmt.Sprintf("enode://%x@%v", p.ID(), p.RemoteAddr()),
"name": p.Name(),
"protocolVersion": p.version,
}
}
jsonStr, _ := json.Marshal(map[string]interface{}{"method": "eth_badBlock", "id": "1", "jsonrpc": "2.0", "params": []interface{}{params}})
client := http.Client{Timeout: 8 * time.Second}
resp, err := client.Post(badBlocksURL, "application/json", bytes.NewReader(jsonStr))
if err != nil {
glog.V(logger.Debug).Infoln(err)
return
}
glog.V(logger.Debug).Infof("Bad Block Report posted (%d)", resp.StatusCode)
resp.Body.Close()
}
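A stand-alone sketch of the JSON-RPC payload shape assembled above; all field values are placeholders and only the structure mirrors the code:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	params := map[string]interface{}{
		"block":      "f90218...", // hex RLP of the offending block (placeholder)
		"blockHash":  "0x1234...", // placeholder hash
		"errortype":  "example validation error",
		"client":     "go",
		"receivedAt": "2016-07-03 19:44:31 +0000 UTC", // example timestamp, only set when known
	}
	payload := map[string]interface{}{
		"method":  "eth_badBlock",
		"id":      "1",
		"jsonrpc": "2.0",
		"params":  []interface{}{params},
	}
	out, _ := json.MarshalIndent(payload, "", "  ")
	fmt.Println(string(out)) // this body is POSTed to the bad block endpoint
}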

View File

@ -19,10 +19,9 @@ package eth
import (
"math/big"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethapi"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
"golang.org/x/net/context"
@ -43,14 +42,28 @@ type ContractBackend struct {
// NewContractBackend creates a new native contract backend using an existing
// Ethereum object.
func NewContractBackend(eth *FullNodeService) *ContractBackend {
func NewContractBackend(apiBackend ethapi.Backend) *ContractBackend {
return &ContractBackend{
eapi: ethapi.NewPublicEthereumAPI(eth.apiBackend),
bcapi: ethapi.NewPublicBlockChainAPI(eth.apiBackend),
txapi: ethapi.NewPublicTransactionPoolAPI(eth.apiBackend),
eapi: ethapi.NewPublicEthereumAPI(apiBackend, nil, nil),
bcapi: ethapi.NewPublicBlockChainAPI(apiBackend),
txapi: ethapi.NewPublicTransactionPoolAPI(apiBackend),
}
}
// HasCode implements bind.ContractVerifier.HasCode by retrieving any code associated
// with the contract from the local API, and checking its size.
func (b *ContractBackend) HasCode(ctx context.Context, contract common.Address, pending bool) (bool, error) {
if ctx == nil {
ctx = context.Background()
}
block := rpc.LatestBlockNumber
if pending {
block = rpc.PendingBlockNumber
}
out, err := b.bcapi.GetCode(ctx, contract, block)
return len(common.FromHex(out)) > 0, err
}
// ContractCall implements bind.ContractCaller executing an Ethereum contract
// call with the specified data as the input. The pending flag requests execution
// against the pending block, not the stable head of the chain.
@ -69,9 +82,6 @@ func (b *ContractBackend) ContractCall(ctx context.Context, contract common.Addr
}
// Execute the call and convert the output back to Go types
out, err := b.bcapi.Call(ctx, args, block)
if err == ethapi.ErrNoCode {
err = bind.ErrNoCode
}
return common.FromHex(out), err
}
@ -109,9 +119,6 @@ func (b *ContractBackend) EstimateGasLimit(ctx context.Context, sender common.Ad
Value: *rpc.NewHexNumber(value),
Data: common.ToHex(data),
})
if err == ethapi.ErrNoCode {
err = bind.ErrNoCode
}
return out.BigInt(), err
}

View File

@ -0,0 +1,341 @@
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package eth implements the Ethereum protocol.
package eth
import (
"bytes"
"encoding/binary"
"fmt"
"math/big"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/rlp"
)
var useSequentialKeys = []byte("dbUpgrade_20160530sequentialKeys")
// upgradeSequentialKeys checks the chain database version and
// starts a background process to make upgrades if necessary.
// Returns a stop function that blocks until the process has
// been safely stopped.
func upgradeSequentialKeys(db ethdb.Database) (stopFn func()) {
data, _ := db.Get(useSequentialKeys)
if len(data) > 0 && data[0] == 42 {
return nil // already converted
}
if data, _ := db.Get([]byte("LastHeader")); len(data) == 0 {
db.Put(useSequentialKeys, []byte{42})
return nil // empty database, nothing to do
}
stopChn := make(chan struct{})
stoppedChn := make(chan struct{})
go func() {
stopFn := func() bool {
select {
case <-time.After(time.Microsecond * 100): // make sure other processes don't get starved
case <-stopChn:
return true
}
return false
}
err, stopped := upgradeSequentialCanonicalNumbers(db, stopFn)
if err == nil && !stopped {
err, stopped = upgradeSequentialBlocks(db, stopFn)
}
if err == nil && !stopped {
err, stopped = upgradeSequentialOrphanedReceipts(db, stopFn)
}
if err == nil && !stopped {
glog.V(logger.Info).Infof("Database conversion successful")
db.Put(useSequentialKeys, []byte{42})
}
if err != nil {
glog.V(logger.Error).Infof("Database conversion failed: %v", err)
}
close(stoppedChn)
}()
return func() {
close(stopChn)
<-stoppedChn
}
}
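The function above follows a common stop-function pattern: a background worker yields between batches, watches a stop channel, and the returned closure blocks until the worker has exited. A minimal stand-alone sketch of that pattern (names are illustrative):

package main

import (
	"fmt"
	"time"
)

// startWorker launches a background conversion loop and returns a function
// that stops it and only returns once the goroutine has shut down cleanly.
func startWorker() (stop func()) {
	stopChn := make(chan struct{})
	stoppedChn := make(chan struct{})
	go func() {
		defer close(stoppedChn)
		for i := 0; ; i++ {
			select {
			case <-time.After(100 * time.Microsecond): // yield between batches
			case <-stopChn:
				return
			}
			fmt.Println("converted batch", i)
		}
	}()
	return func() {
		close(stopChn)
		<-stoppedChn // block until the worker has exited
	}
}

func main() {
	stop := startWorker()
	time.Sleep(time.Millisecond)
	stop()
}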
// upgradeSequentialCanonicalNumbers reads all old format canonical numbers from
// the database, writes them in new format and deletes the old ones if successful.
func upgradeSequentialCanonicalNumbers(db ethdb.Database, stopFn func() bool) (error, bool) {
prefix := []byte("block-num-")
it := db.(*ethdb.LDBDatabase).NewIterator()
it.Seek(prefix)
cnt := 0
for bytes.HasPrefix(it.Key(), prefix) {
keyPtr := it.Key()
if len(keyPtr) < 20 {
cnt++
if cnt%100000 == 0 {
glog.V(logger.Info).Infof("converting %d canonical numbers...", cnt)
}
number := big.NewInt(0).SetBytes(keyPtr[10:]).Uint64()
newKey := []byte("h12345678n")
binary.BigEndian.PutUint64(newKey[1:9], number)
if err := db.Put(newKey, it.Value()); err != nil {
return err, false
}
if err := db.Delete(keyPtr); err != nil {
return err, false
}
}
if stopFn() {
return nil, true
}
it.Next()
}
if cnt > 0 {
glog.V(logger.Info).Infof("converted %d canonical numbers...", cnt)
}
return nil, false
}
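A small sketch of the key rewrite performed above, assuming the layout shown in the loop: the old "block-num-" + number key becomes "h" + 8-byte big-endian number + "n".

package main

import (
	"encoding/binary"
	"fmt"
)

// newCanonicalKey mirrors the construction in the loop above: a 10-byte key
// whose middle 8 bytes hold the block number in big-endian order.
func newCanonicalKey(number uint64) []byte {
	key := []byte("h12345678n") // bytes 1..8 are overwritten below
	binary.BigEndian.PutUint64(key[1:9], number)
	return key
}

func main() {
	fmt.Printf("%x\n", newCanonicalKey(4096)) // "h" + 0x0000000000001000 + "n"
}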
// upgradeSequentialBlocks reads all old format block headers, bodies, TDs and block
// receipts from the database, writes them in new format and deletes the old ones
// if successful.
func upgradeSequentialBlocks(db ethdb.Database, stopFn func() bool) (error, bool) {
prefix := []byte("block-")
it := db.(*ethdb.LDBDatabase).NewIterator()
it.Seek(prefix)
cnt := 0
for bytes.HasPrefix(it.Key(), prefix) {
keyPtr := it.Key()
if len(keyPtr) >= 38 {
cnt++
if cnt%10000 == 0 {
glog.V(logger.Info).Infof("converting %d blocks...", cnt)
}
// convert header, body, td and block receipts
var keyPrefix [38]byte
copy(keyPrefix[:], keyPtr[0:38])
hash := keyPrefix[6:38]
if err := upgradeSequentialBlockData(db, hash); err != nil {
return err, false
}
// delete old db entries belonging to this hash
for bytes.HasPrefix(it.Key(), keyPrefix[:]) {
if err := db.Delete(it.Key()); err != nil {
return err, false
}
it.Next()
}
if err := db.Delete(append([]byte("receipts-block-"), hash...)); err != nil {
return err, false
}
} else {
it.Next()
}
if stopFn() {
return nil, true
}
}
if cnt > 0 {
glog.V(logger.Info).Infof("converted %d blocks...", cnt)
}
return nil, false
}
// upgradeSequentialOrphanedReceipts removes any old format block receipts from the
// database that did not have a corresponding block
func upgradeSequentialOrphanedReceipts(db ethdb.Database, stopFn func() bool) (error, bool) {
prefix := []byte("receipts-block-")
it := db.(*ethdb.LDBDatabase).NewIterator()
it.Seek(prefix)
cnt := 0
for bytes.HasPrefix(it.Key(), prefix) {
// phase 2 already converted receipts belonging to existing
// blocks, just remove if there's anything left
cnt++
if err := db.Delete(it.Key()); err != nil {
return err, false
}
if stopFn() {
return nil, true
}
it.Next()
}
if cnt > 0 {
glog.V(logger.Info).Infof("removed %d orphaned block receipts...", cnt)
}
return nil, false
}
// upgradeSequentialBlockData upgrades the header, body, td and block receipts
// database entries belonging to a single hash (doesn't delete old data).
func upgradeSequentialBlockData(db ethdb.Database, hash []byte) error {
// get old chain data and block number
headerRLP, _ := db.Get(append(append([]byte("block-"), hash...), []byte("-header")...))
if len(headerRLP) == 0 {
return nil
}
header := new(types.Header)
if err := rlp.Decode(bytes.NewReader(headerRLP), header); err != nil {
return err
}
number := header.Number.Uint64()
bodyRLP, _ := db.Get(append(append([]byte("block-"), hash...), []byte("-body")...))
tdRLP, _ := db.Get(append(append([]byte("block-"), hash...), []byte("-td")...))
receiptsRLP, _ := db.Get(append([]byte("receipts-block-"), hash...))
// store new hash -> number association
encNum := make([]byte, 8)
binary.BigEndian.PutUint64(encNum, number)
if err := db.Put(append([]byte("H"), hash...), encNum); err != nil {
return err
}
// store new chain data
if err := db.Put(append(append([]byte("h"), encNum...), hash...), headerRLP); err != nil {
return err
}
if len(tdRLP) != 0 {
if err := db.Put(append(append(append([]byte("h"), encNum...), hash...), []byte("t")...), tdRLP); err != nil {
return err
}
}
if len(bodyRLP) != 0 {
if err := db.Put(append(append([]byte("b"), encNum...), hash...), bodyRLP); err != nil {
return err
}
}
if len(receiptsRLP) != 0 {
if err := db.Put(append(append([]byte("r"), encNum...), hash...), receiptsRLP); err != nil {
return err
}
}
return nil
}
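A stand-alone sketch of the sequential key schema written above, inferred from the Put calls rather than taken from any spec; numbers are 8-byte big-endian and each namespace is a single prefix byte:

package main

import (
	"encoding/binary"
	"fmt"
)

func encNum(number uint64) []byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, number)
	return b
}

func main() {
	hash := []byte("0123456789abcdef0123456789abcdef") // stands in for a 32-byte block hash
	number := uint64(1000000)

	hashToNum := append([]byte("H"), hash...)                                     // "H" + hash       -> block number
	headerKey := append(append([]byte("h"), encNum(number)...), hash...)          // "h" + num + hash -> header RLP
	tdKey := append(append(append([]byte("h"), encNum(number)...), hash...), 't') // header key + "t" -> total difficulty
	bodyKey := append(append([]byte("b"), encNum(number)...), hash...)            // "b" + num + hash -> body RLP
	receiptKey := append(append([]byte("r"), encNum(number)...), hash...)         // "r" + num + hash -> block receipts

	fmt.Println(len(hashToNum), len(headerKey), len(tdKey), len(bodyKey), len(receiptKey)) // 33 41 42 41 41
}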
// upgradeChainDatabase ensures that the chain database stores blocks split into
// separate header and body entries.
func upgradeChainDatabase(db ethdb.Database) error {
// Short circuit if the head block is stored already as separate header and body
data, err := db.Get([]byte("LastBlock"))
if err != nil {
return nil
}
head := common.BytesToHash(data)
if block := core.GetBlockByHashOld(db, head); block == nil {
return nil
}
// At least some of the database is still the old format, upgrade (skip the head block!)
glog.V(logger.Info).Info("Old database detected, upgrading...")
if db, ok := db.(*ethdb.LDBDatabase); ok {
blockPrefix := []byte("block-hash-")
for it := db.NewIterator(); it.Next(); {
// Skip anything other than a combined block
if !bytes.HasPrefix(it.Key(), blockPrefix) {
continue
}
// Skip the head block (merge last to signal upgrade completion)
if bytes.HasSuffix(it.Key(), head.Bytes()) {
continue
}
// Load the block, split and serialize (order!)
block := core.GetBlockByHashOld(db, common.BytesToHash(bytes.TrimPrefix(it.Key(), blockPrefix)))
if err := core.WriteTd(db, block.Hash(), block.NumberU64(), block.DeprecatedTd()); err != nil {
return err
}
if err := core.WriteBody(db, block.Hash(), block.NumberU64(), block.Body()); err != nil {
return err
}
if err := core.WriteHeader(db, block.Header()); err != nil {
return err
}
if err := db.Delete(it.Key()); err != nil {
return err
}
}
// Lastly, upgrade the head block, disabling the upgrade mechanism
current := core.GetBlockByHashOld(db, head)
if err := core.WriteTd(db, current.Hash(), current.NumberU64(), current.DeprecatedTd()); err != nil {
return err
}
if err := core.WriteBody(db, current.Hash(), current.NumberU64(), current.Body()); err != nil {
return err
}
if err := core.WriteHeader(db, current.Header()); err != nil {
return err
}
}
return nil
}
func addMipmapBloomBins(db ethdb.Database) (err error) {
const mipmapVersion uint = 2
// check if the version is set. We ignore the data for now since there's
// only one version, making it safe to ignore
var data []byte
data, _ = db.Get([]byte("setting-mipmap-version"))
if len(data) > 0 {
var version uint
if err := rlp.DecodeBytes(data, &version); err == nil && version == mipmapVersion {
return nil
}
}
defer func() {
if err == nil {
var val []byte
val, err = rlp.EncodeToBytes(mipmapVersion)
if err == nil {
err = db.Put([]byte("setting-mipmap-version"), val)
}
return
}
}()
latestHash := core.GetHeadBlockHash(db)
latestBlock := core.GetBlock(db, latestHash, core.GetBlockNumber(db, latestHash))
if latestBlock == nil { // clean database
return
}
tstart := time.Now()
glog.V(logger.Info).Infoln("upgrading db log bloom bins")
for i := uint64(0); i <= latestBlock.NumberU64(); i++ {
hash := core.GetCanonicalHash(db, i)
if (hash == common.Hash{}) {
return fmt.Errorf("chain db corrupted. Could not find block %d.", i)
}
core.WriteMipmapBloom(db, i, core.GetBlockReceipts(db, hash, i))
}
glog.V(logger.Info).Infoln("upgrade completed in", time.Since(tstart))
return nil
}

File diff suppressed because it is too large

View File

@ -23,6 +23,8 @@ import (
"errors"
"fmt"
"math"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
@ -31,8 +33,8 @@ import (
)
const (
maxLackingHashes = 4096 // Maximum number of entries allowed on the list or lacking items
throughputImpact = 0.1 // The impact a single measurement has on a peer's final throughput value.
maxLackingHashes = 4096 // Maximum number of entries allowed on the list or lacking items
measurementImpact = 0.1 // The impact a single measurement has on a peer's final throughput value.
)
// Hash and block fetchers belonging to eth/61 and below
@ -58,15 +60,20 @@ type peer struct {
id string // Unique identifier of the peer
head common.Hash // Hash of the peers latest known block
headerIdle int32 // Current header activity state of the peer (idle = 0, active = 1)
blockIdle int32 // Current block activity state of the peer (idle = 0, active = 1)
receiptIdle int32 // Current receipt activity state of the peer (idle = 0, active = 1)
stateIdle int32 // Current node data activity state of the peer (idle = 0, active = 1)
headerThroughput float64 // Number of headers measured to be retrievable per second
blockThroughput float64 // Number of blocks (bodies) measured to be retrievable per second
receiptThroughput float64 // Number of receipts measured to be retrievable per second
stateThroughput float64 // Number of node data pieces measured to be retrievable per second
blockStarted time.Time // Time instance when the last block (body)fetch was started
rtt time.Duration // Request round trip time to track responsiveness (QoS)
headerStarted time.Time // Time instance when the last header fetch was started
blockStarted time.Time // Time instance when the last block (body) fetch was started
receiptStarted time.Time // Time instance when the last receipt fetch was started
stateStarted time.Time // Time instance when the last node data fetch was started
@ -118,10 +125,12 @@ func (p *peer) Reset() {
p.lock.Lock()
defer p.lock.Unlock()
atomic.StoreInt32(&p.headerIdle, 0)
atomic.StoreInt32(&p.blockIdle, 0)
atomic.StoreInt32(&p.receiptIdle, 0)
atomic.StoreInt32(&p.stateIdle, 0)
p.headerThroughput = 0
p.blockThroughput = 0
p.receiptThroughput = 0
p.stateThroughput = 0
@ -151,6 +160,24 @@ func (p *peer) Fetch61(request *fetchRequest) error {
return nil
}
// FetchHeaders sends a header retrieval request to the remote peer.
func (p *peer) FetchHeaders(from uint64, count int) error {
// Sanity check the protocol version
if p.version < 62 {
panic(fmt.Sprintf("header fetch [eth/62+] requested on eth/%d", p.version))
}
// Short circuit if the peer is already fetching
if !atomic.CompareAndSwapInt32(&p.headerIdle, 0, 1) {
return errAlreadyFetching
}
p.headerStarted = time.Now()
// Issue the header retrieval request (absolute upwards without gaps)
go p.getAbsHeaders(from, count, 0, false)
return nil
}
// FetchBodies sends a block body retrieval request to the remote peer.
func (p *peer) FetchBodies(request *fetchRequest) error {
// Sanity check the protocol version
@ -217,6 +244,13 @@ func (p *peer) FetchNodeData(request *fetchRequest) error {
return nil
}
// SetHeadersIdle sets the peer to idle, allowing it to execute new header retrieval
// requests. Its estimated header retrieval throughput is updated with that measured
// just now.
func (p *peer) SetHeadersIdle(delivered int) {
p.setIdle(p.headerStarted, delivered, &p.headerThroughput, &p.headerIdle)
}
// SetBlocksIdle sets the peer to idle, allowing it to execute new block retrieval
// requests. Its estimated block retrieval throughput is updated with that measured
// just now.
@ -260,35 +294,47 @@ func (p *peer) setIdle(started time.Time, delivered int, throughput *float64, id
return
}
// Otherwise update the throughput with a new measurement
measured := float64(delivered) / (float64(time.Since(started)+1) / float64(time.Second)) // +1 (ns) to ensure non-zero divisor
*throughput = (1-throughputImpact)*(*throughput) + throughputImpact*measured
elapsed := time.Since(started) + 1 // +1 (ns) to ensure non-zero divisor
measured := float64(delivered) / (float64(elapsed) / float64(time.Second))
*throughput = (1-measurementImpact)*(*throughput) + measurementImpact*measured
p.rtt = time.Duration((1-measurementImpact)*float64(p.rtt) + measurementImpact*float64(elapsed))
}
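The update above is an exponential moving average with weight measurementImpact = 0.1, applied to both the throughput and the RTT estimates. A worked stand-alone example of how quickly the estimate moves toward new measurements:

package main

import "fmt"

const measurementImpact = 0.1

// update applies the same EMA as setIdle: keep 90% of the old estimate and
// blend in 10% of the latest measurement.
func update(estimate, measured float64) float64 {
	return (1-measurementImpact)*estimate + measurementImpact*measured
}

func main() {
	throughput := 100.0 // items/s estimated so far
	for _, m := range []float64{40, 40, 40} {
		throughput = update(throughput, m)
		fmt.Printf("%.1f\n", throughput) // 94.0, 88.6, 83.7: converges slowly toward 40
	}
}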
// HeaderCapacity retrieves the peers header download allowance based on its
// previously discovered throughput.
func (p *peer) HeaderCapacity(targetRTT time.Duration) int {
p.lock.RLock()
defer p.lock.RUnlock()
return int(math.Min(1+math.Max(1, p.headerThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxHeaderFetch)))
}
// BlockCapacity retrieves the peers block download allowance based on its
// previously discovered throughput.
func (p *peer) BlockCapacity() int {
func (p *peer) BlockCapacity(targetRTT time.Duration) int {
p.lock.RLock()
defer p.lock.RUnlock()
return int(math.Max(1, math.Min(p.blockThroughput*float64(blockTargetRTT)/float64(time.Second), float64(MaxBlockFetch))))
return int(math.Min(1+math.Max(1, p.blockThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxBlockFetch)))
}
// ReceiptCapacity retrieves the peers receipt download allowance based on its
// previously discovered throughput.
func (p *peer) ReceiptCapacity() int {
func (p *peer) ReceiptCapacity(targetRTT time.Duration) int {
p.lock.RLock()
defer p.lock.RUnlock()
return int(math.Max(1, math.Min(p.receiptThroughput*float64(receiptTargetRTT)/float64(time.Second), float64(MaxReceiptFetch))))
return int(math.Min(1+math.Max(1, p.receiptThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxReceiptFetch)))
}
// NodeDataCapacity retrieves the peers state download allowance based on its
// previously discovered throughput.
func (p *peer) NodeDataCapacity() int {
func (p *peer) NodeDataCapacity(targetRTT time.Duration) int {
p.lock.RLock()
defer p.lock.RUnlock()
return int(math.Max(1, math.Min(p.stateThroughput*float64(stateTargetRTT)/float64(time.Second), float64(MaxStateFetch))))
return int(math.Min(1+math.Max(1, p.stateThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxStateFetch)))
}
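All four capacity helpers share the same shape: the allowance is roughly the number of items the peer should manage to return within the target RTT, floored at 2 and capped at the per-request protocol maximum. A worked sketch (MaxBlockFetch = 128 is assumed here for illustration only):

package main

import (
	"fmt"
	"math"
	"time"
)

const maxBlockFetch = 128 // assumption for the example only

// blockCapacity mirrors the formula above: min(1 + max(1, throughput*RTT), cap).
func blockCapacity(throughput float64, targetRTT time.Duration) int {
	return int(math.Min(1+math.Max(1, throughput*float64(targetRTT)/float64(time.Second)), float64(maxBlockFetch)))
}

func main() {
	fmt.Println(blockCapacity(50, 500*time.Millisecond)) // 26: 1 + 50*0.5
	fmt.Println(blockCapacity(0, 500*time.Millisecond))  // 2: slow peers still get a minimal probe
	fmt.Println(blockCapacity(1000, time.Second))        // 128: capped at the maximum
}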
// MarkLacking appends a new entity to the set of items (blocks, receipts, states)
@ -322,15 +368,17 @@ func (p *peer) String() string {
p.lock.RLock()
defer p.lock.RUnlock()
return fmt.Sprintf("Peer %s [%s]", p.id,
fmt.Sprintf("blocks %3.2f/s, ", p.blockThroughput)+
fmt.Sprintf("receipts %3.2f/s, ", p.receiptThroughput)+
fmt.Sprintf("states %3.2f/s, ", p.stateThroughput)+
fmt.Sprintf("lacking %4d", len(p.lacking)),
)
return fmt.Sprintf("Peer %s [%s]", p.id, strings.Join([]string{
fmt.Sprintf("hs %3.2f/s", p.headerThroughput),
fmt.Sprintf("bs %3.2f/s", p.blockThroughput),
fmt.Sprintf("rs %3.2f/s", p.receiptThroughput),
fmt.Sprintf("ss %3.2f/s", p.stateThroughput),
fmt.Sprintf("miss %4d", len(p.lacking)),
fmt.Sprintf("rtt %v", p.rtt),
}, ", "))
}
// peerSet represents the collection of active peers participating in the block
// peerSet represents the collection of active peers participating in the chain
// download procedure.
type peerSet struct {
peers map[string]*peer
@ -359,9 +407,13 @@ func (ps *peerSet) Reset() {
// peer is already known.
//
// The method also sets the starting throughput values of the new peer to the
// average of all existing peers, to give it a realistic change of being used
// average of all existing peers, to give it a realistic chance of being used
// for data retrievals.
func (ps *peerSet) Register(p *peer) error {
// Retrieve the current median RTT as a sane default
p.rtt = ps.medianRTT()
// Register the new peer with some meaningful defaults
ps.lock.Lock()
defer ps.lock.Unlock()
@ -369,15 +421,17 @@ func (ps *peerSet) Register(p *peer) error {
return errAlreadyRegistered
}
if len(ps.peers) > 0 {
p.blockThroughput, p.receiptThroughput, p.stateThroughput = 0, 0, 0
p.headerThroughput, p.blockThroughput, p.receiptThroughput, p.stateThroughput = 0, 0, 0, 0
for _, peer := range ps.peers {
peer.lock.RLock()
p.headerThroughput += peer.headerThroughput
p.blockThroughput += peer.blockThroughput
p.receiptThroughput += peer.receiptThroughput
p.stateThroughput += peer.stateThroughput
peer.lock.RUnlock()
}
p.headerThroughput /= float64(len(ps.peers))
p.blockThroughput /= float64(len(ps.peers))
p.receiptThroughput /= float64(len(ps.peers))
p.stateThroughput /= float64(len(ps.peers))
@ -441,6 +495,20 @@ func (ps *peerSet) BlockIdlePeers() ([]*peer, int) {
return ps.idlePeers(61, 61, idle, throughput)
}
// HeaderIdlePeers retrieves a flat list of all the currently header-idle peers
// within the active peer set, ordered by their reputation.
func (ps *peerSet) HeaderIdlePeers() ([]*peer, int) {
idle := func(p *peer) bool {
return atomic.LoadInt32(&p.headerIdle) == 0
}
throughput := func(p *peer) float64 {
p.lock.RLock()
defer p.lock.RUnlock()
return p.headerThroughput
}
return ps.idlePeers(62, 64, idle, throughput)
}
// BodyIdlePeers retrieves a flat list of all the currently body-idle peers within
// the active peer set, ordered by their reputation.
func (ps *peerSet) BodyIdlePeers() ([]*peer, int) {
@ -508,3 +576,34 @@ func (ps *peerSet) idlePeers(minProtocol, maxProtocol int, idleCheck func(*peer)
}
return idle, total
}
// medianRTT returns the median RTT of the peerset, considering only the tuning
// peers if there are more peers available.
func (ps *peerSet) medianRTT() time.Duration {
// Gather all the currently measured round trip times
ps.lock.RLock()
defer ps.lock.RUnlock()
rtts := make([]float64, 0, len(ps.peers))
for _, p := range ps.peers {
p.lock.RLock()
rtts = append(rtts, float64(p.rtt))
p.lock.RUnlock()
}
sort.Float64s(rtts)
median := rttMaxEstimate
if qosTuningPeers <= len(rtts) {
median = time.Duration(rtts[qosTuningPeers/2]) // Median of our tuning peers
} else if len(rtts) > 0 {
median = time.Duration(rtts[len(rtts)/2]) // Median of our connected peers (still maintain some baseline QoS this way)
}
// Restrict the RTT into some QoS defaults, irrespective of the true RTT
if median < rttMinEstimate {
median = rttMinEstimate
}
if median > rttMaxEstimate {
median = rttMaxEstimate
}
return median
}
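A stand-alone sketch of the median-and-clamp logic above; the constant values are assumptions for the example (the real ones live elsewhere in the downloader):

package main

import (
	"fmt"
	"sort"
	"time"
)

const (
	rttMinEstimate = 2 * time.Second  // assumed lower bound
	rttMaxEstimate = 20 * time.Second // assumed upper bound
	qosTuningPeers = 5                // assumed tuning-peer count
)

// medianRTT sorts the samples, takes the median of the tuning peers when
// enough are connected (otherwise of everyone), then clamps into QoS bounds.
func medianRTT(rtts []float64) time.Duration {
	sort.Float64s(rtts)
	median := rttMaxEstimate
	if qosTuningPeers <= len(rtts) {
		median = time.Duration(rtts[qosTuningPeers/2])
	} else if len(rtts) > 0 {
		median = time.Duration(rtts[len(rtts)/2])
	}
	if median < rttMinEstimate {
		median = rttMinEstimate
	}
	if median > rttMaxEstimate {
		median = rttMaxEstimate
	}
	return median
}

func main() {
	rtts := []float64{float64(800 * time.Millisecond), float64(3 * time.Second), float64(9 * time.Second)}
	fmt.Println(medianRTT(rtts)) // 3s: fewer than qosTuningPeers samples, median of all, already in range
}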

View File

@ -40,7 +40,7 @@ import (
var (
blockCacheLimit = 8192 // Maximum number of blocks to cache before throttling the download
maxInFlightStates = 4096 // Maximum number of state downloads to allow concurrently
maxInFlightStates = 8192 // Maximum number of state downloads to allow concurrently
)
var (
@ -52,6 +52,7 @@ var (
// fetchRequest is a currently running data retrieval operation.
type fetchRequest struct {
Peer *peer // Peer to which the request was sent
From uint64 // [eth/62] Requested chain element index (used for skeleton fills only)
Hashes map[common.Hash]int // [eth/61] Requested hashes with their insertion index (priority)
Headers []*types.Header // [eth/62] Requested headers, sorted by request order
Time time.Time // Time when the request was made
@ -79,6 +80,18 @@ type queue struct {
headerHead common.Hash // [eth/62] Hash of the last queued header to verify order
// Headers are "special", they download in batches, supported by a skeleton chain
headerTaskPool map[uint64]*types.Header // [eth/62] Pending header retrieval tasks, mapping starting indexes to skeleton headers
headerTaskQueue *prque.Prque // [eth/62] Priority queue of the skeleton indexes to fetch the filling headers for
headerPeerMiss map[string]map[uint64]struct{} // [eth/62] Set of per-peer header batches known to be unavailable
headerPendPool map[string]*fetchRequest // [eth/62] Currently pending header retrieval operations
headerDonePool map[uint64]struct{} // [eth/62] Set of the completed header fetches
headerResults []*types.Header // [eth/62] Result cache accumulating the completed headers
headerProced int // [eth/62] Number of headers already processed from the results
headerOffset uint64 // [eth/62] Number of the first header in the result cache
headerContCh chan bool // [eth/62] Channel to notify when header download finishes
// All data retrievals below are based on an already assembled header chain
blockTaskPool map[common.Hash]*types.Header // [eth/62] Pending block (body) retrieval tasks, mapping hashes to headers
blockTaskQueue *prque.Prque // [eth/62] Priority queue of the headers to fetch the blocks (bodies) for
blockPendPool map[string]*fetchRequest // [eth/62] Currently pending block (body) retrieval operations
@ -113,6 +126,8 @@ func newQueue(stateDb ethdb.Database) *queue {
return &queue{
hashPool: make(map[common.Hash]int),
hashQueue: prque.New(),
headerPendPool: make(map[string]*fetchRequest),
headerContCh: make(chan bool),
blockTaskPool: make(map[common.Hash]*types.Header),
blockTaskQueue: prque.New(),
blockPendPool: make(map[string]*fetchRequest),
@ -149,6 +164,8 @@ func (q *queue) Reset() {
q.headerHead = common.Hash{}
q.headerPendPool = make(map[string]*fetchRequest)
q.blockTaskPool = make(map[common.Hash]*types.Header)
q.blockTaskQueue.Reset()
q.blockPendPool = make(map[string]*fetchRequest)
@ -178,6 +195,14 @@ func (q *queue) Close() {
q.active.Broadcast()
}
// PendingHeaders retrieves the number of header requests pending for retrieval.
func (q *queue) PendingHeaders() int {
q.lock.Lock()
defer q.lock.Unlock()
return q.headerTaskQueue.Size()
}
// PendingBlocks retrieves the number of block (body) requests pending for retrieval.
func (q *queue) PendingBlocks() int {
q.lock.Lock()
@ -205,6 +230,15 @@ func (q *queue) PendingNodeData() int {
return 0
}
// InFlightHeaders retrieves whether there are header fetch requests currently
// in flight.
func (q *queue) InFlightHeaders() bool {
q.lock.Lock()
defer q.lock.Unlock()
return len(q.headerPendPool) > 0
}
// InFlightBlocks retrieves whether there are block fetch requests currently in
// flight.
func (q *queue) InFlightBlocks() bool {
@ -317,6 +351,45 @@ func (q *queue) Schedule61(hashes []common.Hash, fifo bool) []common.Hash {
return inserts
}
// ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill
// up an already retrieved header skeleton.
func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
q.lock.Lock()
defer q.lock.Unlock()
// No skeleton retrieval can be in progress, fail hard if so (huge implementation bug)
if q.headerResults != nil {
panic("skeleton assembly already in progress")
}
// Schedule all the header retrieval tasks for the skeleton assembly
q.headerTaskPool = make(map[uint64]*types.Header)
q.headerTaskQueue = prque.New()
q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains
q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch)
q.headerProced = 0
q.headerOffset = from
q.headerContCh = make(chan bool, 1)
for i, header := range skeleton {
index := from + uint64(i*MaxHeaderFetch)
q.headerTaskPool[index] = header
q.headerTaskQueue.Push(index, -float32(index))
}
}
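A small sketch of the indexing above: each skeleton header anchors one fill batch of MaxHeaderFetch headers starting at from + i*MaxHeaderFetch (MaxHeaderFetch = 192 is assumed here for illustration only):

package main

import "fmt"

const maxHeaderFetch = 192 // assumption for the example only

func main() {
	from := uint64(1000)
	skeleton := 3 // number of skeleton headers received
	for i := 0; i < skeleton; i++ {
		start := from + uint64(i*maxHeaderFetch)
		fmt.Printf("batch %d fills headers [%d, %d]\n", i, start, start+maxHeaderFetch-1)
	}
}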
// RetrieveHeaders retrieves the assembled header chain based on the scheduled
// skeleton.
func (q *queue) RetrieveHeaders() ([]*types.Header, int) {
q.lock.Lock()
defer q.lock.Unlock()
headers, proced := q.headerResults, q.headerProced
q.headerResults, q.headerProced = nil, 0
return headers, proced
}
// Schedule adds a set of headers for the download queue for scheduling, returning
// the new headers encountered.
func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
@ -437,6 +510,46 @@ func (q *queue) countProcessableItems() int {
return len(q.resultCache)
}
// ReserveHeaders reserves a set of headers for the given peer, skipping any
// previously failed batches.
func (q *queue) ReserveHeaders(p *peer, count int) *fetchRequest {
q.lock.Lock()
defer q.lock.Unlock()
// Short circuit if the peer's already downloading something (sanity check to
// not corrupt state)
if _, ok := q.headerPendPool[p.id]; ok {
return nil
}
// Retrieve a batch of hashes, skipping previously failed ones
send, skip := uint64(0), []uint64{}
for send == 0 && !q.headerTaskQueue.Empty() {
from, _ := q.headerTaskQueue.Pop()
if q.headerPeerMiss[p.id] != nil {
if _, ok := q.headerPeerMiss[p.id][from.(uint64)]; ok {
skip = append(skip, from.(uint64))
continue
}
}
send = from.(uint64)
}
// Merge all the skipped batches back
for _, from := range skip {
q.headerTaskQueue.Push(from, -float32(from))
}
// Assemble and return the header download request
if send == 0 {
return nil
}
request := &fetchRequest{
Peer: p,
From: send,
Time: time.Now(),
}
q.headerPendPool[p.id] = request
return request
}
// ReserveBlocks reserves a set of block hashes for the given peer, skipping any
// previously failed download.
func (q *queue) ReserveBlocks(p *peer, count int) *fetchRequest {
@ -635,6 +748,11 @@ func (q *queue) reserveHeaders(p *peer, count int, taskPool map[common.Hash]*typ
return request, progress, nil
}
// CancelHeaders aborts a fetch request, returning all pending skeleton indexes to the queue.
func (q *queue) CancelHeaders(request *fetchRequest) {
q.cancel(request, q.headerTaskQueue, q.headerPendPool)
}
// CancelBlocks aborts a fetch request, returning all pending hashes to the queue.
func (q *queue) CancelBlocks(request *fetchRequest) {
q.cancel(request, q.hashQueue, q.blockPendPool)
@ -663,6 +781,9 @@ func (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool m
q.lock.Lock()
defer q.lock.Unlock()
if request.From > 0 {
taskQueue.Push(request.From, -float32(request.From))
}
for hash, index := range request.Hashes {
taskQueue.Push(hash, float32(index))
}
@ -702,6 +823,15 @@ func (q *queue) Revoke(peerId string) {
}
}
// ExpireHeaders checks for in flight requests that exceeded a timeout allowance,
// canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireHeaders(timeout time.Duration) map[string]int {
q.lock.Lock()
defer q.lock.Unlock()
return q.expire(timeout, q.headerPendPool, q.headerTaskQueue, headerTimeoutMeter)
}
// ExpireBlocks checks for in flight requests that exceeded a timeout allowance,
// canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireBlocks(timeout time.Duration) map[string]int {
@ -753,6 +883,9 @@ func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest,
timeoutMeter.Mark(1)
// Return any unsatisfied requests to the pool
if request.From > 0 {
taskQueue.Push(request.From, -float32(request.From))
}
for hash, index := range request.Hashes {
taskQueue.Push(hash, float32(index))
}
@ -842,6 +975,94 @@ func (q *queue) DeliverBlocks(id string, blocks []*types.Block) (int, error) {
}
}
// DeliverHeaders injects a header retrieval response into the header results
// cache. This method either accepts all headers it received, or none of them
// if they do not map correctly to the skeleton.
//
// If the headers are accepted, the method makes an attempt to deliver the set
// of ready headers to the processor to keep the pipeline full. However it will
// not block to prevent stalling other pending deliveries.
func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh chan []*types.Header) (int, error) {
q.lock.Lock()
defer q.lock.Unlock()
// Short circuit if the data was never requested
request := q.headerPendPool[id]
if request == nil {
return 0, errNoFetchesPending
}
headerReqTimer.UpdateSince(request.Time)
delete(q.headerPendPool, id)
// Ensure headers can be mapped onto the skeleton chain
target := q.headerTaskPool[request.From].Hash()
accepted := len(headers) == MaxHeaderFetch
if accepted {
if headers[0].Number.Uint64() != request.From {
glog.V(logger.Detail).Infof("Peer %s: first header #%v [%x] broke chain ordering, expected %d", id, headers[0].Number, headers[0].Hash().Bytes()[:4], request.From)
accepted = false
} else if headers[len(headers)-1].Hash() != target {
glog.V(logger.Detail).Infof("Peer %s: last header #%v [%x] broke skeleton structure, expected %x", id, headers[len(headers)-1].Number, headers[len(headers)-1].Hash().Bytes()[:4], target[:4])
accepted = false
}
}
if accepted {
for i, header := range headers[1:] {
hash := header.Hash()
if want := request.From + 1 + uint64(i); header.Number.Uint64() != want {
glog.V(logger.Warn).Infof("Peer %s: header #%v [%x] broke chain ordering, expected %d", id, header.Number, hash[:4], want)
accepted = false
break
}
if headers[i].Hash() != header.ParentHash {
glog.V(logger.Warn).Infof("Peer %s: header #%v [%x] broke chain ancestry", id, header.Number, hash[:4])
accepted = false
break
}
}
}
// If the batch of headers wasn't accepted, mark as unavailable
if !accepted {
glog.V(logger.Detail).Infof("Peer %s: skeleton filling from header #%d not accepted", id, request.From)
miss := q.headerPeerMiss[id]
if miss == nil {
q.headerPeerMiss[id] = make(map[uint64]struct{})
miss = q.headerPeerMiss[id]
}
miss[request.From] = struct{}{}
q.headerTaskQueue.Push(request.From, -float32(request.From))
return 0, errors.New("delivery not accepted")
}
// Clean up a successful fetch and try to deliver any sub-results
copy(q.headerResults[request.From-q.headerOffset:], headers)
delete(q.headerTaskPool, request.From)
ready := 0
for q.headerProced+ready < len(q.headerResults) && q.headerResults[q.headerProced+ready] != nil {
ready += MaxHeaderFetch
}
if ready > 0 {
// Headers are ready for delivery, gather them and push forward (non blocking)
process := make([]*types.Header, ready)
copy(process, q.headerResults[q.headerProced:q.headerProced+ready])
select {
case headerProcCh <- process:
glog.V(logger.Detail).Infof("%s: pre-scheduled %d headers from #%v", id, len(process), process[0].Number)
q.headerProced += len(process)
default:
}
}
// Check for termination and return
if len(q.headerTaskPool) == 0 {
q.headerContCh <- false
}
return len(headers), nil
}
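The acceptance rules above reduce to: a batch of exactly MaxHeaderFetch headers that starts at the requested origin, ends on the skeleton target hash, and forms an unbroken parent chain. A minimal sketch of that check with a simplified stand-in for types.Header (the batch-length requirement is left to the caller):

package main

import "fmt"

// hdr is a simplified stand-in for types.Header, keeping only the fields the
// skeleton-filling check looks at.
type hdr struct {
	Number     uint64
	Hash       string
	ParentHash string
}

// validSegment mirrors the acceptance rules: the batch must start at `from`,
// end on the skeleton `target` hash, and form an unbroken parent chain.
func validSegment(headers []hdr, from uint64, target string) bool {
	if len(headers) == 0 || headers[0].Number != from {
		return false
	}
	if headers[len(headers)-1].Hash != target {
		return false
	}
	for i := 1; i < len(headers); i++ {
		if headers[i].Number != from+uint64(i) || headers[i].ParentHash != headers[i-1].Hash {
			return false
		}
	}
	return true
}

func main() {
	batch := []hdr{
		{Number: 1, Hash: "a", ParentHash: "genesis"},
		{Number: 2, Hash: "b", ParentHash: "a"},
		{Number: 3, Hash: "c", ParentHash: "b"},
	}
	fmt.Println(validSegment(batch, 1, "c")) // true
	fmt.Println(validSegment(batch, 1, "x")) // false: breaks the skeleton
}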
// DeliverBodies injects a block body retrieval response into the results queue.
// The method returns the number of block bodies accepted from the delivery and
// also wakes any threads waiting for data delivery.
@ -1041,13 +1262,19 @@ func (q *queue) deliverNodeData(results []trie.SyncResult, callback func(error,
// Prepare configures the result cache to allow accepting and caching inbound
// fetch results.
func (q *queue) Prepare(offset uint64, mode SyncMode, pivot uint64) {
func (q *queue) Prepare(offset uint64, mode SyncMode, pivot uint64, head *types.Header) {
q.lock.Lock()
defer q.lock.Unlock()
// Prepare the queue for sync results
if q.resultOffset < offset {
q.resultOffset = offset
}
q.fastSyncPivot = pivot
q.mode = mode
// If long running fast sync, also start up a head state retrieval immediately
if mode == FastSync && pivot > 0 {
q.stateScheduler = state.NewStateSync(head.Root, q.stateDatabase)
}
}

View File

@ -30,6 +30,7 @@ import (
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/rpc"
"golang.org/x/net/context"
@ -50,10 +51,11 @@ const (
// PublicFilterAPI offers support to create and manage filters. This will allow external clients to retrieve various
// information related to the Ethereum protocol such as blocks, transactions and logs.
type PublicFilterAPI struct {
mux *event.TypeMux
apiBackend ethapi.Backend
quit chan struct{}
chainDb ethdb.Database
mux *event.TypeMux
filterManager *FilterSystem
@ -73,11 +75,12 @@ type PublicFilterAPI struct {
}
// NewPublicFilterAPI returns a new PublicFilterAPI instance.
func NewPublicFilterAPI(chainDb ethdb.Database, mux *event.TypeMux) *PublicFilterAPI {
func NewPublicFilterAPI(apiBackend ethapi.Backend) *PublicFilterAPI {
svc := &PublicFilterAPI{
mux: mux,
chainDb: chainDb,
filterManager: NewFilterSystem(mux),
apiBackend: apiBackend,
mux: apiBackend.EventMux(),
chainDb: apiBackend.ChainDb(),
filterManager: NewFilterSystem(apiBackend.EventMux()),
filterMapping: make(map[string]int),
logQueue: make(map[int]*logQueue),
blockQueue: make(map[int]*hashQueue),
@ -141,7 +144,7 @@ func (s *PublicFilterAPI) NewBlockFilter() (string, error) {
}
s.blockMu.Lock()
filter := New(s.chainDb)
filter := New(s.apiBackend)
id, err := s.filterManager.Add(filter, ChainFilter)
if err != nil {
return "", err
@ -177,7 +180,7 @@ func (s *PublicFilterAPI) NewPendingTransactionFilter() (string, error) {
s.transactionMu.Lock()
defer s.transactionMu.Unlock()
filter := New(s.chainDb)
filter := New(s.apiBackend)
id, err := s.filterManager.Add(filter, PendingTxFilter)
if err != nil {
return "", err
@ -206,7 +209,7 @@ func (s *PublicFilterAPI) newLogFilter(earliest, latest int64, addresses []commo
s.logMu.Lock()
defer s.logMu.Unlock()
filter := New(s.chainDb)
filter := New(s.apiBackend)
id, err := s.filterManager.Add(filter, LogFilter)
if err != nil {
return 0, err
@ -233,6 +236,7 @@ func (s *PublicFilterAPI) newLogFilter(earliest, latest int64, addresses []commo
return id, nil
}
// Logs creates a subscription that fires for all new logs that match the given filter criteria.
func (s *PublicFilterAPI) Logs(ctx context.Context, args NewFilterArgs) (rpc.Subscription, error) {
notifier, supported := rpc.NotifierFromContext(ctx)
if !supported {
@ -291,12 +295,13 @@ type NewFilterArgs struct {
Topics [][]common.Hash
}
// UnmarshalJSON sets *args fields with given data.
func (args *NewFilterArgs) UnmarshalJSON(data []byte) error {
type input struct {
From *rpc.BlockNumber `json:"fromBlock"`
ToBlock *rpc.BlockNumber `json:"toBlock"`
Addresses interface{} `json:"address"`
Topics interface{} `json:"topics"`
Topics []interface{} `json:"topics"`
}
var raw input
@ -321,7 +326,6 @@ func (args *NewFilterArgs) UnmarshalJSON(data []byte) error {
if raw.Addresses != nil {
// raw.Address can contain a single address or an array of addresses
var addresses []common.Address
if strAddrs, ok := raw.Addresses.([]interface{}); ok {
for i, addr := range strAddrs {
if strAddr, ok := addr.(string); ok {
@ -352,56 +356,53 @@ func (args *NewFilterArgs) UnmarshalJSON(data []byte) error {
args.Addresses = addresses
}
// helper function which parses a string to a topic hash
topicConverter := func(raw string) (common.Hash, error) {
if len(raw) == 0 {
return common.Hash{}, nil
}
if len(raw) >= 2 && raw[0] == '0' && (raw[1] == 'x' || raw[1] == 'X') {
raw = raw[2:]
}
if len(raw) != 2*common.HashLength {
return common.Hash{}, errors.New("invalid topic(s)")
}
if decAddr, err := hex.DecodeString(raw); err == nil {
return common.BytesToHash(decAddr), nil
}
return common.Hash{}, errors.New("invalid topic given")
return common.Hash{}, errors.New("invalid topic(s)")
}
// topics is an array consisting of strings or arrays of strings
if raw.Topics != nil {
topics, ok := raw.Topics.([]interface{})
if ok {
parsedTopics := make([][]common.Hash, len(topics))
for i, topic := range topics {
if topic == nil {
parsedTopics[i] = []common.Hash{common.StringToHash("")}
} else if strTopic, ok := topic.(string); ok {
if t, err := topicConverter(strTopic); err != nil {
return fmt.Errorf("invalid topic on index %d", i)
} else {
parsedTopics[i] = []common.Hash{t}
}
} else if arrTopic, ok := topic.([]interface{}); ok {
parsedTopics[i] = make([]common.Hash, len(arrTopic))
for j := 0; j < len(parsedTopics[i]); i++ {
if arrTopic[j] == nil {
parsedTopics[i][j] = common.StringToHash("")
} else if str, ok := arrTopic[j].(string); ok {
if t, err := topicConverter(str); err != nil {
return fmt.Errorf("invalid topic on index %d", i)
} else {
parsedTopics[i] = []common.Hash{t}
}
} else {
return fmt.Errorf("topic[%d][%d] not a string", i, j)
}
}
} else {
return fmt.Errorf("topic[%d] invalid", i)
// topics is an array consisting of strings and/or arrays of strings.
// JSON null values are converted to common.Hash{} and ignored by the filter manager.
if len(raw.Topics) > 0 {
args.Topics = make([][]common.Hash, len(raw.Topics))
for i, t := range raw.Topics {
if t == nil { // ignore topic when matching logs
args.Topics[i] = []common.Hash{common.Hash{}}
} else if topic, ok := t.(string); ok { // match specific topic
top, err := topicConverter(topic)
if err != nil {
return err
}
args.Topics[i] = []common.Hash{top}
} else if topics, ok := t.([]interface{}); ok { // or case e.g. [null, "topic0", "topic1"]
for _, rawTopic := range topics {
if rawTopic == nil {
args.Topics[i] = append(args.Topics[i], common.Hash{})
} else if topic, ok := rawTopic.(string); ok {
parsed, err := topicConverter(topic)
if err != nil {
return err
}
args.Topics[i] = append(args.Topics[i], parsed)
} else {
return fmt.Errorf("invalid topic(s)")
}
}
} else {
return fmt.Errorf("invalid topic(s)")
}
args.Topics = parsedTopics
}
}
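For illustration, the three topic shapes the parser above distinguishes are a JSON null (wildcard), a single topic string, and an array of alternatives. A small stand-alone sketch decoding a hypothetical payload the same way the raw input struct does; the hash values are placeholders:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical filter payload, placeholder hashes only.
	payload := []byte(`{"fromBlock":"0x1","toBlock":"latest",
		"topics":[null,
		"0x000000000000000000000000000000000000000000000000000000000000dead",
		["0x000000000000000000000000000000000000000000000000000000000000beef",
		 "0x000000000000000000000000000000000000000000000000000000000000cafe"]]}`)

	var raw struct {
		Topics []interface{} `json:"topics"`
	}
	if err := json.Unmarshal(payload, &raw); err != nil {
		panic(err)
	}
	// Position 0 is nil (wildcard), position 1 a single topic string, and
	// position 2 an OR-list of topics: exactly the three cases UnmarshalJSON
	// above distinguishes.
	for i, t := range raw.Topics {
		fmt.Printf("topic[%d]: %T %v\n", i, t, t)
	}
}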
@ -433,14 +434,15 @@ func (s *PublicFilterAPI) NewFilter(args NewFilterArgs) (string, error) {
}
// GetLogs returns the logs matching the given argument.
func (s *PublicFilterAPI) GetLogs(args NewFilterArgs) []vmlog {
filter := New(s.chainDb)
func (s *PublicFilterAPI) GetLogs(ctx context.Context, args NewFilterArgs) ([]vmlog, error) {
filter := New(s.apiBackend)
filter.SetBeginBlock(args.FromBlock.Int64())
filter.SetEndBlock(args.ToBlock.Int64())
filter.SetAddresses(args.Addresses)
filter.SetTopics(args.Topics)
return toRPCLogs(filter.Find(), false)
logs, err := filter.Find(ctx)
return toRPCLogs(logs, false), err
}
// UninstallFilter removes the filter with the given filter id.
@ -497,7 +499,8 @@ func (s *PublicFilterAPI) blockFilterChanged(id int) []common.Hash {
defer s.blockMu.Unlock()
if s.blockQueue[id] != nil {
return s.blockQueue[id].get()
res := s.blockQueue[id].get()
return res
}
return nil
}
@ -526,17 +529,18 @@ func (s *PublicFilterAPI) logFilterChanged(id int) []vmlog {
}
// GetFilterLogs returns the logs for the filter with the given id.
func (s *PublicFilterAPI) GetFilterLogs(filterId string) []vmlog {
func (s *PublicFilterAPI) GetFilterLogs(ctx context.Context, filterId string) ([]vmlog, error) {
id, ok := s.filterMapping[filterId]
if !ok {
return toRPCLogs(nil, false)
return toRPCLogs(nil, false), nil
}
if filter := s.filterManager.Get(id); filter != nil {
return toRPCLogs(filter.Find(), false)
logs, err := filter.Find(ctx)
return toRPCLogs(logs, false), err
}
return toRPCLogs(nil, false)
return toRPCLogs(nil, false), nil
}
// GetFilterChanges returns the logs for the filter with the given id since the last time it was called.

View File

@ -17,14 +17,17 @@
package filters
import (
"math"
// "math"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
// "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/rpc"
"golang.org/x/net/context"
)
type AccountChange struct {
@ -33,6 +36,8 @@ type AccountChange struct {
// Filtering interface
type Filter struct {
apiBackend ethapi.Backend
created time.Time
db ethdb.Database
@ -47,8 +52,11 @@ type Filter struct {
// Create a new filter which uses a bloom filter on blocks to figure out whether a particular block
// is interesting or not.
func New(db ethdb.Database) *Filter {
return &Filter{db: db}
func New(apiBackend ethapi.Backend) *Filter {
return &Filter{
apiBackend: apiBackend,
db: apiBackend.ChainDb(),
}
}
// Set the earliest and latest block for filtering.
@ -71,27 +79,28 @@ func (self *Filter) SetTopics(topics [][]common.Hash) {
}
// Run filters logs with the current parameters set
func (self *Filter) Find() vm.Logs {
latestBlock := core.GetBlock(self.db, core.GetHeadBlockHash(self.db))
func (self *Filter) Find(ctx context.Context) (vm.Logs, error) {
headBlockNumber := self.apiBackend.HeaderByNumber(rpc.LatestBlockNumber).Number.Uint64()
var beginBlockNo uint64 = uint64(self.begin)
if self.begin == -1 {
beginBlockNo = latestBlock.NumberU64()
beginBlockNo = headBlockNumber
}
var endBlockNo uint64 = uint64(self.end)
if self.end == -1 {
endBlockNo = latestBlock.NumberU64()
endBlockNo = headBlockNumber
}
// if no addresses are present we can't make use of fast search which
// uses the mipmap bloom filters to check for fast inclusion and uses
// higher range probability in order to ensure at least a false positive
if len(self.addresses) == 0 {
return self.getLogs(beginBlockNo, endBlockNo)
}
return self.mipFind(beginBlockNo, endBlockNo, 0)
// if len(self.addresses) == 0 {
return self.getLogs(ctx, beginBlockNo, endBlockNo)
// }
// return self.mipFind(beginBlockNo, endBlockNo, 0)
}
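Tying the refactor together, a minimal usage sketch of the backend-based filter. The import paths assume the package sits at eth/filters as in upstream go-ethereum, and the backend is assumed to be supplied by the surrounding eth or les service:

package example

import (
	"golang.org/x/net/context"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/eth/filters"
	"github.com/ethereum/go-ethereum/internal/ethapi"
)

// findRecentLogs drives the refactored filter: -1 for begin/end resolves to the
// current head header, and an empty hash in a topic slot acts as a wildcard.
func findRecentLogs(ctx context.Context, backend ethapi.Backend, addr common.Address) (vm.Logs, error) {
	f := filters.New(backend)
	f.SetBeginBlock(-1)
	f.SetEndBlock(-1)
	f.SetAddresses([]common.Address{addr})
	f.SetTopics([][]common.Hash{{common.Hash{}}})
	return f.Find(ctx)
}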
func (self *Filter) mipFind(start, end uint64, depth int) (logs vm.Logs) {
/*func (self *Filter) mipFind(start, end uint64, depth int) (logs vm.Logs) {
level := core.MIPMapLevels[depth]
// normalise numerator so we can work in level specific batches and
// work with the proper range checks
@ -119,27 +128,24 @@ func (self *Filter) mipFind(start, end uint64, depth int) (logs vm.Logs) {
}
return logs
}
func (self *Filter) getLogs(start, end uint64) (logs vm.Logs) {
var block *types.Block
}*/
func (self *Filter) getLogs(ctx context.Context, start, end uint64) (logs vm.Logs, err error) {
for i := start; i <= end; i++ {
hash := core.GetCanonicalHash(self.db, i)
if hash != (common.Hash{}) {
block = core.GetBlock(self.db, hash)
} else { // block not found
return logs
header := self.apiBackend.HeaderByNumber(rpc.BlockNumber(i))
if header == nil {
return logs, nil
}
// Use bloom filtering to see if this block is interesting given the
// current parameters
if self.bloomFilter(block) {
if self.bloomFilter(header.Bloom) {
// Get the logs of the block
var (
receipts = core.GetBlockReceipts(self.db, block.Hash())
unfiltered vm.Logs
)
receipts, err := self.apiBackend.GetReceipts(ctx, header.Hash())
if err != nil {
return nil, err
}
var unfiltered vm.Logs
for _, receipt := range receipts {
unfiltered = append(unfiltered, receipt.Logs...)
}
@ -147,7 +153,7 @@ func (self *Filter) getLogs(start, end uint64) (logs vm.Logs) {
}
}
return logs
return logs, nil
}
func includes(addresses []common.Address, a common.Address) bool {
@ -201,11 +207,11 @@ Logs:
return ret
}
func (self *Filter) bloomFilter(block *types.Block) bool {
func (self *Filter) bloomFilter(bloom types.Bloom) bool {
if len(self.addresses) > 0 {
var included bool
for _, addr := range self.addresses {
if types.BloomLookup(block.Bloom(), addr) {
if types.BloomLookup(bloom, addr) {
included = true
break
}
@ -219,7 +225,7 @@ func (self *Filter) bloomFilter(block *types.Block) bool {
for _, sub := range self.topics {
var included bool
for _, topic := range sub {
if (topic == common.Hash{}) || types.BloomLookup(block.Bloom(), topic) {
if (topic == common.Hash{}) || types.BloomLookup(bloom, topic) {
included = true
break
}

View File

@ -53,9 +53,9 @@ type GpoParams struct {
// GasPriceOracle recommends gas prices based on the content of recent
// blocks.
type GasPriceOracle struct {
chain *core.BlockChain
db ethdb.Database
evmux *event.TypeMux
chain *core.BlockChain
db ethdb.Database
evmux *event.TypeMux
params *GpoParams
initOnce sync.Once
minPrice *big.Int
@ -79,9 +79,9 @@ func NewGasPriceOracle(chain *core.BlockChain, db ethdb.Database, evmux *event.T
minbase = minbase.Div(minbase, big.NewInt(int64(params.GpobaseCorrectionFactor)))
}
return &GasPriceOracle{
chain: chain,
db: db,
evmux: evmux,
chain: chain,
db: db,
evmux: evmux,
params: params,
blocks: make(map[uint64]*blockPriceInfo),
minBase: minbase,
@ -183,7 +183,7 @@ func (self *GasPriceOracle) processBlock(block *types.Block) {
func (self *GasPriceOracle) lowestPrice(block *types.Block) *big.Int {
gasUsed := big.NewInt(0)
receipts := core.GetBlockReceipts(self.db, block.Hash())
receipts := core.GetBlockReceipts(self.db, block.Hash(), block.NumberU64())
if len(receipts) > 0 {
if cgu := receipts[len(receipts)-1].CumulativeGasUsed; cgu != nil {
gasUsed = receipts[len(receipts)-1].CumulativeGasUsed

View File

@ -22,24 +22,24 @@ import (
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethapi"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/rpc"
"golang.org/x/net/context"
)
const (
LpoAvgCount = 5
LpoMinCount = 3
LpoMaxBlocks = 20
LpoSelect = 50
LpoAvgCount = 5
LpoMinCount = 3
LpoMaxBlocks = 20
LpoSelect = 50
LpoDefaultPrice = 20000000000
)
// LightPriceOracle recommends gas prices based on the content of recent
// blocks. Suitable for both light and full clients.
type LightPriceOracle struct {
backend ethapi.Backend
lastHead common.Hash
backend ethapi.Backend
lastHead common.Hash
lastPrice *big.Int
cacheLock sync.RWMutex
fetchLock sync.Mutex
@ -48,7 +48,7 @@ type LightPriceOracle struct {
// NewLightPriceOracle returns a new oracle.
func NewLightPriceOracle(backend ethapi.Backend) *LightPriceOracle {
return &LightPriceOracle{
backend: backend,
backend: backend,
lastPrice: big.NewInt(LpoDefaultPrice),
}
}
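A minimal usage sketch, assuming it sits alongside the oracle in the same package; the backend is supplied by the surrounding eth or les service:

// suggestGasPrice is illustrative only: the oracle caches the last head/price
// pair and otherwise recomputes a percentile of the lowest gas prices seen in
// the most recently sampled blocks.
func suggestGasPrice(ctx context.Context, backend ethapi.Backend) (*big.Int, error) {
	gpo := NewLightPriceOracle(backend)
	return gpo.SuggestPrice(ctx)
}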
@ -59,13 +59,13 @@ func (self *LightPriceOracle) SuggestPrice(ctx context.Context) (*big.Int, error
lastHead := self.lastHead
lastPrice := self.lastPrice
self.cacheLock.RUnlock()
head := self.backend.HeaderByNumber(rpc.LatestBlockNumber)
headHash := head.Hash()
if headHash == lastHead {
return lastPrice, nil
}
self.fetchLock.Lock()
defer self.fetchLock.Unlock()
@ -116,7 +116,7 @@ func (self *LightPriceOracle) SuggestPrice(ctx context.Context) (*big.Int, error
sort.Sort(lps)
price = lps[(len(lps)-1)*LpoSelect/100]
}
self.cacheLock.Lock()
self.lastHead = headHash
self.lastPrice = price
@ -126,7 +126,7 @@ func (self *LightPriceOracle) SuggestPrice(ctx context.Context) (*big.Int, error
type lpResult struct {
price *big.Int
err error
err error
}
// getLowestPrice calculates the lowest transaction gas price in a given block

View File

@ -22,6 +22,7 @@ import (
"math"
"math/big"
"sync"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
@ -58,7 +59,9 @@ type blockFetcherFn func([]common.Hash) error
type ProtocolManager struct {
networkId int
fastSync bool
fastSync uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks)
synced uint32 // Flag whether we're considered synchronised (enables transaction processing)
txpool txPool
blockchain *core.BlockChain
chaindb ethdb.Database
@ -82,20 +85,16 @@ type ProtocolManager struct {
// wait group is used for graceful shutdowns during downloading
// and processing
wg sync.WaitGroup
badBlockReportingEnabled bool
}
// NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
// of communicating with the ethereum network.
func NewProtocolManager(config *core.ChainConfig, fastSync bool, networkId int, mux *event.TypeMux, txpool txPool, pow pow.PoW, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) {
// Figure out whether to allow fast sync or not
if fastSync && blockchain.CurrentBlock().NumberU64() > 0 {
glog.V(logger.Info).Infof("blockchain not empty, fast sync disabled")
fastSync = false
}
// Create the protocol manager with the base fields
manager := &ProtocolManager{
networkId: networkId,
fastSync: fastSync,
eventMux: mux,
txpool: txpool,
blockchain: blockchain,
@ -106,6 +105,14 @@ func NewProtocolManager(config *core.ChainConfig, fastSync bool, networkId int,
txsyncCh: make(chan *txsync),
quitSync: make(chan struct{}),
}
// Figure out whether to allow fast sync or not
if fastSync && blockchain.CurrentBlock().NumberU64() > 0 {
glog.V(logger.Info).Infof("blockchain not empty, fast sync disabled")
fastSync = false
}
if fastSync {
manager.fastSync = uint32(1)
}
// Initiate a sub-protocol for every implemented version we can handle
manager.SubProtocols = make([]p2p.Protocol, 0, len(ProtocolVersions))
for i, version := range ProtocolVersions {
@ -145,9 +152,9 @@ func NewProtocolManager(config *core.ChainConfig, fastSync bool, networkId int,
return nil, errIncompatibleConfig
}
// Construct the different synchronisation mechanisms
manager.downloader = downloader.New(downloader.FullSync, chaindb, manager.eventMux, blockchain.HasHeader, blockchain.HasBlockAndState, blockchain.GetHeader,
blockchain.GetBlock, blockchain.CurrentHeader, blockchain.CurrentBlock, blockchain.CurrentFastBlock, blockchain.FastSyncCommitHead,
blockchain.GetTd, blockchain.InsertHeaderChain, blockchain.InsertChain, blockchain.InsertReceiptChain, blockchain.Rollback,
manager.downloader = downloader.New(downloader.FullSync, chaindb, manager.eventMux, blockchain.HasHeader, blockchain.HasBlockAndState, blockchain.GetHeaderByHash,
blockchain.GetBlockByHash, blockchain.CurrentHeader, blockchain.CurrentBlock, blockchain.CurrentFastBlock, blockchain.FastSyncCommitHead,
blockchain.GetTdByHash, blockchain.InsertHeaderChain, manager.insertChain, blockchain.InsertReceiptChain, blockchain.Rollback,
manager.removePeer)
validator := func(block *types.Block, parent *types.Block) error {
@ -156,11 +163,28 @@ func NewProtocolManager(config *core.ChainConfig, fastSync bool, networkId int,
heighter := func() uint64 {
return blockchain.CurrentBlock().NumberU64()
}
manager.fetcher = fetcher.New(blockchain.GetBlock, validator, manager.BroadcastBlock, heighter, blockchain.InsertChain, manager.removePeer)
inserter := func(blocks types.Blocks) (int, error) {
atomic.StoreUint32(&manager.synced, 1) // Mark initial sync done on any fetcher import
return manager.insertChain(blocks)
}
manager.fetcher = fetcher.New(blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer)
if blockchain.Genesis().Hash().Hex() == defaultGenesisHash && networkId == 1 {
glog.V(logger.Debug).Infoln("Bad Block Reporting is enabled")
manager.badBlockReportingEnabled = true
}
return manager, nil
}
func (pm *ProtocolManager) insertChain(blocks types.Blocks) (i int, err error) {
i, err = pm.blockchain.InsertChain(blocks)
if pm.badBlockReportingEnabled && core.IsValidationErr(err) && i < len(blocks) {
go sendBadBlockReport(blocks[i], err)
}
return i, err
}
func (pm *ProtocolManager) removePeer(id string) {
// Short circuit if the peer was already removed
peer := pm.peers.Peer(id)
@ -225,6 +249,10 @@ func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) *p
// handle is the callback invoked to manage the life cycle of an eth peer. When
// this function terminates, the peer is disconnected.
func (pm *ProtocolManager) handle(p *peer) error {
if pm.peers.Len() >= 20 {
return p2p.DiscTooManyPeers
}
glog.V(logger.Debug).Infof("%v: peer connected [%s]", p, p.Name())
// Execute the Ethereum handshake
@ -358,7 +386,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Retrieve the requested block, stopping if enough was found
if block := pm.blockchain.GetBlock(hash); block != nil {
if block := pm.blockchain.GetBlockByHash(hash); block != nil {
blocks = append(blocks, block)
bytes += block.Size()
}
@ -375,6 +403,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
// Update the receive timestamp of each block
for _, block := range blocks {
block.ReceivedAt = msg.ReceivedAt
block.ReceivedFrom = p
}
// Filter out any explicitly requested blocks, deliver the rest to the downloader
if blocks := pm.fetcher.FilterBlocks(blocks); len(blocks) > 0 {
@ -400,13 +429,14 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
// Retrieve the next header satisfying the query
var origin *types.Header
if hashMode {
origin = pm.blockchain.GetHeader(query.Origin.Hash)
origin = pm.blockchain.GetHeaderByHash(query.Origin.Hash)
} else {
origin = pm.blockchain.GetHeaderByNumber(query.Origin.Number)
}
if origin == nil {
break
}
number := origin.Number.Uint64()
headers = append(headers, origin)
bytes += estHeaderRlpSize
@ -415,8 +445,9 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
case query.Origin.Hash != (common.Hash{}) && query.Reverse:
// Hash based traversal towards the genesis block
for i := 0; i < int(query.Skip)+1; i++ {
if header := pm.blockchain.GetHeader(query.Origin.Hash); header != nil {
if header := pm.blockchain.GetHeader(query.Origin.Hash, number); header != nil {
query.Origin.Hash = header.ParentHash
number--
} else {
unknown = true
break
@ -577,9 +608,9 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Retrieve the requested block's receipts, skipping if unknown to us
results := core.GetBlockReceipts(pm.chaindb, hash)
results := core.GetBlockReceipts(pm.chaindb, hash, core.GetBlockNumber(pm.chaindb, hash))
if results == nil {
if header := pm.blockchain.GetHeader(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
if header := pm.blockchain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
continue
}
}
@ -661,6 +692,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
return errResp(ErrDecode, "block validation %v: %v", msg, err)
}
request.Block.ReceivedAt = msg.ReceivedAt
request.Block.ReceivedFrom = p
// Mark the peer as owning the block and schedule it for import
p.MarkBlock(request.Block.Hash())
@ -671,14 +703,19 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
// Update the peers total difficulty if needed, schedule a download if gapped
if request.TD.Cmp(p.Td()) > 0 {
p.SetTd(request.TD)
td := pm.blockchain.GetTd(pm.blockchain.CurrentBlock().Hash())
currentBlock := pm.blockchain.CurrentBlock()
td := pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
if request.TD.Cmp(new(big.Int).Add(td, request.Block.Difficulty())) > 0 {
go pm.synchronise(p)
}
}
case msg.Code == TxMsg:
// Transactions arrived, parse all of them and deliver to the pool
// Transactions arrived, make sure we have a valid and fresh chain to handle them
if atomic.LoadUint32(&pm.synced) == 0 {
break
}
// Transactions can be processed, parse all of them and deliver to the pool
var txs []*types.Transaction
if err := msg.Decode(&txs); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
@ -708,8 +745,8 @@ func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
if propagate {
// Calculate the TD of the block (it's not imported yet, so block.Td is not valid)
var td *big.Int
if parent := pm.blockchain.GetBlock(block.ParentHash()); parent != nil {
td = new(big.Int).Add(block.Difficulty(), pm.blockchain.GetTd(block.ParentHash()))
if parent := pm.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1); parent != nil {
td = new(big.Int).Add(block.Difficulty(), pm.blockchain.GetTd(block.ParentHash(), block.NumberU64()-1))
} else {
glog.V(logger.Error).Infof("propagating dangling block #%d [%x]", block.NumberU64(), hash[:4])
return
@ -777,10 +814,11 @@ type EthNodeInfo struct {
// NodeInfo retrieves some protocol metadata about the running host node.
func (self *ProtocolManager) NodeInfo() *EthNodeInfo {
currentBlock := self.blockchain.CurrentBlock()
return &EthNodeInfo{
Network: self.networkId,
Difficulty: self.blockchain.GetTd(self.blockchain.CurrentBlock().Hash()),
Difficulty: self.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64()),
Genesis: self.blockchain.Genesis().Hash(),
Head: self.blockchain.CurrentBlock().Hash(),
Head: currentBlock.Hash(),
}
}

View File

@ -18,6 +18,7 @@ package eth
import (
"math/rand"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
@ -161,24 +162,27 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
return
}
// Make sure the peer's TD is higher than our own. If not drop.
td := pm.blockchain.GetTd(pm.blockchain.CurrentBlock().Hash())
currentBlock := pm.blockchain.CurrentBlock()
td := pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
if peer.Td().Cmp(td) <= 0 {
return
}
// Otherwise try to sync with the downloader
mode := downloader.FullSync
if pm.fastSync {
if atomic.LoadUint32(&pm.fastSync) == 1 {
mode = downloader.FastSync
}
if err := pm.downloader.Synchronise(peer.id, peer.Head(), peer.Td(), mode); err != nil {
return
}
atomic.StoreUint32(&pm.synced, 1) // Mark initial sync done
// If fast sync was enabled, and we synced up, disable it
if pm.fastSync {
if atomic.LoadUint32(&pm.fastSync) == 1 {
// Disable fast sync if we indeed have something in our chain
if pm.blockchain.CurrentBlock().NumberU64() > 0 {
glog.V(logger.Info).Infof("fast sync complete, auto disabling")
pm.fastSync = false
atomic.StoreUint32(&pm.fastSync, 0)
}
}
}
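fastSync (and the new synced flag) change from bool to uint32 here because they are now touched from several goroutines: this sync loop, the fetcher's inserter callback and the TxMsg handler. atomic.LoadUint32/StoreUint32 gives a race-free flag without taking a lock; a stand-alone sketch of the same pattern:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var fastSync uint32 = 1 // 1 = enabled, 0 = disabled, mirroring ProtocolManager

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if atomic.LoadUint32(&fastSync) == 1 {
				// fast-sync specific work would run here
			}
			// Any goroutine may flip the flag off once the chain is non-empty.
			atomic.StoreUint32(&fastSync, 0)
		}()
	}
	wg.Wait()
	fmt.Println("fastSync still enabled:", atomic.LoadUint32(&fastSync) == 1)
}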

View File

@ -39,15 +39,17 @@ var OpenFileLimit = 64
// cacheRatio specifies how the total allotted cache is distributed between the
// various system databases.
var cacheRatio = map[string]float64{
"dapp": 0.0,
"chaindata": 1.0,
"dapp": 0.0,
"chaindata": 1.0,
"lightchaindata": 1.0,
}
// handleRatio specifies how the total allotted file descriptors are distributed
// between the various system databases.
var handleRatio = map[string]float64{
"dapp": 0.0,
"chaindata": 1.0,
"dapp": 0.0,
"chaindata": 1.0,
"lightchaindata": 1.0,
}
type LDBDatabase struct {

View File

@ -22,9 +22,9 @@ import (
_ "net/http/pprof"
"runtime"
"github.com/codegangsta/cli"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"gopkg.in/urfave/cli.v1"
)
var (

View File

@ -1,18 +1,18 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of go-ethereum.
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package ethapi
@ -45,28 +45,19 @@ import (
"golang.org/x/net/context"
)
// ErrNoCode is returned by call and transact operations for which the requested
// recipient contract to operate on does not exist in the state db or does not
// have any code associated with it (i.e. suicided).
//
// Please note, this error string is part of the RPC API and is expected by the
// native contract bindings to signal this particular error. Do not change this
// as it will break all dependent code!
var ErrNoCode = errors.New("no contract code at given address")
const (
defaultGas = uint64(90000)
)
const defaultGas = uint64(90000)
// PublicEthereumAPI provides an API to access Ethereum related information.
// It offers only methods that operate on public data that is freely available to anyone.
type PublicEthereumAPI struct {
b Backend
b Backend
solcPath *string
solc **compiler.Solidity
}
// NewPublicEthereumAPI creates a new Ethereum protocol API.
func NewPublicEthereumAPI(b Backend) *PublicEthereumAPI {
return &PublicEthereumAPI{b}
func NewPublicEthereumAPI(b Backend, solcPath *string, solc **compiler.Solidity) *PublicEthereumAPI {
return &PublicEthereumAPI{b, solcPath, solc}
}
// GasPrice returns a suggestion for a gas price.
@ -74,10 +65,19 @@ func (s *PublicEthereumAPI) GasPrice(ctx context.Context) (*big.Int, error) {
return s.b.SuggestPrice(ctx)
}
func (s *PublicEthereumAPI) getSolc() (*compiler.Solidity, error) {
var err error
solc := *s.solc
if solc == nil {
solc, err = compiler.New(*s.solcPath)
}
return solc, err
}
// GetCompilers returns the collection of available smart contract compilers
func (s *PublicEthereumAPI) GetCompilers() ([]string, error) {
solc, err := s.b.Solc()
if err != nil && solc != nil {
solc, err := s.getSolc()
if err == nil && solc != nil {
return []string{"Solidity"}, nil
}
@ -86,7 +86,7 @@ func (s *PublicEthereumAPI) GetCompilers() ([]string, error) {
// CompileSolidity compiles the given solidity source
func (s *PublicEthereumAPI) CompileSolidity(source string) (map[string]*compiler.Contract, error) {
solc, err := s.b.Solc()
solc, err := s.getSolc()
if err != nil {
return nil, err
}
@ -237,14 +237,19 @@ func (s *PublicAccountAPI) Accounts() []accounts.Account {
}
// PrivateAccountAPI provides an API to access accounts managed by this node.
// It offers methods to create, (un)lock and list accounts.
// It offers methods to create, (un)lock and list accounts. Some methods accept
// passwords and are therefore considered private by default.
type PrivateAccountAPI struct {
am *accounts.Manager
b Backend
}
// NewPrivateAccountAPI create a new PrivateAccountAPI.
func NewPrivateAccountAPI(am *accounts.Manager) *PrivateAccountAPI {
return &PrivateAccountAPI{am}
func NewPrivateAccountAPI(b Backend) *PrivateAccountAPI {
return &PrivateAccountAPI{
am: b.AccountManager(),
b: b,
}
}
// ListAccounts will return a list of addresses for accounts this node manages.
@ -258,14 +263,16 @@ func (s *PrivateAccountAPI) ListAccounts() []common.Address {
}
// NewAccount will create a new account and returns the address for the new account.
func (s *PrivateAccountAPI) NewAccount(password string, w bool) (common.Address, error) {
acc, err := s.am.NewAccount(password, w)
func (s *PrivateAccountAPI) NewAccount(password string) (common.Address, error) {
acc, err := s.am.NewAccount(password, true)
if err == nil {
return acc.Address, nil
}
return common.Address{}, err
}
// ImportRawKey stores the given hex encoded ECDSA key into the key directory,
// encrypting it with the passphrase.
func (s *PrivateAccountAPI) ImportRawKey(privkey string, password string) (common.Address, error) {
hexkey, err := hex.DecodeString(privkey)
if err != nil {
@ -296,6 +303,39 @@ func (s *PrivateAccountAPI) LockAccount(addr common.Address) bool {
return s.am.Lock(addr) == nil
}
// SignAndSendTransaction creates a transaction from the given arguments and
// tries to sign it with the key associated with args.From. If the given passwd is
// unable to decrypt the key it fails.
func (s *PrivateAccountAPI) SignAndSendTransaction(ctx context.Context, args SendTxArgs, passwd string) (common.Hash, error) {
var err error
args, err = prepareSendTxArgs(ctx, args, s.b)
if err != nil {
return common.Hash{}, err
}
if args.Nonce == nil {
nonce, err := s.b.GetPoolNonce(ctx, args.From)
if err != nil {
return common.Hash{}, err
}
args.Nonce = rpc.NewHexNumber(nonce)
}
var tx *types.Transaction
if args.To == nil {
tx = types.NewContractCreation(args.Nonce.Uint64(), args.Value.BigInt(), args.Gas.BigInt(), args.GasPrice.BigInt(), common.FromHex(args.Data))
} else {
tx = types.NewTransaction(args.Nonce.Uint64(), *args.To, args.Value.BigInt(), args.Gas.BigInt(), args.GasPrice.BigInt(), common.FromHex(args.Data))
}
signature, err := s.am.SignWithPassphrase(args.From, passwd, tx.SigHash().Bytes())
if err != nil {
return common.Hash{}, err
}
return submitTransaction(ctx, s.b, tx, signature)
}
// PublicBlockChainAPI provides an API to access the Ethereum blockchain.
// It offers only methods that operate on public data that is freely available to anyone.
type PublicBlockChainAPI struct {
@ -341,7 +381,7 @@ func (s *PublicBlockChainAPI) BlockNumber() *big.Int {
// given block number. The rpc.LatestBlockNumber and rpc.PendingBlockNumber meta
// block numbers are also allowed.
func (s *PublicBlockChainAPI) GetBalance(ctx context.Context, address common.Address, blockNr rpc.BlockNumber) (*big.Int, error) {
state, err := s.b.StateByNumber(blockNr)
state, _, err := s.b.StateAndHeaderByNumber(blockNr)
if state == nil || err != nil {
return nil, err
}
@ -452,21 +492,20 @@ func (s *PublicBlockChainAPI) NewBlocks(ctx context.Context, args NewBlocksArgs)
// add a callback that is called on chain events which will format the block and notify the client
s.muNewBlockSubscriptions.Lock()
s.newBlockSubscriptions[subscription.ID()] = func(e core.ChainEvent) error {
if notification, err := s.rpcOutputBlock(e.Block, args.IncludeTransactions, args.TransactionDetails); err == nil {
notification, err := s.rpcOutputBlock(e.Block, args.IncludeTransactions, args.TransactionDetails)
if err == nil {
return subscription.Notify(notification)
} else {
glog.V(logger.Warn).Info("unable to format block %v\n", err)
}
glog.V(logger.Warn).Info("unable to format block %v\n", err)
return nil
}
s.muNewBlockSubscriptions.Unlock()
return subscription, nil
}
// GetCode returns the code stored at the given address in the state for the given block number.
func (s *PublicBlockChainAPI) GetCode(ctx context.Context, address common.Address, blockNr rpc.BlockNumber) (string, error) {
state, err := s.b.StateByNumber(blockNr)
state, _, err := s.b.StateAndHeaderByNumber(blockNr)
if state == nil || err != nil {
return "", err
}
@ -481,7 +520,7 @@ func (s *PublicBlockChainAPI) GetCode(ctx context.Context, address common.Addres
// block number. The rpc.LatestBlockNumber and rpc.PendingBlockNumber meta block
// numbers are also allowed.
func (s *PublicBlockChainAPI) GetStorageAt(ctx context.Context, address common.Address, key string, blockNr rpc.BlockNumber) (string, error) {
state, err := s.b.StateByNumber(blockNr)
state, _, err := s.b.StateAndHeaderByNumber(blockNr)
if state == nil || err != nil {
return "0x", err
}
@ -512,6 +551,7 @@ func (m callmsg) Gas() *big.Int { return m.gas }
func (m callmsg) Value() *big.Int { return m.value }
func (m callmsg) Data() []byte { return m.data }
// CallArgs represents the arguments for a call.
type CallArgs struct {
From common.Address `json:"from"`
To *common.Address `json:"to"`
@ -522,65 +562,61 @@ type CallArgs struct {
}
func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber) (string, *big.Int, error) {
if header := s.b.HeaderByNumber(blockNr); header != nil {
// If there's no code to interact with, respond with an appropriate error
if args.To != nil {
state, err := s.b.GetState(header)
if state == nil || err != nil {
return "", nil, err
}
if code, err := state.GetCode(ctx, *args.To); err != nil || len(code) == 0 {
return "0x", nil, ErrNoCode
}
}
// Set the account address to interact with
var addr common.Address
if args.From == (common.Address{}) {
accounts := s.b.AccountManager().Accounts()
if len(accounts) == 0 {
addr = common.Address{}
} else {
addr = accounts[0].Address
}
} else {
addr = args.From
}
// Assemble the CALL invocation
msg := callmsg{
addr: addr,
to: args.To,
gas: args.Gas.BigInt(),
gasPrice: args.GasPrice.BigInt(),
value: args.Value.BigInt(),
data: common.FromHex(args.Data),
}
if msg.gas.Cmp(common.Big0) == 0 {
msg.gas = big.NewInt(50000000)
}
if msg.gasPrice.Cmp(common.Big0) == 0 {
msg.gasPrice = new(big.Int).Mul(big.NewInt(50), common.Shannon)
}
// Execute the call and return
vmenv, vmError, err := s.b.GetVMEnv(ctx, msg, header)
if err != nil {
return "0x", common.Big0, err
}
gp := new(core.GasPool).AddGas(common.MaxBig)
res, gas, err := core.ApplyMessage(vmenv, msg, gp)
if err := vmError(); err != nil {
return "0x", common.Big0, err
}
if len(res) == 0 { // backwards compatibility
return "0x", gas, err
}
return common.ToHex(res), gas, err
state, header, err := s.b.StateAndHeaderByNumber(blockNr)
if state == nil || err != nil {
return "0x", common.Big0, err
}
return "0x", common.Big0, nil
// Set the account address to interact with
var addr common.Address
if args.From == (common.Address{}) {
accounts := s.b.AccountManager().Accounts()
if len(accounts) == 0 {
addr = common.Address{}
} else {
addr = accounts[0].Address
}
} else {
addr = args.From
}
nonce, err := state.GetNonce(ctx, addr)
if err != nil {
return "0x", common.Big0, err
}
// Assemble the CALL invocation
msg := callmsg{
addr: addr,
nonce: nonce,
to: args.To,
gas: args.Gas.BigInt(),
gasPrice: args.GasPrice.BigInt(),
value: args.Value.BigInt(),
data: common.FromHex(args.Data),
}
if msg.gas.Cmp(common.Big0) == 0 {
msg.gas = big.NewInt(50000000)
}
if msg.gasPrice.Cmp(common.Big0) == 0 {
msg.gasPrice = new(big.Int).Mul(big.NewInt(50), common.Shannon)
}
// Execute the call and return
vmenv, vmError, err := s.b.GetVMEnv(ctx, msg, state, header)
if err != nil {
return "0x", common.Big0, err
}
gp := new(core.GasPool).AddGas(common.MaxBig)
res, gas, err := core.ApplyMessage(vmenv, msg, gp)
if err := vmError(); err != nil {
return "0x", common.Big0, err
}
if len(res) == 0 { // backwards compatibility
return "0x", gas, err
}
return common.ToHex(res), gas, err
}
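For reference, the object doCall ultimately services over RPC is the usual eth_call parameter object; the keys below are assumed to mirror CallArgs' JSON tags, with placeholder addresses and data. A small sketch printing such a request body:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical eth_call request; all values are placeholders.
	req := map[string]interface{}{
		"jsonrpc": "2.0",
		"id":      1,
		"method":  "eth_call",
		"params": []interface{}{
			map[string]string{
				"from":     "0x0000000000000000000000000000000000000001",
				"to":       "0x0000000000000000000000000000000000000002",
				"gas":      "0x2dc6c0",
				"gasPrice": "0x0",
				"value":    "0x0",
				"data":     "0x",
			},
			"latest", // the block number handed to doCall
		},
	}
	body, _ := json.MarshalIndent(req, "", "  ")
	fmt.Println(string(body))
}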
// Call executes the given transaction on the state for the given block number.
@ -649,56 +685,63 @@ func FormatLogs(structLogs []vm.StructLog) []StructLogRes {
return formattedStructLogs
}
// TraceCall executes a call and returns the amount of gas, created logs and optionally returned values.
func (s *PublicBlockChainAPI) TraceCall(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber) (*ExecutionResult, error) {
if header := s.b.HeaderByNumber(blockNr); header != nil {
var addr common.Address
if args.From == (common.Address{}) {
accounts := s.b.AccountManager().Accounts()
if len(accounts) == 0 {
addr = common.Address{}
} else {
addr = accounts[0].Address
}
} else {
addr = args.From
}
// Assemble the CALL invocation
msg := callmsg{
addr: addr,
to: args.To,
gas: args.Gas.BigInt(),
gasPrice: args.GasPrice.BigInt(),
value: args.Value.BigInt(),
data: common.FromHex(args.Data),
}
if msg.gas.Cmp(common.Big0) == 0 {
msg.gas = big.NewInt(50000000)
}
if msg.gasPrice.Cmp(common.Big0) == 0 {
msg.gasPrice = new(big.Int).Mul(big.NewInt(50), common.Shannon)
}
// Execute the call and return
vmenv, vmError, err := s.b.GetVMEnv(ctx, msg, header)
if err != nil {
return nil, err
}
gp := new(core.GasPool).AddGas(common.MaxBig)
ret, gas, err := core.ApplyMessage(vmenv, msg, gp)
if err := vmError(); err != nil {
return nil, err
}
return &ExecutionResult{
Gas: gas,
ReturnValue: fmt.Sprintf("%x", ret),
StructLogs: FormatLogs(vmenv.StructLogs()),
}, nil
state, header, err := s.b.StateAndHeaderByNumber(blockNr)
if state == nil || err != nil {
return nil, err
}
return nil, nil
var addr common.Address
if args.From == (common.Address{}) {
accounts := s.b.AccountManager().Accounts()
if len(accounts) == 0 {
addr = common.Address{}
} else {
addr = accounts[0].Address
}
} else {
addr = args.From
}
nonce, err := state.GetNonce(ctx, addr)
if err != nil {
return nil, err
}
// Assemble the CALL invocation
msg := callmsg{
addr: addr,
nonce: nonce,
to: args.To,
gas: args.Gas.BigInt(),
gasPrice: args.GasPrice.BigInt(),
value: args.Value.BigInt(),
data: common.FromHex(args.Data),
}
if msg.gas.Cmp(common.Big0) == 0 {
msg.gas = big.NewInt(50000000)
}
if msg.gasPrice.Cmp(common.Big0) == 0 {
msg.gasPrice = new(big.Int).Mul(big.NewInt(50), common.Shannon)
}
// Execute the call and return
vmenv, vmError, err := s.b.GetVMEnv(ctx, msg, state, header)
if err != nil {
return nil, err
}
gp := new(core.GasPool).AddGas(common.MaxBig)
ret, gas, err := core.ApplyMessage(vmenv, msg, gp)
if err := vmError(); err != nil {
return nil, err
}
return &ExecutionResult{
Gas: gas,
ReturnValue: fmt.Sprintf("%x", ret),
StructLogs: FormatLogs(vmenv.StructLogs()),
}, nil
}
// rpcOutputBlock converts the given block to the RPC output which depends on fullTx. If inclTx is true transactions are
@ -829,7 +872,6 @@ func newRPCTransaction(b *types.Block, txHash common.Hash) (*RPCTransaction, err
// PublicTransactionPoolAPI exposes methods for the RPC interface
type PublicTransactionPoolAPI struct {
b Backend
txMu sync.Mutex
muPendingTxSubs sync.Mutex
pendingTxSubs map[string]rpc.Subscription
}
@ -917,7 +959,7 @@ func (s *PublicTransactionPoolAPI) GetTransactionByBlockHashAndIndex(ctx context
// GetTransactionCount returns the number of transactions the given address has sent for the given block number
func (s *PublicTransactionPoolAPI) GetTransactionCount(ctx context.Context, address common.Address, blockNr rpc.BlockNumber) (*rpc.HexNumber, error) {
state, err := s.b.StateByNumber(blockNr)
state, _, err := s.b.StateAndHeaderByNumber(blockNr)
if state == nil || err != nil {
return nil, err
}
@ -982,6 +1024,7 @@ func (s *PublicTransactionPoolAPI) GetTransactionByHash(ctx context.Context, txH
// GetTransactionReceipt returns the transaction receipt for the given transaction hash.
func (s *PublicTransactionPoolAPI) GetTransactionReceipt(txHash common.Hash) (map[string]interface{}, error) {
//fmt.Println("API GetTransactionReceipt", txHash)
receipt := core.GetReceipt(s.b.ChainDb(), txHash)
if receipt == nil {
glog.V(logger.Debug).Infof("receipt not found for transaction %s", txHash.Hex())
@ -989,12 +1032,14 @@ func (s *PublicTransactionPoolAPI) GetTransactionReceipt(txHash common.Hash) (ma
}
tx, _, err := getTransaction(s.b.ChainDb(), s.b, txHash)
//fmt.Println("getTransaction", err)
if err != nil {
glog.V(logger.Debug).Infof("%v\n", err)
return nil, nil
}
txBlock, blockIndex, index, err := getTransactionBlockData(s.b.ChainDb(), txHash)
//fmt.Println("getTransactionBlockData", txBlock, blockIndex, index, err)
if err != nil {
glog.V(logger.Debug).Infof("%v\n", err)
return nil, nil
@ -1041,6 +1086,7 @@ func (s *PublicTransactionPoolAPI) sign(addr common.Address, tx *types.Transacti
return tx.WithSignature(signature)
}
// SendTxArgs represents the arguments to submit a new transaction into the transaction pool.
type SendTxArgs struct {
From common.Address `json:"from"`
To *common.Address `json:"to"`
@ -1051,25 +1097,54 @@ type SendTxArgs struct {
Nonce *rpc.HexNumber `json:"nonce"`
}
// SendTransaction will create a transaction for the given transaction argument, sign it and submit it to the
// transaction pool.
func (s *PublicTransactionPoolAPI) SendTransaction(ctx context.Context, args SendTxArgs) (common.Hash, error) {
// prepareSendTxArgs is a helper function that fills in default values for unspecified tx fields.
func prepareSendTxArgs(ctx context.Context, args SendTxArgs, b Backend) (SendTxArgs, error) {
if args.Gas == nil {
args.Gas = rpc.NewHexNumber(defaultGas)
}
if args.GasPrice == nil {
price, err := s.b.SuggestPrice(ctx)
price, err := b.SuggestPrice(ctx)
if err != nil {
return common.Hash{}, err
return args, err
}
args.GasPrice = rpc.NewHexNumber(price)
}
if args.Value == nil {
args.Value = rpc.NewHexNumber(0)
}
return args, nil
}
s.txMu.Lock()
defer s.txMu.Unlock()
// submitTransaction is a helper function that submits tx to txPool and creates a log entry.
func submitTransaction(ctx context.Context, b Backend, tx *types.Transaction, signature []byte) (common.Hash, error) {
signedTx, err := tx.WithSignature(signature)
if err != nil {
return common.Hash{}, err
}
if err := b.SendTx(ctx, signedTx); err != nil {
return common.Hash{}, err
}
if signedTx.To() == nil {
from, _ := signedTx.From()
addr := crypto.CreateAddress(from, signedTx.Nonce())
glog.V(logger.Info).Infof("Tx(%s) created: %s\n", signedTx.Hash().Hex(), addr.Hex())
} else {
glog.V(logger.Info).Infof("Tx(%s) to: %s\n", signedTx.Hash().Hex(), tx.To().Hex())
}
return signedTx.Hash(), nil
}
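The contract-creation log line above works because a create address depends only on the sender and nonce; a tiny sketch using the same crypto helper, with a placeholder sender:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Hypothetical sender and nonce; CreateAddress is the helper
	// submitTransaction uses to log the address of a deployed contract.
	from := common.HexToAddress("0x0000000000000000000000000000000000000001")
	fmt.Println(crypto.CreateAddress(from, 0).Hex())
}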
// SendTransaction creates a transaction for the given argument, signs it and submits it to the
// transaction pool.
func (s *PublicTransactionPoolAPI) SendTransaction(ctx context.Context, args SendTxArgs) (common.Hash, error) {
var err error
args, err = prepareSendTxArgs(ctx, args, s.b)
if err != nil {
return common.Hash{}, err
}
if args.Nonce == nil {
nonce, err := s.b.GetPoolNonce(ctx, args.From)
@ -1080,31 +1155,18 @@ func (s *PublicTransactionPoolAPI) SendTransaction(ctx context.Context, args Sen
}
var tx *types.Transaction
contractCreation := (args.To == nil)
if contractCreation {
if args.To == nil {
tx = types.NewContractCreation(args.Nonce.Uint64(), args.Value.BigInt(), args.Gas.BigInt(), args.GasPrice.BigInt(), common.FromHex(args.Data))
} else {
tx = types.NewTransaction(args.Nonce.Uint64(), *args.To, args.Value.BigInt(), args.Gas.BigInt(), args.GasPrice.BigInt(), common.FromHex(args.Data))
}
signedTx, err := s.sign(args.From, tx)
signature, err := s.b.AccountManager().Sign(args.From, tx.SigHash().Bytes())
if err != nil {
return common.Hash{}, err
}
if err := s.b.SendTx(ctx, signedTx); err != nil {
return common.Hash{}, nil
}
if contractCreation {
addr := crypto.CreateAddress(args.From, args.Nonce.Uint64())
glog.V(logger.Info).Infof("Tx(%s) created: %s\n", signedTx.Hash().Hex(), addr.Hex())
} else {
glog.V(logger.Info).Infof("Tx(%s) to: %s\n", signedTx.Hash().Hex(), tx.To().Hex())
}
return signedTx.Hash(), nil
return submitTransaction(ctx, s.b, tx, signature)
}
// SendRawTransaction will add the signed transaction to the transaction pool.
@ -1133,13 +1195,14 @@ func (s *PublicTransactionPoolAPI) SendRawTransaction(ctx context.Context, encod
return tx.Hash().Hex(), nil
}
// Sign will sign the given data string with the given address. The account corresponding with the address needs to
// be unlocked.
func (s *PublicTransactionPoolAPI) Sign(addr common.Address, data string) (string, error) {
signature, error := s.b.AccountManager().Sign(addr, common.HexToHash(data).Bytes())
// Sign signs the given hash using the key that matches the address. The key must be
// unlocked in order to sign the hash.
func (s *PublicTransactionPoolAPI) Sign(addr common.Address, hash common.Hash) (string, error) {
signature, error := s.b.AccountManager().Sign(addr, hash[:])
return common.ToHex(signature), error
}
// SignTransactionArgs represents the arguments to sign a transaction.
type SignTransactionArgs struct {
From common.Address
To *common.Address
@ -1166,6 +1229,7 @@ type Tx struct {
Hash common.Hash `json:"hash"`
}
// UnmarshalJSON parses JSON data into tx.
func (tx *Tx) UnmarshalJSON(b []byte) (err error) {
req := struct {
To *common.Address `json:"to"`
@ -1206,8 +1270,7 @@ func (tx *Tx) UnmarshalJSON(b []byte) (err error) {
tx.GasPrice = rpc.NewHexNumber(int64(50000000000))
}
contractCreation := (req.To == nil)
if contractCreation {
if req.To == nil {
tx.tx = types.NewContractCreation(tx.Nonce.Uint64(), tx.Value.BigInt(), tx.GasLimit.BigInt(), tx.GasPrice.BigInt(), data)
} else {
tx.tx = types.NewTransaction(tx.Nonce.Uint64(), *tx.To, tx.Value.BigInt(), tx.GasLimit.BigInt(), tx.GasPrice.BigInt(), data)
@ -1216,6 +1279,7 @@ func (tx *Tx) UnmarshalJSON(b []byte) (err error) {
return nil
}
// SignTransactionResult represents an RLP encoded signed transaction.
type SignTransactionResult struct {
Raw string `json:"raw"`
Tx *Tx `json:"tx"`
@ -1239,7 +1303,7 @@ func newTx(t *types.Transaction) *Tx {
// SignTransaction will sign the given transaction with the from account.
// The node needs to have the private key of the account corresponding with
// the given from address and it needs to be unlocked.
func (s *PublicTransactionPoolAPI) SignTransaction(ctx context.Context, args *SignTransactionArgs) (*SignTransactionResult, error) {
func (s *PublicTransactionPoolAPI) SignTransaction(ctx context.Context, args SignTransactionArgs) (*SignTransactionResult, error) {
if args.Gas == nil {
args.Gas = rpc.NewHexNumber(defaultGas)
}
@ -1254,9 +1318,6 @@ func (s *PublicTransactionPoolAPI) SignTransaction(ctx context.Context, args *Si
args.Value = rpc.NewHexNumber(0)
}
s.txMu.Lock()
defer s.txMu.Unlock()
if args.Nonce == nil {
nonce, err := s.b.GetPoolNonce(ctx, args.From)
if err != nil {
@ -1266,9 +1327,7 @@ func (s *PublicTransactionPoolAPI) SignTransaction(ctx context.Context, args *Si
}
var tx *types.Transaction
contractCreation := (args.To == nil)
if contractCreation {
if args.To == nil {
tx = types.NewContractCreation(args.Nonce.Uint64(), args.Value.BigInt(), args.Gas.BigInt(), args.GasPrice.BigInt(), common.FromHex(args.Data))
} else {
tx = types.NewTransaction(args.Nonce.Uint64(), *args.To, args.Value.BigInt(), args.Gas.BigInt(), args.GasPrice.BigInt(), common.FromHex(args.Data))
@ -1284,14 +1343,14 @@ func (s *PublicTransactionPoolAPI) SignTransaction(ctx context.Context, args *Si
return nil, err
}
return &SignTransactionResult{"0x" + common.Bytes2Hex(data), newTx(tx)}, nil
return &SignTransactionResult{"0x" + common.Bytes2Hex(data), newTx(signedTx)}, nil
}
// PendingTransactions returns the transactions that are in the transaction pool and have a from address that is one of
// the accounts this node manages.
func (s *PublicTransactionPoolAPI) PendingTransactions() []*RPCTransaction {
pending := s.b.GetPoolTransactions()
transactions := make([]*RPCTransaction, 0)
transactions := make([]*RPCTransaction, 0, len(pending))
for _, tx := range pending {
from, _ := tx.FromFrontier()
if s.b.AccountManager().HasAddress(from) {
@ -1301,7 +1360,7 @@ func (s *PublicTransactionPoolAPI) PendingTransactions() []*RPCTransaction {
return transactions
}
// NewPendingTransaction creates a subscription that is triggered each time a transaction enters the transaction pool
// NewPendingTransactions creates a subscription that is triggered each time a transaction enters the transaction pool
// and is sent from one of the accounts this node manages.
func (s *PublicTransactionPoolAPI) NewPendingTransactions(ctx context.Context) (rpc.Subscription, error) {
notifier, supported := rpc.NotifierFromContext(ctx)
@ -1341,8 +1400,7 @@ func (s *PublicTransactionPoolAPI) Resend(ctx context.Context, tx *Tx, gasPrice,
}
var newTx *types.Transaction
contractCreation := (tx.tx.To() == nil)
if contractCreation {
if tx.tx.To() == nil {
newTx = types.NewContractCreation(tx.tx.Nonce(), tx.tx.Value(), gasPrice.BigInt(), gasLimit.BigInt(), tx.tx.Data())
} else {
newTx = types.NewTransaction(tx.tx.Nonce(), *tx.tx.To(), tx.tx.Value(), gasPrice.BigInt(), gasLimit.BigInt(), tx.tx.Data())
@ -1368,22 +1426,26 @@ func (s *PublicTransactionPoolAPI) Resend(ctx context.Context, tx *Tx, gasPrice,
// PrivateAdminAPI is the collection of Ethereum APIs exposed over the private
// admin endpoint.
type PrivateAdminAPI struct {
b Backend
b Backend
solcPath *string
solc **compiler.Solidity
}
// NewPrivateAdminAPI creates a new API definition for the private admin methods
// of the Ethereum service.
func NewPrivateAdminAPI(b Backend) *PrivateAdminAPI {
return &PrivateAdminAPI{b: b}
func NewPrivateAdminAPI(b Backend, solcPath *string, solc **compiler.Solidity) *PrivateAdminAPI {
return &PrivateAdminAPI{b, solcPath, solc}
}
// SetSolc sets the Solidity compiler path to be used by the node.
func (api *PrivateAdminAPI) SetSolc(path string) (string, error) {
solc, err := api.b.SetSolc(path)
var err error
*api.solcPath = path
*api.solc, err = compiler.New(path)
if err != nil {
return "", err
}
return solc.Info(), nil
return (*api.solc).Info(), nil
}
// PublicDebugAPI is the collection of Ethereum APIs exposed over the public
@ -1482,12 +1544,12 @@ func (s *PublicNetAPI) Listening() bool {
return true // always listening
}
// Peercount returns the number of connected peers
// PeerCount returns the number of connected peers
func (s *PublicNetAPI) PeerCount() *rpc.HexNumber {
return rpc.NewHexNumber(s.net.PeerCount())
}
// ProtocolVersion returns the current ethereum protocol version.
// Version returns the current ethereum protocol version.
func (s *PublicNetAPI) Version() string {
return fmt.Sprintf("%d", s.networkVersion)
}

View File

@ -1,18 +1,18 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of go-ethereum.
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package ethapi implements the general Ethereum API functions.
package ethapi
@ -37,8 +37,6 @@ import (
// both full and light clients) with access to necessary functions.
type Backend interface {
// general Ethereum API
Solc() (*compiler.Solidity, error)
SetSolc(solcPath string) (*compiler.Solidity, error)
Downloader() *downloader.Downloader
ProtocolVersion() int
SuggestPrice(ctx context.Context) (*big.Int, error)
@ -49,12 +47,11 @@ type Backend interface {
SetHead(number uint64)
HeaderByNumber(blockNr rpc.BlockNumber) *types.Header
BlockByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Block, error)
StateByNumber(blockNr rpc.BlockNumber) (State, error)
StateAndHeaderByNumber(blockNr rpc.BlockNumber) (State, *types.Header, error)
GetBlock(ctx context.Context, blockHash common.Hash) (*types.Block, error)
GetState(header *types.Header) (State, error)
GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error)
GetTd(blockHash common.Hash) *big.Int
GetVMEnv(ctx context.Context, msg core.Message, header *types.Header) (vm.Environment, func() error, error)
GetVMEnv(ctx context.Context, msg core.Message, state State, header *types.Header) (vm.Environment, func() error, error)
// TxPool API
SendTx(ctx context.Context, signedTx *types.Transaction) error
RemoveTx(txHash common.Hash)
@ -72,12 +69,12 @@ type State interface {
GetNonce(ctx context.Context, addr common.Address) (uint64, error)
}
func GetAPIs(apiBackend Backend) []rpc.API {
func GetAPIs(apiBackend Backend, solcPath *string, solc **compiler.Solidity) []rpc.API {
return []rpc.API{
{
Namespace: "eth",
Version: "1.0",
Service: NewPublicEthereumAPI(apiBackend),
Service: NewPublicEthereumAPI(apiBackend, solcPath, solc),
Public: true,
}, {
Namespace: "eth",
@ -97,7 +94,7 @@ func GetAPIs(apiBackend Backend) []rpc.API {
}, {
Namespace: "admin",
Version: "1.0",
Service: NewPrivateAdminAPI(apiBackend),
Service: NewPrivateAdminAPI(apiBackend, solcPath, solc),
}, {
Namespace: "debug",
Version: "1.0",
@ -115,7 +112,7 @@ func GetAPIs(apiBackend Backend) []rpc.API {
}, {
Namespace: "personal",
Version: "1.0",
Service: NewPrivateAccountAPI(apiBackend.AccountManager()),
Service: NewPrivateAccountAPI(apiBackend),
Public: false,
},
}
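GetAPIs now threads the solc pointers into the services it constructs and returns plain descriptors; the node layer decides which endpoints each namespace is exposed on. A self-contained sketch of consuming such a descriptor slice (an illustrative struct mirroring the Namespace/Version/Service/Public fields above, not the real rpc.API):

package main

import "fmt"

// api mirrors the fields used by ethapi.GetAPIs; the concrete rpc.API type
// is assumed, not reproduced here.
type api struct {
    Namespace string
    Version   string
    Service   interface{}
    Public    bool
}

func main() {
    apis := []api{
        {Namespace: "eth", Version: "1.0", Service: struct{}{}, Public: true},
        {Namespace: "admin", Version: "1.0", Service: struct{}{}},
    }
    // A node would walk this slice and expose public services on the
    // external endpoint while keeping the rest on the private one.
    for _, a := range apis {
        endpoint := "ipc-only"
        if a.Public {
            endpoint = "http/ws"
        }
        fmt.Printf("register %s_%s on %s\n", a.Namespace, a.Version, endpoint)
    }
}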

File diff suppressed because one or more lines are too long

View File

@ -1,74 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package jsre
import (
"sort"
"strings"
"github.com/robertkrimen/otto"
)
// CompleteKeywords returns potential continuations for the given line. Since line is
// evaluated, callers need to make sure that evaluating line does not have side effects.
func (jsre *JSRE) CompleteKeywords(line string) []string {
var results []string
jsre.Do(func(vm *otto.Otto) {
results = getCompletions(vm, line)
})
return results
}
func getCompletions(vm *otto.Otto, line string) (results []string) {
parts := strings.Split(line, ".")
objRef := "this"
prefix := line
if len(parts) > 1 {
objRef = strings.Join(parts[0:len(parts)-1], ".")
prefix = parts[len(parts)-1]
}
obj, _ := vm.Object(objRef)
if obj == nil {
return nil
}
iterOwnAndConstructorKeys(vm, obj, func(k string) {
if strings.HasPrefix(k, prefix) {
if objRef == "this" {
results = append(results, k)
} else {
results = append(results, strings.Join(parts[:len(parts)-1], ".")+"."+k)
}
}
})
// Append opening parenthesis (for functions) or dot (for objects)
// if the line itself is the only completion.
if len(results) == 1 && results[0] == line {
obj, _ := vm.Object(line)
if obj != nil {
if obj.Class() == "Function" {
results[0] += "("
} else {
results[0] += "."
}
}
}
sort.Strings(results)
return results
}
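getCompletions first splits the input on dots: everything before the last dot names the object to inspect, and the trailing fragment is the prefix candidates must match. A self-contained sketch of just that splitting step:

package main

import (
    "fmt"
    "strings"
)

// splitRef mirrors the first step of getCompletions: the text before the
// last dot is the object to inspect, the trailing fragment is the prefix
// that candidate keys must start with.
func splitRef(line string) (objRef, prefix string) {
    parts := strings.Split(line, ".")
    if len(parts) == 1 {
        return "this", line
    }
    return strings.Join(parts[:len(parts)-1], "."), parts[len(parts)-1]
}

func main() {
    obj, prefix := splitRef("web3.eth.getBl")
    fmt.Println(obj, prefix) // web3.eth getBl
}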

File diff suppressed because it is too large

View File

@ -1,322 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package jsre provides an execution environment for JavaScript.
package jsre
import (
crand "crypto/rand"
"encoding/binary"
"fmt"
"io/ioutil"
"math/rand"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/robertkrimen/otto"
)
/*
JSRE is a generic JS runtime environment embedding the otto JS interpreter.
It provides some helper functions to
- load code from files
- run code snippets
- require libraries
- bind native go objects
*/
type JSRE struct {
assetPath string
evalQueue chan *evalReq
stopEventLoop chan bool
loopWg sync.WaitGroup
}
// jsTimer is a single timer instance with a callback function
type jsTimer struct {
timer *time.Timer
duration time.Duration
interval bool
call otto.FunctionCall
}
// evalReq is a serialized vm execution request processed by runEventLoop.
type evalReq struct {
fn func(vm *otto.Otto)
done chan bool
}
// New creates a JSRE instance. The runtime must be stopped with Stop() after
// use and cannot be used after stopping.
func New(assetPath string) *JSRE {
re := &JSRE{
assetPath: assetPath,
evalQueue: make(chan *evalReq),
stopEventLoop: make(chan bool),
}
re.loopWg.Add(1)
go re.runEventLoop()
re.Set("loadScript", re.loadScript)
re.Set("inspect", prettyPrintJS)
return re
}
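JSRE's Set/Run helpers ultimately delegate to the embedded otto vm through the event loop. For orientation, a minimal bind-and-run sketch against otto directly, bypassing JSRE (the otto dependency and its API are assumed from the imports above):

package main

import (
    "fmt"

    "github.com/robertkrimen/otto"
)

func main() {
    vm := otto.New()

    // Bind a native Go function, the same thing JSRE.Set does via its
    // event loop before handing the vm to scripts.
    vm.Set("double", func(call otto.FunctionCall) otto.Value {
        n, _ := call.Argument(0).ToInteger()
        v, _ := otto.ToValue(n * 2)
        return v
    })

    result, err := vm.Run(`double(21)`)
    if err != nil {
        panic(err)
    }
    fmt.Println(result.String()) // 42
}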
// randomSource returns a pseudo random value generator.
func randomSource() *rand.Rand {
bytes := make([]byte, 8)
seed := time.Now().UnixNano()
if _, err := crand.Read(bytes); err == nil {
seed = int64(binary.LittleEndian.Uint64(bytes))
}
src := rand.NewSource(seed)
return rand.New(src)
}
// This function runs the main event loop from a goroutine that is started
// when JSRE is created. Use Stop() before exiting to properly stop it.
// The event loop processes vm access requests from the evalQueue in a
// serialized way and calls timer callback functions at the appropriate time.
// Exported functions always access the vm through the event queue. Calling
// the otto vm's functions directly circumvents the queue; do that only from
// a routine that was itself already invoked from JS through an RPC call.
func (self *JSRE) runEventLoop() {
vm := otto.New()
r := randomSource()
vm.SetRandomSource(r.Float64)
registry := map[*jsTimer]*jsTimer{}
ready := make(chan *jsTimer)
newTimer := func(call otto.FunctionCall, interval bool) (*jsTimer, otto.Value) {
delay, _ := call.Argument(1).ToInteger()
if 0 >= delay {
delay = 1
}
timer := &jsTimer{
duration: time.Duration(delay) * time.Millisecond,
call: call,
interval: interval,
}
registry[timer] = timer
timer.timer = time.AfterFunc(timer.duration, func() {
ready <- timer
})
value, err := call.Otto.ToValue(timer)
if err != nil {
panic(err)
}
return timer, value
}
setTimeout := func(call otto.FunctionCall) otto.Value {
_, value := newTimer(call, false)
return value
}
setInterval := func(call otto.FunctionCall) otto.Value {
_, value := newTimer(call, true)
return value
}
clearTimeout := func(call otto.FunctionCall) otto.Value {
timer, _ := call.Argument(0).Export()
if timer, ok := timer.(*jsTimer); ok {
timer.timer.Stop()
delete(registry, timer)
}
return otto.UndefinedValue()
}
vm.Set("_setTimeout", setTimeout)
vm.Set("_setInterval", setInterval)
vm.Run(`var setTimeout = function(args) {
if (arguments.length < 1) {
throw TypeError("Failed to execute 'setTimeout': 1 argument required, but only 0 present.");
}
return _setTimeout.apply(this, arguments);
}`)
vm.Run(`var setInterval = function(args) {
if (arguments.length < 1) {
throw TypeError("Failed to execute 'setInterval': 1 argument required, but only 0 present.");
}
return _setInterval.apply(this, arguments);
}`)
vm.Set("clearTimeout", clearTimeout)
vm.Set("clearInterval", clearTimeout)
var waitForCallbacks bool
loop:
for {
select {
case timer := <-ready:
// execute callback, remove/reschedule the timer
var arguments []interface{}
if len(timer.call.ArgumentList) > 2 {
tmp := timer.call.ArgumentList[2:]
arguments = make([]interface{}, 2+len(tmp))
for i, value := range tmp {
arguments[i+2] = value
}
} else {
arguments = make([]interface{}, 1)
}
arguments[0] = timer.call.ArgumentList[0]
_, err := vm.Call(`Function.call.call`, nil, arguments...)
if err != nil {
fmt.Println("js error:", err, arguments)
}
_, inreg := registry[timer] // when clearInterval is called from within the callback don't reset it
if timer.interval && inreg {
timer.timer.Reset(timer.duration)
} else {
delete(registry, timer)
if waitForCallbacks && (len(registry) == 0) {
break loop
}
}
case req := <-self.evalQueue:
// run the code, send the result back
req.fn(vm)
close(req.done)
if waitForCallbacks && (len(registry) == 0) {
break loop
}
case waitForCallbacks = <-self.stopEventLoop:
if !waitForCallbacks || (len(registry) == 0) {
break loop
}
}
}
for _, timer := range registry {
timer.timer.Stop()
delete(registry, timer)
}
self.loopWg.Done()
}
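The event loop above is the only goroutine that touches the otto vm; everything else funnels closures through evalQueue and waits on a done channel. A minimal, self-contained sketch of that request-queue pattern (illustrative types, not part of the commit):

package main

import "fmt"

// req mirrors evalReq: a closure to run against the owned resource plus a
// channel closed once it has executed.
type req struct {
    fn   func(counter *int)
    done chan struct{}
}

func main() {
    queue := make(chan *req)

    // The "event loop": the only goroutine allowed to touch counter.
    go func() {
        counter := 0
        for r := range queue {
            r.fn(&counter)
            close(r.done)
        }
    }()

    // do serializes access the same way JSRE.Do serializes vm access.
    do := func(fn func(*int)) {
        r := &req{fn: fn, done: make(chan struct{})}
        queue <- r
        <-r.done
    }

    for i := 0; i < 3; i++ {
        do(func(c *int) { *c++ })
    }
    do(func(c *int) { fmt.Println(*c) }) // 3
}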
// Do executes the given function on the JS event loop.
func (self *JSRE) Do(fn func(*otto.Otto)) {
done := make(chan bool)
req := &evalReq{fn, done}
self.evalQueue <- req
<-done
}
// Stop stops the event loop before exit, optionally waiting for all timers
// to expire.
func (self *JSRE) Stop(waitForCallbacks bool) {
self.stopEventLoop <- waitForCallbacks
self.loopWg.Wait()
}
// Exec(file) loads and runs the contents of a file.
// If a relative path is given, the jsre's assetPath is used.
func (self *JSRE) Exec(file string) error {
code, err := ioutil.ReadFile(common.AbsolutePath(self.assetPath, file))
if err != nil {
return err
}
var script *otto.Script
self.Do(func(vm *otto.Otto) {
script, err = vm.Compile(file, code)
if err != nil {
return
}
_, err = vm.Run(script)
})
return err
}
// Bind assigns value v to a variable in the JS environment
// This method is deprecated, use Set.
func (self *JSRE) Bind(name string, v interface{}) error {
return self.Set(name, v)
}
// Run runs a piece of JS code.
func (self *JSRE) Run(code string) (v otto.Value, err error) {
self.Do(func(vm *otto.Otto) { v, err = vm.Run(code) })
return v, err
}
// Get returns the value of a variable in the JS environment.
func (self *JSRE) Get(ns string) (v otto.Value, err error) {
self.Do(func(vm *otto.Otto) { v, err = vm.Get(ns) })
return v, err
}
// Set assigns value v to a variable in the JS environment.
func (self *JSRE) Set(ns string, v interface{}) (err error) {
self.Do(func(vm *otto.Otto) { err = vm.Set(ns, v) })
return err
}
// loadScript executes a JS script from inside the currently executing JS code.
func (self *JSRE) loadScript(call otto.FunctionCall) otto.Value {
file, err := call.Argument(0).ToString()
if err != nil {
// TODO: throw exception
return otto.FalseValue()
}
file = common.AbsolutePath(self.assetPath, file)
source, err := ioutil.ReadFile(file)
if err != nil {
// TODO: throw exception
return otto.FalseValue()
}
if _, err := compileAndRun(call.Otto, file, source); err != nil {
// TODO: throw exception
fmt.Println("err:", err)
return otto.FalseValue()
}
// TODO: return evaluation result
return otto.TrueValue()
}
// EvalAndPrettyPrint evaluates code and pretty prints the result to
// standard output.
func (self *JSRE) EvalAndPrettyPrint(code string) (err error) {
self.Do(func(vm *otto.Otto) {
var val otto.Value
val, err = vm.Run(code)
if err != nil {
return
}
prettyPrint(vm, val)
fmt.Println()
})
return err
}
// Compile compiles and then runs a piece of JS code.
func (self *JSRE) Compile(filename string, src interface{}) (err error) {
self.Do(func(vm *otto.Otto) { _, err = compileAndRun(vm, filename, src) })
return err
}
func compileAndRun(vm *otto.Otto, filename string, src interface{}) (otto.Value, error) {
script, err := vm.Compile(filename, src)
if err != nil {
return otto.Value{}, err
}
return vm.Run(script)
}

View File

@ -1,258 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package jsre
import (
"fmt"
"sort"
"strconv"
"strings"
"github.com/fatih/color"
"github.com/robertkrimen/otto"
)
const (
maxPrettyPrintLevel = 3
indentString = " "
)
var (
functionColor = color.New(color.FgMagenta)
specialColor = color.New(color.Bold)
numberColor = color.New(color.FgRed)
stringColor = color.New(color.FgGreen)
)
// these fields are hidden when printing objects.
var boringKeys = map[string]bool{
"valueOf": true,
"toString": true,
"toLocaleString": true,
"hasOwnProperty": true,
"isPrototypeOf": true,
"propertyIsEnumerable": true,
"constructor": true,
}
// prettyPrint writes value to standard output.
func prettyPrint(vm *otto.Otto, value otto.Value) {
ppctx{vm}.printValue(value, 0, false)
}
func prettyPrintJS(call otto.FunctionCall) otto.Value {
for _, v := range call.ArgumentList {
prettyPrint(call.Otto, v)
fmt.Println()
}
return otto.UndefinedValue()
}
type ppctx struct{ vm *otto.Otto }
func (ctx ppctx) indent(level int) string {
return strings.Repeat(indentString, level)
}
func (ctx ppctx) printValue(v otto.Value, level int, inArray bool) {
switch {
case v.IsObject():
ctx.printObject(v.Object(), level, inArray)
case v.IsNull():
specialColor.Print("null")
case v.IsUndefined():
specialColor.Print("undefined")
case v.IsString():
s, _ := v.ToString()
stringColor.Printf("%q", s)
case v.IsBoolean():
b, _ := v.ToBoolean()
specialColor.Printf("%t", b)
case v.IsNaN():
numberColor.Printf("NaN")
case v.IsNumber():
s, _ := v.ToString()
numberColor.Printf("%s", s)
default:
fmt.Printf("<unprintable>")
}
}
func (ctx ppctx) printObject(obj *otto.Object, level int, inArray bool) {
switch obj.Class() {
case "Array":
lv, _ := obj.Get("length")
len, _ := lv.ToInteger()
if len == 0 {
fmt.Printf("[]")
return
}
if level > maxPrettyPrintLevel {
fmt.Print("[...]")
return
}
fmt.Print("[")
for i := int64(0); i < len; i++ {
el, err := obj.Get(strconv.FormatInt(i, 10))
if err == nil {
ctx.printValue(el, level+1, true)
}
if i < len-1 {
fmt.Printf(", ")
}
}
fmt.Print("]")
case "Object":
// Print values from bignumber.js as regular numbers.
if ctx.isBigNumber(obj) {
numberColor.Print(toString(obj))
return
}
// Otherwise, print all fields indented, but stop if we're too deep.
keys := ctx.fields(obj)
if len(keys) == 0 {
fmt.Print("{}")
return
}
if level > maxPrettyPrintLevel {
fmt.Print("{...}")
return
}
fmt.Println("{")
for i, k := range keys {
v, _ := obj.Get(k)
fmt.Printf("%s%s: ", ctx.indent(level+1), k)
ctx.printValue(v, level+1, false)
if i < len(keys)-1 {
fmt.Printf(",")
}
fmt.Println()
}
if inArray {
level--
}
fmt.Printf("%s}", ctx.indent(level))
case "Function":
// Use toString() to display the argument list if possible.
if robj, err := obj.Call("toString"); err != nil {
functionColor.Print("function()")
} else {
desc := strings.Trim(strings.Split(robj.String(), "{")[0], " \t\n")
desc = strings.Replace(desc, " (", "(", 1)
functionColor.Print(desc)
}
case "RegExp":
stringColor.Print(toString(obj))
default:
if v, _ := obj.Get("toString"); v.IsFunction() && level <= maxPrettyPrintLevel {
s, _ := obj.Call("toString")
fmt.Printf("<%s %s>", obj.Class(), s.String())
} else {
fmt.Printf("<%s>", obj.Class())
}
}
}
func (ctx ppctx) fields(obj *otto.Object) []string {
var (
vals, methods []string
seen = make(map[string]bool)
)
add := func(k string) {
if seen[k] || boringKeys[k] || strings.HasPrefix(k, "_") {
return
}
seen[k] = true
if v, _ := obj.Get(k); v.IsFunction() {
methods = append(methods, k)
} else {
vals = append(vals, k)
}
}
iterOwnAndConstructorKeys(ctx.vm, obj, add)
sort.Strings(vals)
sort.Strings(methods)
return append(vals, methods...)
}
func iterOwnAndConstructorKeys(vm *otto.Otto, obj *otto.Object, f func(string)) {
seen := make(map[string]bool)
iterOwnKeys(vm, obj, func(prop string) {
seen[prop] = true
f(prop)
})
if cp := constructorPrototype(obj); cp != nil {
iterOwnKeys(vm, cp, func(prop string) {
if !seen[prop] {
f(prop)
}
})
}
}
func iterOwnKeys(vm *otto.Otto, obj *otto.Object, f func(string)) {
Object, _ := vm.Object("Object")
rv, _ := Object.Call("getOwnPropertyNames", obj.Value())
gv, _ := rv.Export()
switch gv := gv.(type) {
case []interface{}:
for _, v := range gv {
f(v.(string))
}
case []string:
for _, v := range gv {
f(v)
}
default:
panic(fmt.Errorf("Object.getOwnPropertyNames returned unexpected type %T", gv))
}
}
func (ctx ppctx) isBigNumber(v *otto.Object) bool {
// Handle numbers with custom constructor.
if v, _ := v.Get("constructor"); v.Object() != nil {
if strings.HasPrefix(toString(v.Object()), "function BigNumber") {
return true
}
}
// Handle default constructor.
BigNumber, _ := ctx.vm.Object("BigNumber.prototype")
if BigNumber == nil {
return false
}
bv, _ := BigNumber.Call("isPrototypeOf", v)
b, _ := bv.ToBoolean()
return b
}
func toString(obj *otto.Object) string {
s, _ := obj.Call("toString")
return s.String()
}
func constructorPrototype(obj *otto.Object) *otto.Object {
if v, _ := obj.Get("constructor"); v.Object() != nil {
if v, _ = v.Object().Get("prototype"); v.Object() != nil {
return v.Object()
}
}
return nil
}

View File

@ -21,13 +21,12 @@ import (
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/compiler"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/gasprice"
"github.com/ethereum/go-ethereum/ethapi"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/light"
@ -37,8 +36,6 @@ import (
type LesApiBackend struct {
eth *LightNodeService
SolcPath string
solc *compiler.Solidity
gpo *gasprice.LightPriceOracle
}
@ -62,37 +59,28 @@ func (b *LesApiBackend) BlockByNumber(ctx context.Context, blockNr rpc.BlockNumb
return b.GetBlock(ctx, header.Hash())
}
func (b *LesApiBackend) StateByNumber(blockNr rpc.BlockNumber) (ethapi.State, error) {
func (b *LesApiBackend) StateAndHeaderByNumber(blockNr rpc.BlockNumber) (ethapi.State, *types.Header, error) {
header := b.HeaderByNumber(blockNr)
if header == nil {
return nil, nil
return nil, nil, nil
}
return light.NewLightState(light.StateTrieID(header), b.eth.odr), nil
return light.NewLightState(light.StateTrieID(header), b.eth.odr), header, nil
}
func (b *LesApiBackend) GetBlock(ctx context.Context, blockHash common.Hash) (*types.Block, error) {
return b.eth.blockchain.GetBlock(ctx, blockHash)
}
func (b *LesApiBackend) GetState(header *types.Header) (ethapi.State, error) {
id := &light.TrieID{
BlockHash: header.Hash(),
Root: header.Root,
}
return light.NewLightState(id, b.eth.odr), nil
return b.eth.blockchain.GetBlockByHash(ctx, blockHash)
}
func (b *LesApiBackend) GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) {
return light.GetBlockReceipts(ctx, b.eth.odr, blockHash)
return light.GetBlockReceipts(ctx, b.eth.odr, blockHash, core.GetBlockNumber(b.eth.chainDb, blockHash))
}
func (b *LesApiBackend) GetTd(blockHash common.Hash) *big.Int {
return b.eth.blockchain.GetTd(blockHash)
return b.eth.blockchain.GetTdByHash(blockHash)
}
func (b *LesApiBackend) GetVMEnv(ctx context.Context, msg core.Message, header *types.Header) (vm.Environment, func() error, error) {
stateDb := light.NewLightState(light.StateTrieID(header), b.eth.odr)
stateDb = stateDb.Copy()
func (b *LesApiBackend) GetVMEnv(ctx context.Context, msg core.Message, state ethapi.State, header *types.Header) (vm.Environment, func() error, error) {
stateDb := state.(*light.LightState).Copy()
addr, _ := msg.From()
from, err := stateDb.GetOrNewStateObject(ctx, addr)
if err != nil {
@ -128,21 +116,7 @@ func (b *LesApiBackend) Stats() (pending int, queued int) {
}
func (b *LesApiBackend) TxPoolContent() (map[common.Address]map[uint64][]*types.Transaction, map[common.Address]map[uint64][]*types.Transaction) {
return make(map[common.Address]map[uint64][]*types.Transaction), make(map[common.Address]map[uint64][]*types.Transaction)
}
func (b *LesApiBackend) Solc() (*compiler.Solidity, error) {
var err error
if b.solc == nil {
b.solc, err = compiler.New(b.SolcPath)
}
return b.solc, err
}
func (b *LesApiBackend) SetSolc(solcPath string) (*compiler.Solidity, error) {
b.SolcPath = solcPath
b.solc = nil
return b.Solc()
return b.eth.txPool.Content()
}
func (b *LesApiBackend) Downloader() *downloader.Downloader {

View File

@ -24,14 +24,16 @@ import (
"github.com/ethereum/ethash"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common/compiler"
"github.com/ethereum/go-ethereum/common/httpclient"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/filters"
"github.com/ethereum/go-ethereum/eth/gasprice"
"github.com/ethereum/go-ethereum/ethapi"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/light"
@ -54,12 +56,14 @@ type LightNodeService struct {
chainDb ethdb.Database // Block chain database
dappDb ethdb.Database // Dapp database
apiBackend *LesApiBackend
ApiBackend *LesApiBackend
eventMux *event.TypeMux
pow *ethash.Ethash
httpclient *httpclient.HTTPClient
accountManager *accounts.Manager
solcPath string
solc *compiler.Solidity
NatSpec bool
PowTest bool
@ -68,7 +72,7 @@ type LightNodeService struct {
}
func New(ctx *node.ServiceContext, config *eth.Config) (*LightNodeService, error) {
chainDb, dappDb, err := eth.CreateDBs(ctx, config)
chainDb, dappDb, err := eth.CreateDBs(ctx, config, "lightchaindata")
if err != nil {
return nil, err
}
@ -95,6 +99,7 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightNodeService, error
netVersionId: config.NetworkId,
NatSpec: config.NatSpec,
PowTest: config.PowTest,
solcPath: config.SolcPath,
}
if config.ChainConfig == nil {
@ -117,22 +122,26 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightNodeService, error
if eth.protocolManager, err = NewProtocolManager(eth.chainConfig, config.LightMode, config.NetworkId, eth.eventMux, eth.pow, eth.blockchain, nil, chainDb, odr, relay); err != nil {
return nil, err
}
odr.removePeer = eth.protocolManager.removePeer
eth.apiBackend = &LesApiBackend{eth, config.SolcPath, nil, nil}
eth.apiBackend.gpo = gasprice.NewLightPriceOracle(eth.apiBackend)
eth.ApiBackend = &LesApiBackend{eth, nil}
eth.ApiBackend.gpo = gasprice.NewLightPriceOracle(eth.ApiBackend)
return eth, nil
}
// APIs returns the collection of RPC services the ethereum package offers.
// NOTE, some of these services probably need to be moved to somewhere else.
func (s *LightNodeService) APIs() []rpc.API {
return append(ethapi.GetAPIs(s.apiBackend), []rpc.API{
return append(ethapi.GetAPIs(s.ApiBackend, &s.solcPath, &s.solc), []rpc.API{
{
Namespace: "eth",
Version: "1.0",
Service: downloader.NewPublicDownloaderAPI(s.protocolManager.downloader, s.eventMux),
Public: true,
}, {
Namespace: "eth",
Version: "1.0",
Service: filters.NewPublicFilterAPI(s.ApiBackend),
Public: true,
}, {
Namespace: "net",
Version: "1.0",

View File

@ -0,0 +1,224 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package les implements the Light Ethereum Subprotocol.
package les
import (
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
)
type lightFetcher struct {
pm *ProtocolManager
odr *LesOdr
chain BlockChain
reqMu sync.RWMutex
requested map[uint64]chan *types.Header
syncPoolMu sync.Mutex
syncPool map[*peer]struct{}
syncPoolNotify chan struct{}
syncPoolNotified uint32
}
func newLightFetcher(pm *ProtocolManager) *lightFetcher {
f := &lightFetcher{
pm: pm,
chain: pm.blockchain,
odr: pm.odr,
requested: make(map[uint64]chan *types.Header),
syncPool: make(map[*peer]struct{}),
syncPoolNotify: make(chan struct{}),
}
go f.syncLoop()
return f
}
func (f *lightFetcher) requestedID(reqID uint64) bool {
f.reqMu.RLock()
_, ok := f.requested[reqID]
f.reqMu.RUnlock()
return ok
}
func (f *lightFetcher) deliverHeaders(reqID uint64, headers []*types.Header) {
f.reqMu.Lock()
chn := f.requested[reqID]
if len(headers) == 1 {
chn <- headers[0]
} else {
chn <- nil
}
close(chn)
delete(f.requested, reqID)
f.reqMu.Unlock()
}
func (f *lightFetcher) notify(p *peer, block blockInfo) {
p.lock.Lock()
if block.Td.Cmp(p.headInfo.Td) <= 0 {
p.lock.Unlock()
return
}
p.headInfo = block
p.lock.Unlock()
head := f.pm.blockchain.CurrentHeader()
currentTd := core.GetTd(f.pm.chainDb, head.Hash(), head.Number.Uint64())
if block.Td.Cmp(currentTd) > 0 {
f.syncPoolMu.Lock()
f.syncPool[p] = struct{}{}
f.syncPoolMu.Unlock()
if atomic.SwapUint32(&f.syncPoolNotified, 1) == 0 {
f.syncPoolNotify <- struct{}{}
}
}
}
func (f *lightFetcher) fetchBestFromPool() *peer {
head := f.pm.blockchain.CurrentHeader()
currentTd := core.GetTd(f.pm.chainDb, head.Hash(), head.Number.Uint64())
f.syncPoolMu.Lock()
var best *peer
for p := range f.syncPool {
td := p.Td()
if td.Cmp(currentTd) <= 0 {
delete(f.syncPool, p)
} else {
if best == nil || td.Cmp(best.Td()) > 0 {
best = p
}
}
}
if best != nil {
delete(f.syncPool, best)
}
f.syncPoolMu.Unlock()
return best
}
func (f *lightFetcher) syncLoop() {
f.pm.wg.Add(1)
defer f.pm.wg.Done()
for {
select {
case <-f.pm.quitSync:
return
case <-f.syncPoolNotify:
atomic.StoreUint32(&f.syncPoolNotified, 0)
chn := f.pm.getSyncLock(false)
if chn != nil {
if atomic.SwapUint32(&f.syncPoolNotified, 1) == 0 {
go func() {
<-chn
f.syncPoolNotify <- struct{}{}
}()
}
} else {
if p := f.fetchBestFromPool(); p != nil {
go f.syncWithPeer(p)
if atomic.SwapUint32(&f.syncPoolNotified, 1) == 0 {
go func() {
time.Sleep(softRequestTimeout)
f.syncPoolNotify <- struct{}{}
}()
}
}
}
}
}
}
func (f *lightFetcher) syncWithPeer(p *peer) bool {
f.pm.wg.Add(1)
defer f.pm.wg.Done()
headNum := f.chain.CurrentHeader().Number.Uint64()
peerHead := p.headBlockInfo()
if !f.pm.needToSync(peerHead) {
return true
}
if peerHead.Number <= headNum+1 {
var header *types.Header
reqID, chn := f.request(p, peerHead)
select {
case header = <-chn:
if header == nil || header.Hash() != peerHead.Hash ||
header.Number.Uint64() != peerHead.Number {
// missing or wrong header returned
fmt.Println("removePeer 1")
f.pm.removePeer(p.id)
return false
}
case <-time.After(hardRequestTimeout):
if !disableClientRemovePeer {
fmt.Println("removePeer 2")
f.pm.removePeer(p.id)
}
f.reqMu.Lock()
close(f.requested[reqID])
delete(f.requested, reqID)
f.reqMu.Unlock()
return false
}
// got the header, try to insert
f.chain.InsertHeaderChain([]*types.Header{header}, 1)
defer func() {
// check header td at the end of syncing, drop peer if it was fake
headerTd := core.GetTd(f.pm.chainDb, header.Hash(), header.Number.Uint64())
if headerTd != nil && headerTd.Cmp(peerHead.Td) != 0 {
fmt.Println("removePeer 3")
f.pm.removePeer(p.id)
}
}()
if !f.pm.needToSync(peerHead) {
return true
}
}
f.pm.waitSyncLock()
if !f.pm.needToSync(peerHead) {
// already synced up by the sync we were waiting for
f.pm.releaseSyncLock()
return true
}
f.pm.syncWithLockAcquired(p)
return !f.pm.needToSync(peerHead)
}
func (f *lightFetcher) request(p *peer, block blockInfo) (uint64, chan *types.Header) {
reqID := f.odr.getNextReqID()
f.reqMu.Lock()
chn := make(chan *types.Header, 1)
f.requested[reqID] = chn
f.reqMu.Unlock()
cost := p.GetRequestCost(GetBlockHeadersMsg, 1)
p.fcServer.SendRequest(reqID, cost)
p.RequestHeadersByHash(reqID, cost, block.Hash, 1, 0, false)
return reqID, chn
}
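The fetcher above pairs each header request with a fresh reqID and a one-shot channel in the requested map; deliverHeaders resolves the channel when the BlockHeadersMsg reply arrives. A self-contained sketch of that request/deliver bookkeeping (illustrative payload type):

package main

import (
    "fmt"
    "sync"
)

// tracker mirrors the requested map in lightFetcher: each outstanding
// request id owns a one-shot channel that the delivery path resolves.
type tracker struct {
    mu   sync.Mutex
    next uint64
    reqs map[uint64]chan string
}

func (t *tracker) request() (uint64, chan string) {
    t.mu.Lock()
    defer t.mu.Unlock()
    t.next++
    ch := make(chan string, 1)
    t.reqs[t.next] = ch
    return t.next, ch
}

func (t *tracker) deliver(id uint64, payload string) {
    t.mu.Lock()
    defer t.mu.Unlock()
    if ch, ok := t.reqs[id]; ok {
        ch <- payload
        close(ch)
        delete(t.reqs, id)
    }
}

func main() {
    t := &tracker{reqs: make(map[uint64]chan string)}
    id, ch := t.request()
    go t.deliver(id, "header #1234")
    fmt.Println(<-ch)
}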

View File

@ -20,6 +20,8 @@ package flowcontrol
import (
"sync"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
)
const fcTimeConst = 1000000
@ -110,7 +112,7 @@ func NewServerNode(params *ServerParams) *ServerNode {
}
func getTime() int64 {
return time.Now().UnixNano()
return int64(mclock.Now())
}
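getTime now reads the monotonic clock via mclock instead of the wall clock, so system time adjustments cannot skew flow-control accounting. A small sketch of the intended usage, assuming mclock.Now() returns a nanosecond-resolution AbsTime convertible to time.Duration:

package main

import (
    "fmt"
    "time"

    "github.com/ethereum/go-ethereum/common/mclock" // dependency assumed
)

func main() {
    start := mclock.Now()
    time.Sleep(10 * time.Millisecond)
    // Elapsed time measured against a monotonic source, unaffected by
    // NTP or manual clock changes.
    elapsed := time.Duration(mclock.Now() - start)
    fmt.Println(elapsed)
}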
func (peer *ServerNode) recalcBLE(time int64) {

View File

@ -190,8 +190,8 @@ func (self *ClientManager) accept(node *cmNode, time int64) bool {
self.update(time)
if !self.canStartReq() {
resume := make(chan bool)
self.resumeQueue <- resume
self.lock.Unlock()
self.resumeQueue <- resume
<-resume
self.lock.Lock()
if _, ok := self.nodes[node]; !ok {

View File

@ -18,12 +18,10 @@
package les
import (
"bytes"
"errors"
"fmt"
"math/big"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
@ -53,6 +51,8 @@ const (
MaxReceiptFetch = 128 // Amount of transaction receipts to allow fetching per request
MaxCodeFetch = 64 // Amount of contract codes to allow fetching per request
MaxProofsFetch = 64 // Amount of merkle proofs to be fetched per retrieval request
disableClientRemovePeer = true
)
// errIncompatibleConfig is returned if the requested protocols and configs are
@ -67,9 +67,10 @@ type hashFetcherFn func(common.Hash) error
type BlockChain interface {
HasHeader(hash common.Hash) bool
GetHeader(hash common.Hash) *types.Header
GetHeader(hash common.Hash, number uint64) *types.Header
GetHeaderByHash(hash common.Hash) *types.Header
CurrentHeader() *types.Header
GetTd(hash common.Hash) *big.Int
GetTdByHash(hash common.Hash) *big.Int
InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error)
Rollback(chain []common.Hash)
Status() (td *big.Int, currentBlock common.Hash, genesisBlock common.Hash)
@ -96,7 +97,7 @@ type ProtocolManager struct {
server *LesServer
downloader *downloader.Downloader
//fetcher *fetcher.Fetcher
fetcher *lightFetcher
peers *peerSet
SubProtocols []p2p.Protocol
@ -108,6 +109,10 @@ type ProtocolManager struct {
quitSync chan struct{}
noMorePeers chan struct{}
syncMu sync.Mutex
syncing bool
syncDone chan struct{}
// wait group is used for graceful shutdowns during downloading
// and processing
wg sync.WaitGroup
@ -128,7 +133,7 @@ func NewProtocolManager(chainConfig *core.ChainConfig, lightSync bool, networkId
txrelay: txrelay,
odr: odr,
peers: newPeerSet(),
newPeerCh: make(chan *peer, 1),
newPeerCh: make(chan *peer),
quitSync: make(chan struct{}),
noMorePeers: make(chan struct{}),
}
@ -167,11 +172,21 @@ func NewProtocolManager(chainConfig *core.ChainConfig, lightSync bool, networkId
return nil, errIncompatibleConfig
}
removePeer := manager.removePeer
if disableClientRemovePeer {
removePeer = func(id string) {}
}
if lightSync {
glog.V(logger.Debug).Infof("LES: create downloader")
manager.downloader = downloader.New(downloader.LightSync, chainDb, manager.eventMux, blockchain.HasHeader, nil, blockchain.GetHeader,
nil, blockchain.CurrentHeader, nil, nil, nil, blockchain.GetTd,
blockchain.InsertHeaderChain, nil, nil, blockchain.Rollback, manager.removePeer)
manager.downloader = downloader.New(downloader.LightSync, chainDb, manager.eventMux, blockchain.HasHeader, nil, blockchain.GetHeaderByHash,
nil, blockchain.CurrentHeader, nil, nil, nil, blockchain.GetTdByHash,
blockchain.InsertHeaderChain, nil, nil, blockchain.Rollback, removePeer)
manager.fetcher = newLightFetcher(manager)
}
if odr != nil {
odr.removePeer = removePeer
}
/*validator := func(block *types.Block, parent *types.Block) error {
@ -215,6 +230,10 @@ func (pm *ProtocolManager) Start() {
if pm.lightSync {
// start sync handler
go pm.syncer()
} else {
go func() {
for range pm.newPeerCh {}
}()
}
}
@ -252,7 +271,8 @@ func (pm *ProtocolManager) handle(p *peer) error {
// Execute the LES handshake
td, head, genesis := pm.blockchain.Status()
if err := p.Handshake(td, head, genesis, pm.server); err != nil {
headNum := core.GetBlockNumber(pm.chainDb, head)
if err := p.Handshake(td, head, headNum, genesis, pm.server); err != nil {
glog.V(logger.Debug).Infof("%v: handshake failed: %v", p, err)
return err
}
@ -302,73 +322,13 @@ func (pm *ProtocolManager) handle(p *peer) error {
for {
if err := pm.handleMsg(p); err != nil {
glog.V(logger.Debug).Infof("%v: message handling failed: %v", p, err)
fmt.Println("handleMsg err:", err)
return err
}
}
return nil
}
func getHeadersBatch(db ethdb.Database, hash common.Hash, number uint64, amount uint64, skip uint64, reverse bool) (res []rlp.RawValue) {
res = make([]rlp.RawValue, amount)
step := int64(skip + 1)
if reverse {
step = -step
}
ptr := uint64(0)
if hash != (common.Hash{}) {
res[0] = core.GetHeaderRLP(db, hash)
if res[0] == nil {
return nil
}
header := new(types.Header)
if err := rlp.Decode(bytes.NewReader(res[0]), header); err != nil {
return nil
}
ptr = 1
number = header.GetNumberU64()
}
tasks := make(chan uint64)
go func() {
stop := time.After(time.Millisecond * 300)
loop:
for i := ptr; i < amount; i++ {
select {
case tasks <- i:
case <-stop:
break loop
}
}
close(tasks)
}()
pending := new(sync.WaitGroup)
for i := 0; i < 4; i++ {
pending.Add(1)
go func(id int) {
for index := range tasks {
num := int64(number) + step*int64(index)
if num >= 0 {
hash := core.GetCanonicalHash(db, uint64(num))
if hash != (common.Hash{}) {
res[index] = core.GetHeaderRLP(db, hash)
}
}
}
pending.Done()
}(i)
}
pending.Wait()
for i := 0; i < len(res); i++ {
if res[i] == nil {
res = res[:i]
return
}
}
return
}
var reqList = []uint64{GetBlockHeadersMsg, GetBlockBodiesMsg, GetCodeMsg, GetReceiptsMsg, GetProofsMsg, SendTxMsg}
// handleMsg is invoked whenever an inbound message is received from a remote
@ -415,31 +375,92 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
return errResp(ErrExtraStatusMsg, "uncontrolled status message")
// Block header query, collect the requested headers and reply
case NewBlockHashesMsg:
var req newBlockHashesData
if err := msg.Decode(&req); err != nil {
return errResp(ErrDecode, "%v: %v", msg, err)
}
//fmt.Println("RECEIVED", req[0].Number, req[0].Hash, req[0].Td)
for _, r := range req {
pm.fetcher.notify(p, r)
}
case GetBlockHeadersMsg:
glog.V(logger.Debug).Infof("LES: received GetBlockHeadersMsg from peer %v", p.id)
// Decode the complex header query
var query struct {
var req struct {
ReqID uint64
getBlockHeadersData
Query getBlockHeadersData
}
if err := msg.Decode(&query); err != nil {
if err := msg.Decode(&req); err != nil {
return errResp(ErrDecode, "%v: %v", msg, err)
}
amount := query.Amount
if amount > uint64(maxReqs) {
return errResp(ErrRequestRejected, "")
}
if amount > MaxHeaderFetch {
amount = MaxHeaderFetch
}
if amount*estHeaderRlpSize > softResponseLimit {
amount = softResponseLimit / estHeaderRlpSize
}
query := req.Query
headers := getHeadersBatch(pm.chainDb, query.Origin.Hash, query.Origin.Number, amount, query.Skip, query.Reverse)
bv, rcost := p.fcClient.RequestProcessed(costs.baseCost + amount*costs.reqCost)
pm.server.fcCostStats.update(msg.Code, amount, rcost)
return p.SendBlockHeaders(query.ReqID, bv, headers)
hashMode := query.Origin.Hash != (common.Hash{})
// Gather headers until the fetch or network limits are reached
var (
bytes common.StorageSize
headers []*types.Header
unknown bool
)
for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit && len(headers) < downloader.MaxHeaderFetch {
// Retrieve the next header satisfying the query
var origin *types.Header
if hashMode {
origin = pm.blockchain.GetHeaderByHash(query.Origin.Hash)
} else {
origin = pm.blockchain.GetHeaderByNumber(query.Origin.Number)
}
if origin == nil {
break
}
number := origin.Number.Uint64()
headers = append(headers, origin)
bytes += estHeaderRlpSize
// Advance to the next header of the query
switch {
case query.Origin.Hash != (common.Hash{}) && query.Reverse:
// Hash based traversal towards the genesis block
for i := 0; i < int(query.Skip)+1; i++ {
if header := pm.blockchain.GetHeader(query.Origin.Hash, number); header != nil {
query.Origin.Hash = header.ParentHash
number--
} else {
unknown = true
break
}
}
case query.Origin.Hash != (common.Hash{}) && !query.Reverse:
// Hash based traversal towards the leaf block
if header := pm.blockchain.GetHeaderByNumber(origin.Number.Uint64() + query.Skip + 1); header != nil {
if pm.blockchain.GetBlockHashesFromHash(header.Hash(), query.Skip+1)[query.Skip] == query.Origin.Hash {
query.Origin.Hash = header.Hash()
} else {
unknown = true
}
} else {
unknown = true
}
case query.Reverse:
// Number based traversal towards the genesis block
if query.Origin.Number >= query.Skip+1 {
query.Origin.Number -= (query.Skip + 1)
} else {
unknown = true
}
case !query.Reverse:
// Number based traversal towards the leaf block
query.Origin.Number += (query.Skip + 1)
}
}
bv, rcost := p.fcClient.RequestProcessed(costs.baseCost + query.Amount * costs.reqCost)
pm.server.fcCostStats.update(msg.Code, query.Amount, rcost)
return p.SendBlockHeaders(req.ReqID, bv, headers)
case BlockHeadersMsg:
if pm.downloader == nil {
@ -456,9 +477,13 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.GotReply(resp.ReqID, resp.BV)
err := pm.downloader.DeliverHeaders(p.id, resp.Headers)
if err != nil {
glog.V(logger.Debug).Infoln(err)
if pm.fetcher.requestedID(resp.ReqID) {
pm.fetcher.deliverHeaders(resp.ReqID, resp.Headers)
} else {
err := pm.downloader.DeliverHeaders(p.id, resp.Headers)
if err != nil {
glog.V(logger.Debug).Infoln(err)
}
}
case GetBlockBodiesMsg:
@ -485,7 +510,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
break
}
// Retrieve the requested block body, stopping if enough was found
if data := core.GetBodyRLP(pm.chainDb, hash); len(data) != 0 {
if data := core.GetBodyRLP(pm.chainDb, hash, core.GetBlockNumber(pm.chainDb, hash)); len(data) != 0 {
bodies = append(bodies, data)
bytes += len(data)
}
@ -539,7 +564,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
break
}
// Retrieve the requested state entry, stopping if enough was found
if header := core.GetHeader(pm.chainDb, req.BHash); header != nil {
if header := core.GetHeader(pm.chainDb, req.BHash, core.GetBlockNumber(pm.chainDb, req.BHash)); header != nil {
if trie, _ := trie.New(header.Root, pm.chainDb); trie != nil {
sdata := trie.Get(req.AccKey)
if so, err := state.DecodeObject(common.Address{}, pm.chainDb, sdata); err == nil {
@ -602,9 +627,9 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
break
}
// Retrieve the requested block's receipts, skipping if unknown to us
results := core.GetBlockReceipts(pm.chainDb, hash)
results := core.GetBlockReceipts(pm.chainDb, hash, core.GetBlockNumber(pm.chainDb, hash))
if results == nil {
if header := pm.blockchain.GetHeader(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
if header := pm.blockchain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
continue
}
}
@ -665,7 +690,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
break
}
// Retrieve the requested state entry, stopping if enough was found
if header := core.GetHeader(pm.chainDb, req.BHash); header != nil {
if header := core.GetHeader(pm.chainDb, req.BHash, core.GetBlockNumber(pm.chainDb, req.BHash)); header != nil {
if tr, _ := trie.New(header.Root, pm.chainDb); tr != nil {
if len(req.AccKey) > 0 {
data := tr.Get(req.AccKey)
@ -740,8 +765,34 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
func (self *ProtocolManager) NodeInfo() *eth.EthNodeInfo {
return &eth.EthNodeInfo{
Network: self.networkId,
Difficulty: self.blockchain.GetTd(self.blockchain.LastBlockHash()),
Difficulty: self.blockchain.GetTdByHash(self.blockchain.LastBlockHash()),
Genesis: self.blockchain.Genesis().Hash(),
Head: self.blockchain.LastBlockHash(),
}
}
func (pm *ProtocolManager) broadcastBlockLoop() {
sub := pm.eventMux.Subscribe(core.ChainHeadEvent{})
go func() {
for {
select {
case ev := <-sub.Chan():
peers := pm.peers.AllPeers()
if len(peers) > 0 {
header := ev.Data.(core.ChainHeadEvent).Block.Header()
hash := header.Hash()
number := header.Number.Uint64()
td := core.GetTd(pm.chainDb, hash, number)
//fmt.Println("BROADCAST", number, hash, td)
announce := newBlockHashesData{{Hash: hash, Number: number, Td: td}}
for _, p := range peers {
p.SendNewBlockHashes(announce)
}
}
case <-pm.quitSync:
sub.Unsubscribe()
return
}
}
}()
}

View File

@ -19,6 +19,7 @@ import (
"sync"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/logger"
@ -129,7 +130,7 @@ func (self *LesOdr) Deliver(peer *peer, msg *Msg) error {
}
func (self *LesOdr) requestPeer(req *sentReq, peer *peer, delivered, timeout chan struct{}, reqWg *sync.WaitGroup) {
stime := time.Now()
stime := mclock.Now()
defer func() {
req.lock.Lock()
delete(req.sentTo, peer)
@ -139,7 +140,7 @@ func (self *LesOdr) requestPeer(req *sentReq, peer *peer, delivered, timeout cha
select {
case <-delivered:
servTime := uint64(time.Now().Sub(stime))
servTime := uint64(mclock.Now()-stime)
self.peers.updateTimeout(peer, false)
self.peers.updateServTime(peer, servTime)
return
@ -154,7 +155,7 @@ func (self *LesOdr) requestPeer(req *sentReq, peer *peer, delivered, timeout cha
select {
case <-delivered:
servTime := uint64(time.Now().Sub(stime))
servTime := uint64(mclock.Now()-stime)
self.peers.updateServTime(peer, servTime)
return
case <-time.After(hardRequestTimeout):

View File

@ -67,7 +67,7 @@ func (ps *odrPeerSet) unregister(p *peer) error {
func (ps *odrPeerSet) peerPriority(p *peer, info *odrPeerInfo, req LesOdrRequest) uint64 {
tm := p.fcServer.CanSend(req.GetCost(p))
if info.reqCnt > 0 {
if info.reqTimeCnt > 0 {
tm += info.reqTimeSum / info.reqTimeCnt
}
return tm

View File

@ -84,7 +84,7 @@ func (self *BlockRequest) Valid(db ethdb.Database, msg *Msg) bool {
return false
}
body := bodies[0]
header := core.GetHeader(db, self.Hash)
header := core.GetHeader(db, self.Hash, self.Number)
if header == nil {
glog.V(logger.Debug).Infof("ODR: header not found for block %08x", self.Hash[:4])
return false
@ -139,7 +139,7 @@ func (self *ReceiptsRequest) Valid(db ethdb.Database, msg *Msg) bool {
return false
}
hash := types.DeriveSha(receipts[0])
header := core.GetHeader(db, self.Hash)
header := core.GetHeader(db, self.Hash, self.Number)
if header == nil {
glog.V(logger.Debug).Infof("ODR: header not found for block %08x", self.Hash[:4])
return false
@ -156,6 +156,7 @@ func (self *ReceiptsRequest) Valid(db ethdb.Database, msg *Msg) bool {
type ProofReq struct {
BHash common.Hash
AccKey, Key []byte
FromLevel uint
}
// ODR request type for state/storage trie entries, see LesOdrRequest interface

View File

@ -54,8 +54,8 @@ type peer struct {
id string
head common.Hash
td *big.Int
headInfo blockInfo
number uint64
lock sync.RWMutex
knownBlocks *set.Set // Set of block hashes known to be known by this peer
@ -92,16 +92,23 @@ func (p *peer) Head() (hash common.Hash) {
p.lock.RLock()
defer p.lock.RUnlock()
copy(hash[:], p.head[:])
copy(hash[:], p.headInfo.Hash[:])
return hash
}
func (p *peer) headBlockInfo() blockInfo {
p.lock.RLock()
defer p.lock.RUnlock()
return p.headInfo
}
// Td retrieves the current total difficulty of a peer.
func (p *peer) Td() *big.Int {
p.lock.RLock()
defer p.lock.RUnlock()
return new(big.Int).Set(p.td)
return new(big.Int).Set(p.headInfo.Td)
}
func sendRequest(w p2p.MsgWriter, msgcode, reqID, cost uint64, data interface{}) error {
@ -124,8 +131,14 @@ func (p *peer) GetRequestCost(msgcode uint64, amount int) uint64 {
return p.fcCosts[msgcode].baseCost + p.fcCosts[msgcode].reqCost*uint64(amount)
}
// SendNewBlockHashes announces the availability of a number of blocks through
// a hash notification.
func (p *peer) SendNewBlockHashes(request newBlockHashesData) error {
return p2p.Send(p.rw, NewBlockHashesMsg, request)
}
// SendBlockHeaders sends a batch of block headers to the remote peer.
func (p *peer) SendBlockHeaders(reqID, bv uint64, headers []rlp.RawValue) error {
func (p *peer) SendBlockHeaders(reqID, bv uint64, headers []*types.Header) error {
return sendResponse(p.rw, BlockHeadersMsg, reqID, bv, headers)
}
@ -267,15 +280,16 @@ func (p *peer) sendReceiveHandshake(sendList keyValueList) (keyValueList, error)
// Handshake executes the les protocol handshake, negotiating version number,
// network IDs, difficulties, head and genesis blocks.
func (p *peer) Handshake(td *big.Int, head common.Hash, genesis common.Hash, server *LesServer) error {
func (p *peer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, server *LesServer) error {
p.lock.Lock()
defer p.lock.Unlock()
var send keyValueList
send = send.add("protocolVersion", uint64(p.version))
send = send.add("networkId", uint64(p.network))
send = send.add("td", td)
send = send.add("bestHash", head)
send = send.add("headTd", td)
send = send.add("headHash", head)
send = send.add("headNum", headNum)
send = send.add("genesisHash", genesis)
if server != nil {
send = send.add("serveHeaders", nil)
@ -294,8 +308,8 @@ func (p *peer) Handshake(td *big.Int, head common.Hash, genesis common.Hash, ser
}
recv := recvList.decode()
var rGenesis, rBest common.Hash
var rVersion, rNetwork uint64
var rGenesis, rHash common.Hash
var rVersion, rNetwork, rNum uint64
var rTd *big.Int
if err := recv.get("protocolVersion", &rVersion); err != nil {
@ -304,10 +318,13 @@ func (p *peer) Handshake(td *big.Int, head common.Hash, genesis common.Hash, ser
if err := recv.get("networkId", &rNetwork); err != nil {
return err
}
if err := recv.get("td", &rTd); err != nil {
if err := recv.get("headTd", &rTd); err != nil {
return err
}
if err := recv.get("bestHash", &rBest); err != nil {
if err := recv.get("headHash", &rHash); err != nil {
return err
}
if err := recv.get("headNum", &rNum); err != nil {
return err
}
if err := recv.get("genesisHash", &rGenesis); err != nil {
@ -353,7 +370,7 @@ func (p *peer) Handshake(td *big.Int, head common.Hash, genesis common.Hash, ser
p.fcCosts = MRC.decode()
}
// Configure the remote peer, and sanity check our handshake too
p.td, p.head = rTd, rBest
p.headInfo.Td, p.headInfo.Hash, p.headInfo.Number = rTd, rHash, rNum
return nil
}

View File

@ -124,9 +124,11 @@ type statusData struct {
}
// newBlockHashesData is the network packet for the block announcements.
type newBlockHashesData []struct {
type newBlockHashesData []blockInfo
type blockInfo struct {
Hash common.Hash // Hash of one particular block being announced
Number uint64 // Number of one particular block being announced
Td *big.Int // Total difficulty of one particular block being announced
}
// getBlockHashesData is the network packet for the hash based hash retrieval.

View File

@ -39,6 +39,8 @@ func NewLesServer(eth *eth.FullNodeService, config *eth.Config) (*LesServer, err
if err != nil {
return nil, err
}
pm.broadcastBlockLoop()
srv := &LesServer{protocolManager: pm}
pm.server = srv
@ -62,6 +64,9 @@ func (s *LesServer) Start() {
func (s *LesServer) Stop() {
s.fcCostStats.store()
s.fcManager.Stop()
go func() {
<-s.protocolManager.noMorePeers
}()
s.protocolManager.Stop()
}

View File

@ -19,6 +19,7 @@ package les
import (
"time"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/eth/downloader"
)
@ -44,11 +45,11 @@ func (pm *ProtocolManager) syncer() {
if pm.peers.Len() < minDesiredPeerCount {
break
}
go pm.synchronise(pm.peers.BestPeer(), false)
go pm.synchronise(pm.peers.BestPeer())
case <-forceSync:
// Force a sync even if not enough peers are present
go pm.synchronise(pm.peers.BestPeer(), false)
go pm.synchronise(pm.peers.BestPeer())
case <-pm.noMorePeers:
return
@ -56,23 +57,66 @@ func (pm *ProtocolManager) syncer() {
}
}
func (pm *ProtocolManager) needToSync(peerHead blockInfo) bool {
head := pm.blockchain.CurrentHeader()
currentTd := core.GetTd(pm.chainDb, head.Hash(), head.Number.Uint64())
return currentTd != nil && peerHead.Td.Cmp(currentTd) > 0
}
// synchronise tries to sync up our local block chain with a remote peer.
func (pm *ProtocolManager) synchronise(peer *peer, exit bool) {
func (pm *ProtocolManager) synchronise(peer *peer) {
// Short circuit if no peers are available
if peer == nil {
return
}
// Make sure the peer's TD is higher than our own.
td := pm.blockchain.GetTd(pm.blockchain.LastBlockHash())
if peer.Td().Cmp(td) > 0 {
for {
if pm.downloader.Synchronise(peer.id, peer.Head(), peer.Td(), downloader.LightSync) != nil {
return
}
if exit {
return
}
time.Sleep(time.Second * 5)
if !pm.needToSync(peer.headBlockInfo()) {
return
}
pm.waitSyncLock()
pm.syncWithLockAcquired(peer)
}
func (pm *ProtocolManager) waitSyncLock() {
for {
chn := pm.getSyncLock(true)
if chn == nil {
break
}
<-chn
}
}
// getSyncLock either acquires the sync lock and returns nil or returns a channel
// which is closed when the lock is free again
func (pm *ProtocolManager) getSyncLock(acquire bool) chan struct{} {
pm.syncMu.Lock()
defer pm.syncMu.Unlock()
if pm.syncing {
if pm.syncDone == nil {
pm.syncDone = make(chan struct{})
}
return pm.syncDone
} else {
pm.syncing = acquire
return nil
}
}
func (pm *ProtocolManager) releaseSyncLock() {
pm.syncMu.Lock()
pm.syncing = false
if pm.syncDone != nil {
close(pm.syncDone)
pm.syncDone = nil
}
pm.syncMu.Unlock()
}
func (pm *ProtocolManager) syncWithLockAcquired(peer *peer) {
pm.downloader.Synchronise(peer.id, peer.Head(), peer.Td(), downloader.LightSync)
pm.releaseSyncLock()
}
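waitSyncLock, getSyncLock and releaseSyncLock above implement a lock whose waiters block on a channel that is closed on release instead of polling. A self-contained sketch of the same idea (illustrative names, simplified to always acquire):

package main

import (
    "fmt"
    "sync"
)

// syncLock mirrors getSyncLock/releaseSyncLock: acquire either takes the
// lock (nil return) or hands back a channel that is closed on release, so
// waiters can block without polling.
type syncLock struct {
    mu   sync.Mutex
    held bool
    done chan struct{}
}

func (l *syncLock) acquire() chan struct{} {
    l.mu.Lock()
    defer l.mu.Unlock()
    if l.held {
        if l.done == nil {
            l.done = make(chan struct{})
        }
        return l.done
    }
    l.held = true
    return nil
}

func (l *syncLock) release() {
    l.mu.Lock()
    l.held = false
    if l.done != nil {
        close(l.done)
        l.done = nil
    }
    l.mu.Unlock()
}

func main() {
    var l syncLock
    if l.acquire() == nil {
        fmt.Println("first caller owns the sync")
    }
    wait := l.acquire() // second caller must wait
    go l.release()
    <-wait
    fmt.Println("lock released, waiter resumes")
}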

View File

@ -106,7 +106,7 @@ func NewLightChain(odr OdrBackend, config *core.ChainConfig, pow pow.PoW, mux *e
}
// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
for hash := range core.BadHashes {
if header := bc.GetHeader(hash); header != nil {
if header := bc.GetHeaderByHash(hash); header != nil {
glog.V(logger.Error).Infof("Found bad hash, rewinding chain to block #%d [%x…]", header.Number, header.ParentHash[:4])
bc.SetHead(header.Number.Uint64() - 1)
glog.V(logger.Error).Infoln("Chain rewind was successful, resuming normal operation")
@ -131,13 +131,14 @@ func (self *LightChain) loadLastState() error {
// Corrupt or empty database, init from scratch
self.Reset()
} else {
if header := self.GetHeader(head); header != nil {
if header := self.GetHeaderByHash(head); header != nil {
self.hc.SetCurrentHeader(header)
}
}
// Issue a status log and return
headerTd := self.GetTd(self.hc.CurrentHeader().Hash())
header := self.hc.CurrentHeader()
headerTd := self.GetTd(header.Hash(), header.Number.Uint64())
glog.V(logger.Info).Infof("Last header: #%d [%x…] TD=%v", self.hc.CurrentHeader().Number, self.hc.CurrentHeader().Hash().Bytes()[:4], headerTd)
return nil
@ -175,8 +176,9 @@ func (self *LightChain) Status() (td *big.Int, currentBlock common.Hash, genesis
self.mu.RLock()
defer self.mu.RUnlock()
hash := self.hc.CurrentHeader().Hash()
return self.GetTd(hash), hash, self.genesisBlock.Hash()
header := self.hc.CurrentHeader()
hash := header.Hash()
return self.GetTd(hash, header.Number.Uint64()), hash, self.genesisBlock.Hash()
}
// SetValidator sets the validator which is used to validate incoming headers.
@ -213,7 +215,7 @@ func (bc *LightChain) ResetWithGenesisBlock(genesis *types.Block) {
defer bc.mu.Unlock()
// Prepare the genesis block and reinitialise the chain
if err := core.WriteTd(bc.chainDb, genesis.Hash(), genesis.Difficulty()); err != nil {
if err := core.WriteTd(bc.chainDb, genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
glog.Fatalf("failed to write genesis block TD: %v", err)
}
if err := core.WriteBlock(bc.chainDb, genesis); err != nil {
@ -239,7 +241,7 @@ func (self *LightChain) GetBody(ctx context.Context, hash common.Hash) (*types.B
body := cached.(*types.Body)
return body, nil
}
body, err := GetBody(ctx, self.odr, hash)
body, err := GetBody(ctx, self.odr, hash, self.hc.GetBlockNumber(hash))
if err != nil {
return nil, err
}
@ -255,7 +257,7 @@ func (self *LightChain) GetBodyRLP(ctx context.Context, hash common.Hash) (rlp.R
if cached, ok := self.bodyRLPCache.Get(hash); ok {
return cached.(rlp.RawValue), nil
}
body, err := GetBodyRLP(ctx, self.odr, hash)
body, err := GetBodyRLP(ctx, self.odr, hash, self.hc.GetBlockNumber(hash))
if err != nil {
return nil, err
}
@ -267,18 +269,18 @@ func (self *LightChain) GetBodyRLP(ctx context.Context, hash common.Hash) (rlp.R
// HasBlock checks if a block is fully present in the database or not, caching
// it if present.
func (bc *LightChain) HasBlock(hash common.Hash) bool {
blk, _ := bc.GetBlock(NoOdr, hash)
blk, _ := bc.GetBlockByHash(NoOdr, hash)
return blk != nil
}
// GetBlock retrieves a block from the database or ODR service by hash,
// GetBlock retrieves a block from the database or ODR service by hash and number,
// caching it if found.
func (self *LightChain) GetBlock(ctx context.Context, hash common.Hash) (*types.Block, error) {
func (self *LightChain) GetBlock(ctx context.Context, hash common.Hash, number uint64) (*types.Block, error) {
// Short circuit if the block's already in the cache, retrieve otherwise
if block, ok := self.blockCache.Get(hash); ok {
return block.(*types.Block), nil
}
block, err := GetBlock(ctx, self.odr, hash)
block, err := GetBlock(ctx, self.odr, hash, number)
if err != nil {
return nil, err
}
@ -287,6 +289,12 @@ func (self *LightChain) GetBlock(ctx context.Context, hash common.Hash) (*types.
return block, nil
}
// GetBlockByHash retrieves a block from the database or ODR service by hash,
// caching it if found.
func (self *LightChain) GetBlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) {
return self.GetBlock(ctx, hash, self.hc.GetBlockNumber(hash))
}
// GetBlockByNumber retrieves a block from the database or ODR service by
// number, caching it (associated with its hash) if found.
func (self *LightChain) GetBlockByNumber(ctx context.Context, number uint64) (*types.Block, error) {
@ -294,7 +302,7 @@ func (self *LightChain) GetBlockByNumber(ctx context.Context, number uint64) (*t
if hash == (common.Hash{}) {
return nil, nil
}
return self.GetBlock(ctx, hash)
return self.GetBlock(ctx, hash, number)
}
// Stop stops the blockchain service. If any imports are currently in progress
@ -320,8 +328,8 @@ func (self *LightChain) Rollback(chain []common.Hash) {
for i := len(chain) - 1; i >= 0; i-- {
hash := chain[i]
if self.hc.CurrentHeader().Hash() == hash {
self.hc.SetCurrentHeader(self.GetHeader(self.hc.CurrentHeader().ParentHash))
if head := self.hc.CurrentHeader(); head.Hash() == hash {
self.hc.SetCurrentHeader(self.GetHeader(head.ParentHash, head.Number.Uint64()-1))
}
}
}
@ -330,9 +338,9 @@ func (self *LightChain) Rollback(chain []common.Hash) {
// posts them into the event mux.
func (self *LightChain) postChainEvents(events []interface{}) {
for _, event := range events {
if event, ok := event.(LightChainEvent); ok {
if event, ok := event.(core.ChainEvent); ok {
if self.LastBlockHash() == event.Hash {
self.eventMux.Post(LightChainHeadEvent{event.Header})
self.eventMux.Post(core.ChainHeadEvent{Block: event.Block})
}
}
// Fire the insertion events individually too
@ -371,16 +379,16 @@ func (self *LightChain) InsertHeaderChain(chain []*types.Header, checkFreq int)
if glog.V(logger.Debug) {
glog.Infof("[%v] inserted header #%d (%x...).\n", time.Now().UnixNano(), header.Number, header.Hash().Bytes()[0:4])
}
events = append(events, LightChainEvent{header, header.Hash()})
events = append(events, core.ChainEvent{Block: types.NewBlockWithHeader(header), Hash: header.Hash()})
case core.SideStatTy:
if glog.V(logger.Detail) {
glog.Infof("inserted forked header #%d (TD=%v) (%x...).\n", header.Number, header.Difficulty, header.Hash().Bytes()[0:4])
}
events = append(events, LightChainSideEvent{header})
events = append(events, core.ChainSideEvent{Block: types.NewBlockWithHeader(header)})
case core.SplitStatTy:
events = append(events, LightChainSplitEvent{header})
events = append(events, core.ChainSplitEvent{Block: types.NewBlockWithHeader(header)})
}
return err
@ -400,15 +408,27 @@ func (self *LightChain) CurrentHeader() *types.Header {
}
// GetTd retrieves a block's total difficulty in the canonical chain from the
// database by hash, caching it if found.
func (self *LightChain) GetTd(hash common.Hash) *big.Int {
return self.hc.GetTd(hash)
// database by hash and number, caching it if found.
func (self *LightChain) GetTd(hash common.Hash, number uint64) *big.Int {
return self.hc.GetTd(hash, number)
}
// GetHeader retrieves a block header from the database by hash, caching it if
// GetTdByHash retrieves a block's total difficulty in the canonical chain from the
// database by hash, caching it if found.
func (self *LightChain) GetTdByHash(hash common.Hash) *big.Int {
return self.hc.GetTdByHash(hash)
}
// GetHeader retrieves a block header from the database by hash and number,
// caching it if found.
func (self *LightChain) GetHeader(hash common.Hash, number uint64) *types.Header {
return self.hc.GetHeader(hash, number)
}
// GetHeaderByHash retrieves a block header from the database by hash, caching it if
// found.
func (self *LightChain) GetHeader(hash common.Hash) *types.Header {
return self.hc.GetHeader(hash)
func (self *LightChain) GetHeaderByHash(hash common.Hash) *types.Header {
return self.hc.GetHeaderByHash(hash)
}
// HasHeader checks if a block header is present in the database or not, caching

View File

@ -111,22 +111,24 @@ func (req *CodeRequest) StoreResult(db ethdb.Database) {
type BlockRequest struct {
OdrRequest
Hash common.Hash
Number uint64
Rlp []byte
}
// StoreResult stores the retrieved data in local database
func (req *BlockRequest) StoreResult(db ethdb.Database) {
core.WriteBodyRLP(db, req.Hash, req.Rlp)
core.WriteBodyRLP(db, req.Hash, req.Number, req.Rlp)
}
// ReceiptsRequest is the ODR request type for retrieving block receipts
type ReceiptsRequest struct {
OdrRequest
Hash common.Hash
Number uint64
Receipts types.Receipts
}
// StoreResult stores the retrieved data in local database
func (req *ReceiptsRequest) StoreResult(db ethdb.Database) {
core.WriteBlockReceipts(db, req.Hash, req.Receipts)
core.WriteBlockReceipts(db, req.Hash, req.Number, req.Receipts)
}
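A sketch of how a backend might satisfy one of these requests once the reply arrives. The fetchRLP round trip is a hypothetical stand-in for the LES protocol code and is not part of this commit; only the Hash/Number/Rlp fields and StoreResult are taken from the type above.

func serveBlockRequest(db ethdb.Database, req *BlockRequest, fetchRLP func(common.Hash, uint64) ([]byte, error)) error {
	// Hypothetical network round trip, keyed by the same hash+number pair
	// that the local database uses.
	rlpData, err := fetchRLP(req.Hash, req.Number)
	if err != nil {
		return err
	}
	req.Rlp = rlpData
	// StoreResult writes the body under the hash+number key so that later
	// local lookups can find it without another retrieval.
	req.StoreResult(db)
	return nil
}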

View File

@ -53,11 +53,11 @@ func retrieveContractCode(ctx context.Context, odr OdrBackend, id *TrieID, hash
}
// GetBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
func GetBodyRLP(ctx context.Context, odr OdrBackend, hash common.Hash) (rlp.RawValue, error) {
if data := core.GetBodyRLP(odr.Database(), hash); data != nil {
func GetBodyRLP(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64) (rlp.RawValue, error) {
if data := core.GetBodyRLP(odr.Database(), hash, number); data != nil {
return data, nil
}
r := &BlockRequest{Hash: hash}
r := &BlockRequest{Hash: hash, Number: number}
if err := odr.Retrieve(ctx, r); err != nil {
return nil, err
} else {
@ -67,8 +67,8 @@ func GetBodyRLP(ctx context.Context, odr OdrBackend, hash common.Hash) (rlp.RawV
// GetBody retrieves the block body (transactions, uncles) corresponding to the
// hash.
func GetBody(ctx context.Context, odr OdrBackend, hash common.Hash) (*types.Body, error) {
data, err := GetBodyRLP(ctx, odr, hash)
func GetBody(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64) (*types.Body, error) {
data, err := GetBodyRLP(ctx, odr, hash, number)
if err != nil {
return nil, err
}
@ -82,13 +82,13 @@ func GetBody(ctx context.Context, odr OdrBackend, hash common.Hash) (*types.Body
// GetBlock retrieves an entire block corresponding to the hash, assembling it
// back from the stored header and body.
func GetBlock(ctx context.Context, odr OdrBackend, hash common.Hash) (*types.Block, error) {
func GetBlock(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64) (*types.Block, error) {
// Retrieve the block header and body contents
header := core.GetHeader(odr.Database(), hash)
header := core.GetHeader(odr.Database(), hash, number)
if header == nil {
return nil, ErrNoHeader
}
body, err := GetBody(ctx, odr, hash)
body, err := GetBody(ctx, odr, hash, number)
if err != nil {
return nil, err
}
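A short usage sketch for the helpers above, assuming the caller holds an OdrBackend and already knows both the hash and the canonical number of the block; the timeout is an illustrative value and the time import is assumed.

func fetchWithDeadline(odr OdrBackend, hash common.Hash, number uint64) (*types.Block, error) {
	// Bound the on-demand retrieval so that a missing or slow LES peer
	// cannot block the caller indefinitely.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	return GetBlock(ctx, odr, hash, number)
}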
@ -98,12 +98,12 @@ func GetBlock(ctx context.Context, odr OdrBackend, hash common.Hash) (*types.Blo
// GetBlockReceipts retrieves the receipts generated by the transactions included
// in a block given by its hash.
func GetBlockReceipts(ctx context.Context, odr OdrBackend, hash common.Hash) (types.Receipts, error) {
receipts := core.GetBlockReceipts(odr.Database(), hash)
func GetBlockReceipts(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64) (types.Receipts, error) {
receipts := core.GetBlockReceipts(odr.Database(), hash, number)
if receipts != nil {
return receipts, nil
}
r := &ReceiptsRequest{Hash: hash}
r := &ReceiptsRequest{Hash: hash, Number: number}
if err := odr.Retrieve(ctx, r); err != nil {
return nil, err
} else {

View File

@ -102,7 +102,7 @@ func NewStateObject(address common.Address, odr OdrBackend) *StateObject {
codeHash: emptyCodeHash,
storage: make(Storage),
}
object.trie = NewLightTrie(nil, odr, true)
object.trie = NewLightTrie(&TrieID{}, odr, true)
return object
}

View File

@ -84,7 +84,7 @@ func NewTxPool(config *core.ChainConfig, eventMux *event.TypeMux, chain *LightCh
mined: make(map[common.Hash][]*types.Transaction),
quit: make(chan bool),
eventMux: eventMux,
events: eventMux.Subscribe(LightChainHeadEvent{}),
events: eventMux.Subscribe(core.ChainHeadEvent{}),
chain: chain,
relay: relay,
odr: chain.Odr(),
@ -128,12 +128,14 @@ type txBlockData struct {
// storeTxBlockData stores the block position of a mined tx in the local db
func (pool *TxPool) storeTxBlockData(txh common.Hash, tbd txBlockData) {
//fmt.Println("storeTxBlockData", txh, tbd)
data, _ := rlp.EncodeToBytes(tbd)
pool.chainDb.Put(append(txh[:], byte(1)), data)
}
// removeTxBlockData removes the stored block position of a rolled back tx
func (pool *TxPool) removeTxBlockData(txh common.Hash) {
//fmt.Println("removeTxBlockData", txh)
pool.chainDb.Delete(append(txh[:], byte(1)))
}
@ -167,18 +169,38 @@ func (txc txStateChanges) getLists() (mined []common.Hash, rollback []common.Has
// and marks them as mined if necessary. It also stores block position in the db
// and adds them to the received txStateChanges map.
func (pool *TxPool) checkMinedTxs(ctx context.Context, hash common.Hash, idx uint64, txc txStateChanges) error {
//fmt.Println("checkMinedTxs")
if len(pool.pending) == 0 {
return nil
}
//fmt.Println("len(pool) =", len(pool.pending))
receipts, err := GetBlockReceipts(ctx, pool.odr, hash)
block, err := GetBlock(ctx, pool.odr, hash, idx)
var receipts types.Receipts
if err != nil {
//fmt.Println(err)
return err
}
//fmt.Println("len(block.Transactions()) =", len(block.Transactions()))
list := pool.mined[hash]
for i, receipt := range receipts {
txHash := receipt.TxHash
for i, tx := range block.Transactions() {
txHash := tx.Hash()
//fmt.Println(" txHash:", txHash)
if tx, ok := pool.pending[txHash]; ok {
//fmt.Println("TX FOUND")
if receipts == nil {
receipts, err = GetBlockReceipts(ctx, pool.odr, hash, idx)
if err != nil {
return err
}
if len(receipts) != len(block.Transactions()) {
panic("block transaction/receipt count mismatch") // should never happen if hashes did match
}
core.SetReceiptsData(block, receipts)
}
//fmt.Println("WriteReceipt", receipts[i].TxHash)
core.WriteReceipt(pool.chainDb, receipts[i])
pool.storeTxBlockData(txHash, txBlockData{hash, idx, uint64(i)})
delete(pool.pending, txHash)
list = append(list, tx)
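The check above fetches the block body first and the receipts only once a pending transaction is actually found in it, then cross-checks the receipt count against the transaction count. A condensed sketch of that lazy-fetch pattern; the helper name and the cache parameter are invented for illustration.

// receiptsFor lazily retrieves block receipts: nothing extra is fetched over
// LES until the caller actually needs receipt data for this block.
func receiptsFor(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64, cache *types.Receipts) (types.Receipts, error) {
	if *cache != nil {
		return *cache, nil
	}
	receipts, err := GetBlockReceipts(ctx, odr, hash, number)
	if err != nil {
		return nil, err
	}
	*cache = receipts
	return receipts, nil
}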
@ -214,18 +236,18 @@ func (pool *TxPool) rollbackTxs(hash common.Hash, txc txStateChanges) {
// possible to continue checking the missing blocks at the next chain head event
func (pool *TxPool) setNewHead(ctx context.Context, newHeader *types.Header) (txStateChanges, error) {
txc := make(txStateChanges)
oldh := pool.chain.GetHeader(pool.head)
oldh := pool.chain.GetHeaderByHash(pool.head)
newh := newHeader
// find common ancestor, create list of rolled back and new block hashes
var oldHashes, newHashes []common.Hash
for oldh.Hash() != newh.Hash() {
if oldh.GetNumberU64() >= newh.GetNumberU64() {
oldHashes = append(oldHashes, oldh.Hash())
oldh = pool.chain.GetHeader(oldh.ParentHash)
oldh = pool.chain.GetHeader(oldh.ParentHash, oldh.Number.Uint64()-1)
}
if oldh.GetNumberU64() < newh.GetNumberU64() {
newHashes = append(newHashes, newh.Hash())
newh = pool.chain.GetHeader(newh.ParentHash)
newh = pool.chain.GetHeader(newh.ParentHash, newh.Number.Uint64()-1)
}
}
if oldh.GetNumberU64() < pool.clearIdx {
@ -274,7 +296,7 @@ const blockCheckTimeout = time.Second * 3
func (pool *TxPool) eventLoop() {
for ev := range pool.events.Chan() {
switch ev.Data.(type) {
case LightChainHeadEvent:
case core.ChainHeadEvent:
pool.mu.Lock()
ctx, _ := context.WithTimeout(context.Background(), blockCheckTimeout)
head := pool.chain.CurrentHeader()
@ -339,7 +361,7 @@ func (pool *TxPool) validateTx(ctx context.Context, tx *types.Transaction) error
// Check the transaction doesn't exceed the current
// block limit gas.
header := pool.chain.GetHeader(pool.head)
header := pool.chain.GetHeaderByHash(pool.head)
if header.GasLimit.Cmp(tx.Gas()) < 0 {
return core.ErrGasLimit
}
@ -428,6 +450,7 @@ func (self *TxPool) Add(ctx context.Context, tx *types.Transaction) error {
if err := self.add(ctx, tx); err != nil {
return err
}
//fmt.Println("Send", tx.Hash())
self.relay.Send(types.Transactions{tx})
self.chainDb.Put(tx.Hash().Bytes(), data)
@ -469,8 +492,8 @@ func (tp *TxPool) GetTransaction(hash common.Hash) *types.Transaction {
// GetTransactions returns all currently processable transactions.
// The returned slice may be modified by the caller.
func (self *TxPool) GetTransactions() (txs types.Transactions) {
self.mu.Lock()
defer self.mu.Unlock()
self.mu.RLock()
defer self.mu.RUnlock()
txs = make(types.Transactions, len(self.pending))
i := 0
@ -481,6 +504,29 @@ func (self *TxPool) GetTransactions() (txs types.Transactions) {
return txs
}
// Content retrieves the data content of the transaction pool, returning all the
// pending as well as queued transactions, grouped by account and nonce.
func (self *TxPool) Content() (map[common.Address]map[uint64][]*types.Transaction, map[common.Address]map[uint64][]*types.Transaction) {
self.mu.RLock()
defer self.mu.RUnlock()
// Retrieve all the pending transactions and sort by account and by nonce
pending := make(map[common.Address]map[uint64][]*types.Transaction)
for _, tx := range self.pending {
account, _ := tx.From()
owned, ok := pending[account]
if !ok {
owned = make(map[uint64][]*types.Transaction)
pending[account] = owned
}
owned[tx.Nonce()] = append(owned[tx.Nonce()], tx)
}
// There are no queued transactions in a light pool, just return an empty map
queued := make(map[common.Address]map[uint64][]*types.Transaction)
return pending, queued
}
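A small usage sketch for the new Content accessor, for example from an RPC handler; it relies only on the signature shown above, and iteration order over the maps is unspecified.

func countPending(pool *TxPool) int {
	pending, queued := pool.Content()
	_ = queued // always empty for the light pool, as noted above
	total := 0
	for _, byNonce := range pending {
		for _, txs := range byNonce {
			total += len(txs)
		}
	}
	return total
}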
// RemoveTransactions removes all given transactions from the pool.
func (self *TxPool) RemoveTransactions(txs types.Transactions) {
self.mu.Lock()

View File

@ -81,7 +81,7 @@ func (self *VMEnv) SetDepth(i int) { self.depth = i }
func (self *VMEnv) VmType() vm.Type { return self.typ }
func (self *VMEnv) SetVmType(t vm.Type) { self.typ = t }
func (self *VMEnv) GetHash(n uint64) common.Hash {
for header := self.chain.GetHeader(self.header.ParentHash); header != nil; header = self.chain.GetHeader(header.ParentHash) {
for header := self.chain.GetHeader(self.header.ParentHash, self.header.Number.Uint64()-1); header != nil; header = self.chain.GetHeader(header.ParentHash, header.Number.Uint64()-1) {
if header.GetNumberU64() == n {
return header.Hash()
}

View File

@ -272,7 +272,7 @@ func (self *worker) wait() {
go self.mux.Post(core.NewMinedBlockEvent{Block: block})
} else {
work.state.Commit()
parent := self.chain.GetBlock(block.ParentHash())
parent := self.chain.GetBlock(block.ParentHash(), block.NumberU64()-1)
if parent == nil {
glog.V(logger.Error).Infoln("Invalid block found during mining")
continue
@ -319,7 +319,7 @@ func (self *worker) wait() {
self.mux.Post(core.ChainHeadEvent{Block: block})
self.mux.Post(logs)
}
if err := core.WriteBlockReceipts(self.chainDb, block.Hash(), receipts); err != nil {
if err := core.WriteBlockReceipts(self.chainDb, block.Hash(), block.NumberU64(), receipts); err != nil {
glog.V(logger.Warn).Infoln("error writing block receipts:", err)
}
}(block, work.state.Logs(), work.receipts)

View File

@ -49,7 +49,7 @@ type Node struct {
datadir string // Path to the currently used data directory
eventmux *event.TypeMux // Event multiplexer used between the services of a stack
serverConfig *p2p.Server // Configuration of the underlying P2P networking layer
serverConfig p2p.Config
server *p2p.Server // Currently running P2P networking layer
serviceFuncs []ServiceConstructor // Service constructors (in dependency order)
@ -97,7 +97,7 @@ func New(conf *Config) (*Node, error) {
}
return &Node{
datadir: conf.DataDir,
serverConfig: &p2p.Server{
serverConfig: p2p.Config{
PrivateKey: conf.NodeKey(),
Name: conf.Name,
Discovery: !conf.NoDiscovery,
@ -151,9 +151,7 @@ func (n *Node) Start() error {
return ErrNodeRunning
}
// Otherwise copy and specialize the P2P configuration
running := new(p2p.Server)
*running = *n.serverConfig
running := &p2p.Server{Config: n.serverConfig}
services := make(map[reflect.Type]Service)
for _, constructor := range n.serviceFuncs {
// Create a new context for the particular service

View File

@ -54,12 +54,8 @@ var errServerStopped = errors.New("server stopped")
var srvjslog = logger.NewJsonLogger()
// Server manages all peer connections.
//
// The fields of Server are used as configuration parameters.
// You should set them before starting the Server. Fields may not be
// modified while the server is running.
type Server struct {
// Config holds Server options.
type Config struct {
// This field must be set to a valid secp256k1 private key.
PrivateKey *ecdsa.PrivateKey
@ -120,6 +116,12 @@ type Server struct {
// If NoDial is true, the server will not dial any peers.
NoDial bool
}
// Server manages all peer connections.
type Server struct {
// Config fields may not be modified while the server is running.
Config
// Hooks for testing. These are useful because we can inhibit
// the whole protocol stack.
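With the options split out into Config, a server is built by embedding a configuration value instead of copying a template Server, which is exactly what the node package change above does. A minimal sketch; the values are illustrative and MaxPeers is assumed to still be one of the config fields.

func newServerFromConfig(key *ecdsa.PrivateKey) *Server {
	cfg := Config{
		PrivateKey: key,
		Name:       "example/v1.0",
		MaxPeers:   25,
		NoDial:     false,
	}
	// The running server simply embeds the immutable configuration.
	return &Server{Config: cfg}
}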

View File

@ -25,6 +25,8 @@ import (
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/les"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/node"
@ -58,12 +60,20 @@ type ReleaseService struct {
// releases and notify the user of such.
func NewReleaseService(ctx *node.ServiceContext, config Config) (node.Service, error) {
// Retrieve the Ethereum service dependency to access the blockchain
var apiBackend ethapi.Backend
var ethereum *eth.FullNodeService
if err := ctx.Service(&ethereum); err != nil {
return nil, err
if err := ctx.Service(&ethereum); err == nil {
apiBackend = ethereum.ApiBackend
} else {
var ethereum *les.LightNodeService
if err := ctx.Service(&ethereum); err == nil {
apiBackend = ethereum.ApiBackend
} else {
return nil, err
}
}
// Construct the release service
contract, err := NewReleaseOracle(config.Oracle, eth.NewContractBackend(ethereum))
contract, err := NewReleaseOracle(config.Oracle, eth.NewContractBackend(apiBackend))
if err != nil {
return nil, err
}
@ -112,7 +122,7 @@ func (r *ReleaseService) checker() {
// Retrieve the current version, and handle missing contracts gracefully
ctx, _ := context.WithTimeout(context.Background(), time.Second*5)
opts := &bind.CallOpts{Ctx: ctx}
opts := &bind.CallOpts{Context: ctx}
version, err := r.oracle.CurrentVersion(opts)
if err != nil {
if err == bind.ErrNoCode {

View File

@ -30,7 +30,7 @@ import (
)
const (
jsonRPCVersion = "2.0"
JSONRPCVersion = "2.0"
serviceMethodSeparator = "_"
subscribeMethod = "eth_subscribe"
unsubscribeMethod = "eth_unsubscribe"
@ -302,31 +302,31 @@ func parsePositionalArguments(args json.RawMessage, callbackArgs []reflect.Type)
// CreateResponse will create a JSON-RPC success response with the given id and reply as result.
func (c *jsonCodec) CreateResponse(id interface{}, reply interface{}) interface{} {
if isHexNum(reflect.TypeOf(reply)) {
return &JSONSuccessResponse{Version: jsonRPCVersion, Id: id, Result: fmt.Sprintf(`%#x`, reply)}
return &JSONSuccessResponse{Version: JSONRPCVersion, Id: id, Result: fmt.Sprintf(`%#x`, reply)}
}
return &JSONSuccessResponse{Version: jsonRPCVersion, Id: id, Result: reply}
return &JSONSuccessResponse{Version: JSONRPCVersion, Id: id, Result: reply}
}
// CreateErrorResponse will create a JSON-RPC error response with the given id and error.
func (c *jsonCodec) CreateErrorResponse(id interface{}, err RPCError) interface{} {
return &JSONErrResponse{Version: jsonRPCVersion, Id: id, Error: JSONError{Code: err.Code(), Message: err.Error()}}
return &JSONErrResponse{Version: JSONRPCVersion, Id: id, Error: JSONError{Code: err.Code(), Message: err.Error()}}
}
// CreateErrorResponseWithInfo will create a JSON-RPC error response with the given id and error.
// info is optional and contains additional information about the error. When an empty string is passed it is ignored.
func (c *jsonCodec) CreateErrorResponseWithInfo(id interface{}, err RPCError, info interface{}) interface{} {
return &JSONErrResponse{Version: jsonRPCVersion, Id: id,
return &JSONErrResponse{Version: JSONRPCVersion, Id: id,
Error: JSONError{Code: err.Code(), Message: err.Error(), Data: info}}
}
// CreateNotification will create a JSON-RPC notification with the given subscription id and event as params.
func (c *jsonCodec) CreateNotification(subid string, event interface{}) interface{} {
if isHexNum(reflect.TypeOf(event)) {
return &jsonNotification{Version: jsonRPCVersion, Method: notificationMethod,
return &jsonNotification{Version: JSONRPCVersion, Method: notificationMethod,
Params: jsonSubscription{Subscription: subid, Result: fmt.Sprintf(`%#x`, event)}}
}
return &jsonNotification{Version: jsonRPCVersion, Method: notificationMethod,
return &jsonNotification{Version: JSONRPCVersion, Method: notificationMethod,
Params: jsonSubscription{Subscription: subid, Result: event}}
}

View File

@ -342,7 +342,13 @@ func (s *Server) exec(ctx context.Context, codec ServerCodec, req *serverRequest
if req.err != nil {
response = codec.CreateErrorResponse(&req.id, req.err)
} else {
/*fmt.Println()
fmt.Println("SREQ")
fmt.Println(*req)*/
response, callback = s.handle(ctx, codec, req)
/*fmt.Println("RESP")
fmt.Println(response)
fmt.Println()*/
}
if err := codec.Write(response); err != nil {
@ -366,9 +372,15 @@ func (s *Server) execBatch(ctx context.Context, codec ServerCodec, requests []*s
responses[i] = codec.CreateErrorResponse(&req.id, req.err)
} else {
var callback func()
/*fmt.Println()
fmt.Println("SREQ batch")
fmt.Println(*req)*/
if responses[i], callback = s.handle(ctx, codec, req); callback != nil {
callbacks = append(callbacks, callback)
}
/*fmt.Println("RESP")
fmt.Println(responses[i])
fmt.Println()*/
}
}
@ -396,6 +408,10 @@ func (s *Server) readRequest(codec ServerCodec) ([]*serverRequest, bool, RPCErro
// verify requests
for i, r := range reqs {
/*fmt.Println()
fmt.Println(time.Now())
fmt.Println("REQ")
fmt.Println(r)*/
var ok bool
var svc *service

View File

@ -284,3 +284,31 @@ type Client interface {
Close()
}
type ClientRestartWrapper struct {
client Client
newClientFn func() Client
mu sync.RWMutex
}
func NewClientRestartWrapper(newClientFn func() Client) *ClientRestartWrapper {
return &ClientRestartWrapper{
client: newClientFn(),
newClientFn: newClientFn,
}
}
func (rw *ClientRestartWrapper) Client() Client {
rw.mu.RLock()
defer rw.mu.RUnlock()
return rw.client
}
func (rw *ClientRestartWrapper) Restart() {
rw.mu.Lock()
defer rw.mu.Unlock()
rw.client.Close()
rw.client = rw.newClientFn()
}
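A usage sketch for the restart wrapper; the dial function is a placeholder for whatever constructs a concrete Client, and only Client, Restart and Close come from the code above.

func useWithRestart(dial func() Client) {
	rw := NewClientRestartWrapper(dial)
	// Always go through Client() so that a concurrent Restart() is safe:
	// readers take the read lock, Restart takes the write lock.
	c := rw.Client()
	_ = c
	// If the underlying connection dies, swap in a fresh client; subsequent
	// Client() calls see the new instance.
	rw.Restart()
	rw.Client().Close()
}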

View File

@ -62,7 +62,7 @@ func (self *Iterator) next(node interface{}, key []byte, isIterStart bool) []byt
switch node := node.(type) {
case fullNode:
if len(key) > 0 {
k := self.next(node[key[0]], key[1:], isIterStart)
k := self.next(node.Children[key[0]], key[1:], isIterStart)
if k != nil {
return append([]byte{key[0]}, k...)
}
@ -74,7 +74,7 @@ func (self *Iterator) next(node interface{}, key []byte, isIterStart bool) []byt
}
for i := r; i < 16; i++ {
k := self.key(node[i])
k := self.key(node.Children[i])
if k != nil {
return append([]byte{i}, k...)
}
@ -130,12 +130,12 @@ func (self *Iterator) key(node interface{}) []byte {
}
return append(k, self.key(node.Val)...)
case fullNode:
if node[16] != nil {
self.Value = node[16].(valueNode)
if node.Children[16] != nil {
self.Value = node.Children[16].(valueNode)
return []byte{16}
}
for i := 0; i < 16; i++ {
k := self.key(node[i])
k := self.key(node.Children[i])
if k != nil {
return append([]byte{byte(i)}, k...)
}
@ -175,7 +175,7 @@ type NodeIterator struct {
// NewNodeIterator creates a post-order trie iterator.
func NewNodeIterator(trie *Trie) *NodeIterator {
if bytes.Compare(trie.Root(), emptyRoot.Bytes()) == 0 {
if trie.Hash() == emptyState {
return new(NodeIterator)
}
return &NodeIterator{trie: trie}
@ -205,9 +205,11 @@ func (it *NodeIterator) step() error {
}
// Initialize the iterator if we've just started, or pop off the old node otherwise
if len(it.stack) == 0 {
it.stack = append(it.stack, &nodeIteratorState{node: it.trie.root, child: -1})
// Always start with a collapsed root
root := it.trie.Hash()
it.stack = append(it.stack, &nodeIteratorState{node: hashNode(root[:]), child: -1})
if it.stack[0].node == nil {
return fmt.Errorf("root node missing: %x", it.trie.Root())
return fmt.Errorf("root node missing: %x", it.trie.Hash())
}
} else {
it.stack = it.stack[:len(it.stack)-1]
@ -225,11 +227,11 @@ func (it *NodeIterator) step() error {
}
if node, ok := parent.node.(fullNode); ok {
// Full node, traverse all children, then the node itself
if parent.child >= len(node) {
if parent.child >= len(node.Children) {
break
}
for parent.child++; parent.child < len(node); parent.child++ {
if current := node[parent.child]; current != nil {
for parent.child++; parent.child < len(node.Children); parent.child++ {
if current := node.Children[parent.child]; current != nil {
it.stack = append(it.stack, &nodeIteratorState{node: current, parent: ancestor, child: -1})
break
}

View File

@ -29,18 +29,36 @@ var indices = []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b
type node interface {
fstring(string) string
cache() (hashNode, bool)
}
type (
fullNode [17]node
fullNode struct {
Children [17]node // Actual trie node data to encode/decode (needs custom encoder)
hash hashNode // Cached hash of the node to prevent rehashing (may be nil)
dirty bool // Cached flag whether the node's new or already stored
}
shortNode struct {
Key []byte
Val node
Key []byte
Val node
hash hashNode // Cached hash of the node to prevent rehashing (may be nil)
dirty bool // Cached flag whether the node's new or already stored
}
hashNode []byte
valueNode []byte
)
// EncodeRLP encodes a full node into the consensus RLP format.
func (n fullNode) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, n.Children)
}
// Cache accessors to retrieve precalculated values (avoid lengthy type switches).
func (n fullNode) cache() (hashNode, bool) { return n.hash, n.dirty }
func (n shortNode) cache() (hashNode, bool) { return n.hash, n.dirty }
func (n hashNode) cache() (hashNode, bool) { return nil, true }
func (n valueNode) cache() (hashNode, bool) { return nil, true }
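Because shortNode and fullNode are now structs whose trailing fields cache the hash and dirty state, code that builds them (as this commit does positionally) has to decide those values too. A sketch of the two common shapes, written with field names for clarity; it assumes it lives inside the trie package.

func exampleNodes(key, value, storedHash []byte) (node, node) {
	// A freshly created leaf: no cached hash yet, marked dirty so the hasher
	// will encode (and optionally store) it on the next hash/commit pass.
	leaf := shortNode{Key: key, Val: valueNode(value), dirty: true}
	// A node decoded from the database: it carries the hash it was stored
	// under and is therefore clean.
	branch := fullNode{hash: hashNode(storedHash)}
	return leaf, branch
}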
// Pretty printing.
func (n fullNode) String() string { return n.fstring("") }
func (n shortNode) String() string { return n.fstring("") }
@ -49,7 +67,7 @@ func (n valueNode) String() string { return n.fstring("") }
func (n fullNode) fstring(ind string) string {
resp := fmt.Sprintf("[\n%s ", ind)
for i, node := range n {
for i, node := range n.Children {
if node == nil {
resp += fmt.Sprintf("%s: <nil> ", indices[i])
} else {
@ -68,16 +86,16 @@ func (n valueNode) fstring(ind string) string {
return fmt.Sprintf("%x ", []byte(n))
}
func mustDecodeNode(dbkey, buf []byte) node {
n, err := decodeNode(buf)
func mustDecodeNode(hash, buf []byte) node {
n, err := decodeNode(hash, buf)
if err != nil {
panic(fmt.Sprintf("node %x: %v", dbkey, err))
panic(fmt.Sprintf("node %x: %v", hash, err))
}
return n
}
// decodeNode parses the RLP encoding of a trie node.
func decodeNode(buf []byte) (node, error) {
func decodeNode(hash, buf []byte) (node, error) {
if len(buf) == 0 {
return nil, io.ErrUnexpectedEOF
}
@ -87,18 +105,18 @@ func decodeNode(buf []byte) (node, error) {
}
switch c, _ := rlp.CountValues(elems); c {
case 2:
n, err := decodeShort(elems)
n, err := decodeShort(hash, buf, elems)
return n, wrapError(err, "short")
case 17:
n, err := decodeFull(elems)
n, err := decodeFull(hash, buf, elems)
return n, wrapError(err, "full")
default:
return nil, fmt.Errorf("invalid number of list elements: %v", c)
}
}
func decodeShort(buf []byte) (node, error) {
kbuf, rest, err := rlp.SplitString(buf)
func decodeShort(hash, buf, elems []byte) (node, error) {
kbuf, rest, err := rlp.SplitString(elems)
if err != nil {
return nil, err
}
@ -109,30 +127,30 @@ func decodeShort(buf []byte) (node, error) {
if err != nil {
return nil, fmt.Errorf("invalid value node: %v", err)
}
return shortNode{key, valueNode(val)}, nil
return shortNode{key, valueNode(val), hash, false}, nil
}
r, _, err := decodeRef(rest)
if err != nil {
return nil, wrapError(err, "val")
}
return shortNode{key, r}, nil
return shortNode{key, r, hash, false}, nil
}
func decodeFull(buf []byte) (fullNode, error) {
var n fullNode
func decodeFull(hash, buf, elems []byte) (fullNode, error) {
n := fullNode{hash: hash}
for i := 0; i < 16; i++ {
cld, rest, err := decodeRef(buf)
cld, rest, err := decodeRef(elems)
if err != nil {
return n, wrapError(err, fmt.Sprintf("[%d]", i))
}
n[i], buf = cld, rest
n.Children[i], elems = cld, rest
}
val, _, err := rlp.SplitString(buf)
val, _, err := rlp.SplitString(elems)
if err != nil {
return n, err
}
if len(val) > 0 {
n[16] = valueNode(val)
n.Children[16] = valueNode(val)
}
return n, nil
}
@ -152,7 +170,7 @@ func decodeRef(buf []byte) (node, []byte, error) {
err := fmt.Errorf("oversized embedded node (size is %d bytes, want size < %d)", size, hashLen)
return nil, buf, err
}
n, err := decodeNode(buf)
n, err := decodeNode(nil, buf)
return n, rest, err
case kind == rlp.String && len(val) == 0:
// empty node

View File

@ -54,7 +54,7 @@ func (t *Trie) Prove(key []byte) []rlp.RawValue {
}
nodes = append(nodes, n)
case fullNode:
tn = n[key[0]]
tn = n.Children[key[0]]
key = key[1:]
nodes = append(nodes, n)
case hashNode:
@ -77,7 +77,7 @@ func (t *Trie) Prove(key []byte) []rlp.RawValue {
for i, n := range nodes {
// Don't bother checking for errors here since hasher panics
// if encoding doesn't work and we're not writing to any database.
n, _ = t.hasher.replaceChildren(n, nil)
n, _, _ = t.hasher.hashChildren(n, nil)
hn, _ := t.hasher.store(n, nil, false)
if _, ok := hn.(hashNode); ok || i == 0 {
// If the node's database encoding is a hash (or is the
@ -103,7 +103,7 @@ func VerifyProof(rootHash common.Hash, key []byte, proof []rlp.RawValue) (value
if !bytes.Equal(sha.Sum(nil), wantHash) {
return nil, fmt.Errorf("bad proof node %d: hash mismatch", i)
}
n, err := decodeNode(buf)
n, err := decodeNode(wantHash, buf)
if err != nil {
return nil, fmt.Errorf("bad proof node %d: %v", i, err)
}
@ -139,7 +139,7 @@ func get(tn node, key []byte) ([]byte, node) {
tn = n.Val
key = key[len(n.Key):]
case fullNode:
tn = n[key[0]]
tn = n.Children[key[0]]
key = key[1:]
case hashNode:
return key, n

View File

@ -162,11 +162,11 @@ func (t *SecureTrie) CommitTo(db DatabaseWriter) (root common.Hash, err error) {
}
t.secKeyCache = make(map[string][]byte)
}
n, err := t.hashRoot(db)
n, clean, err := t.hashRoot(db)
if err != nil {
return (common.Hash{}), err
}
t.root = n
t.root = clean
return common.BytesToHash(n.(hashNode)), nil
}

View File

@ -17,6 +17,7 @@
package trie
import (
"errors"
"fmt"
"github.com/ethereum/go-ethereum/common"
@ -24,6 +25,10 @@ import (
"gopkg.in/karalabe/cookiejar.v2/collections/prque"
)
// ErrNotRequested is returned by the trie sync when it's requested to process a
// node it did not request.
var ErrNotRequested = errors.New("not requested")
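A caller-side sketch of the new sentinel error, relying only on the SyncResult type and the Process signature that appear later in this file; the drop-versus-abort policy in the comments is one plausible choice, not code from the commit.

func deliverNode(sched *TrieSync, hash common.Hash, data []byte) error {
	if _, err := sched.Process([]SyncResult{{Hash: hash, Data: data}}); err != nil {
		if err == ErrNotRequested {
			// The peer sent a node that was never scheduled; ignore it
			// instead of aborting the whole state sync.
			return nil
		}
		return err
	}
	return nil
}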
// request represents a scheduled or already in-flight state retrieval request.
type request struct {
hash common.Hash // Hash of the node data content to retrieve
@ -75,8 +80,9 @@ func (s *TrieSync) AddSubTrie(root common.Hash, depth int, parent common.Hash, c
if root == emptyRoot {
return
}
blob, _ := s.database.Get(root.Bytes())
if local, err := decodeNode(blob); local != nil && err == nil {
key := root.Bytes()
blob, _ := s.database.Get(key)
if local, err := decodeNode(key, blob); local != nil && err == nil {
return
}
// Assemble the new sub-trie sync request
@ -143,7 +149,7 @@ func (s *TrieSync) Process(results []SyncResult) (int, error) {
// If the item was not requested, bail out
request := s.requests[item.Hash]
if request == nil {
return i, fmt.Errorf("not requested: %x", item.Hash)
return i, ErrNotRequested
}
// If the item is a raw entry request, commit directly
if request.object == nil {
@ -152,7 +158,7 @@ func (s *TrieSync) Process(results []SyncResult) (int, error) {
continue
}
// Decode the node data content and update the request
node, err := decodeNode(item.Data)
node, err := decodeNode(item.Hash[:], item.Data)
if err != nil {
return i, err
}
@ -213,9 +219,9 @@ func (s *TrieSync) children(req *request) ([]*request, error) {
}}
case fullNode:
for i := 0; i < 17; i++ {
if node[i] != nil {
if node.Children[i] != nil {
children = append(children, child{
node: &node[i],
node: &node.Children[i],
depth: req.depth + 1,
})
}
@ -238,7 +244,7 @@ func (s *TrieSync) children(req *request) ([]*request, error) {
if node, ok := (*child.node).(hashNode); ok {
// Try to resolve the node from the local database
blob, _ := s.database.Get(node)
if local, err := decodeNode(blob); local != nil && err == nil {
if local, err := decodeNode(node[:], blob); local != nil && err == nil {
*child.node = local
continue
}

View File

@ -129,7 +129,7 @@ func (t *Trie) TryGet(key []byte) ([]byte, error) {
tn = n.Val
pos += len(n.Key)
case fullNode:
tn = n[key[pos]]
tn = n.Children[key[pos]]
pos++
case nil:
return nil, nil
@ -169,13 +169,13 @@ func (t *Trie) Update(key, value []byte) {
func (t *Trie) TryUpdate(key, value []byte) error {
k := compactHexDecode(key)
if len(value) != 0 {
n, err := t.insert(t.root, nil, k, valueNode(value))
_, n, err := t.insert(t.root, nil, k, valueNode(value))
if err != nil {
return err
}
t.root = n
} else {
n, err := t.delete(t.root, nil, k)
_, n, err := t.delete(t.root, nil, k)
if err != nil {
return err
}
@ -184,9 +184,12 @@ func (t *Trie) TryUpdate(key, value []byte) error {
return nil
}
func (t *Trie) insert(n node, prefix, key []byte, value node) (node, error) {
func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error) {
if len(key) == 0 {
return value, nil
if v, ok := n.(valueNode); ok {
return !bytes.Equal(v, value.(valueNode)), value, nil
}
return true, value, nil
}
switch n := n.(type) {
case shortNode:
@ -194,53 +197,63 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (node, error) {
// If the whole key matches, keep this short node as is
// and only update the value.
if matchlen == len(n.Key) {
nn, err := t.insert(n.Val, append(prefix, key[:matchlen]...), key[matchlen:], value)
dirty, nn, err := t.insert(n.Val, append(prefix, key[:matchlen]...), key[matchlen:], value)
if err != nil {
return nil, err
return false, nil, err
}
return shortNode{n.Key, nn}, nil
if !dirty {
return false, n, nil
}
return true, shortNode{n.Key, nn, nil, true}, nil
}
// Otherwise branch out at the index where they differ.
var branch fullNode
branch := fullNode{dirty: true}
var err error
branch[n.Key[matchlen]], err = t.insert(nil, append(prefix, n.Key[:matchlen+1]...), n.Key[matchlen+1:], n.Val)
_, branch.Children[n.Key[matchlen]], err = t.insert(nil, append(prefix, n.Key[:matchlen+1]...), n.Key[matchlen+1:], n.Val)
if err != nil {
return nil, err
return false, nil, err
}
branch[key[matchlen]], err = t.insert(nil, append(prefix, key[:matchlen+1]...), key[matchlen+1:], value)
_, branch.Children[key[matchlen]], err = t.insert(nil, append(prefix, key[:matchlen+1]...), key[matchlen+1:], value)
if err != nil {
return nil, err
return false, nil, err
}
// Replace this shortNode with the branch if it occurs at index 0.
if matchlen == 0 {
return branch, nil
return true, branch, nil
}
// Otherwise, replace it with a short node leading up to the branch.
return shortNode{key[:matchlen], branch}, nil
return true, shortNode{key[:matchlen], branch, nil, true}, nil
case fullNode:
nn, err := t.insert(n[key[0]], append(prefix, key[0]), key[1:], value)
dirty, nn, err := t.insert(n.Children[key[0]], append(prefix, key[0]), key[1:], value)
if err != nil {
return nil, err
return false, nil, err
}
n[key[0]] = nn
return n, nil
if !dirty {
return false, n, nil
}
n.Children[key[0]], n.hash, n.dirty = nn, nil, true
return true, n, nil
case nil:
return shortNode{key, value}, nil
return true, shortNode{key, value, nil, true}, nil
case hashNode:
// We've hit a part of the trie that isn't loaded yet. Load
// the node and insert into it. This leaves all child nodes on
// the path to the value in the trie.
//
// TODO: track whether insertion changed the value and keep
// n as a hash node if it didn't.
rn, err := t.resolveHash(n, prefix, key)
if err != nil {
return nil, err
return false, nil, err
}
return t.insert(rn, prefix, key, value)
dirty, nn, err := t.insert(rn, prefix, key, value)
if err != nil {
return false, nil, err
}
if !dirty {
return false, rn, nil
}
return true, nn, nil
default:
panic(fmt.Sprintf("%T: invalid node: %v", n, n))
@ -258,7 +271,7 @@ func (t *Trie) Delete(key []byte) {
// If a node was not found in the database, a MissingNodeError is returned.
func (t *Trie) TryDelete(key []byte) error {
k := compactHexDecode(key)
n, err := t.delete(t.root, nil, k)
_, n, err := t.delete(t.root, nil, k)
if err != nil {
return err
}
@ -269,23 +282,26 @@ func (t *Trie) TryDelete(key []byte) error {
// delete returns the new root of the trie with key deleted.
// It reduces the trie to minimal form by simplifying
// nodes on the way up after deleting recursively.
func (t *Trie) delete(n node, prefix, key []byte) (node, error) {
func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) {
switch n := n.(type) {
case shortNode:
matchlen := prefixLen(key, n.Key)
if matchlen < len(n.Key) {
return n, nil // don't replace n on mismatch
return false, n, nil // don't replace n on mismatch
}
if matchlen == len(key) {
return nil, nil // remove n entirely for whole matches
return true, nil, nil // remove n entirely for whole matches
}
// The key is longer than n.Key. Remove the remaining suffix
// from the subtrie. Child can never be nil here since the
// subtrie must contain at least two other values with keys
// longer than n.Key.
child, err := t.delete(n.Val, append(prefix, key[:len(n.Key)]...), key[len(n.Key):])
dirty, child, err := t.delete(n.Val, append(prefix, key[:len(n.Key)]...), key[len(n.Key):])
if err != nil {
return nil, err
return false, nil, err
}
if !dirty {
return false, n, nil
}
switch child := child.(type) {
case shortNode:
@ -295,17 +311,21 @@ func (t *Trie) delete(n node, prefix, key []byte) (node, error) {
// always creates a new slice) instead of append to
// avoid modifying n.Key since it might be shared with
// other nodes.
return shortNode{concat(n.Key, child.Key...), child.Val}, nil
return true, shortNode{concat(n.Key, child.Key...), child.Val, nil, true}, nil
default:
return shortNode{n.Key, child}, nil
return true, shortNode{n.Key, child, nil, true}, nil
}
case fullNode:
nn, err := t.delete(n[key[0]], append(prefix, key[0]), key[1:])
dirty, nn, err := t.delete(n.Children[key[0]], append(prefix, key[0]), key[1:])
if err != nil {
return nil, err
return false, nil, err
}
n[key[0]] = nn
if !dirty {
return false, n, nil
}
n.Children[key[0]], n.hash, n.dirty = nn, nil, true
// Check how many non-nil entries are left after deleting and
// reduce the full node to a short node if only one entry is
// left. Since n must've contained at least two children
@ -316,7 +336,7 @@ func (t *Trie) delete(n node, prefix, key []byte) (node, error) {
// value that is left in n or -2 if n contains at least two
// values.
pos := -1
for i, cld := range n {
for i, cld := range n.Children {
if cld != nil {
if pos == -1 {
pos = i
@ -334,37 +354,41 @@ func (t *Trie) delete(n node, prefix, key []byte) (node, error) {
// shortNode{..., shortNode{...}}. Since the entry
// might not be loaded yet, resolve it just for this
// check.
cnode, err := t.resolve(n[pos], prefix, []byte{byte(pos)})
cnode, err := t.resolve(n.Children[pos], prefix, []byte{byte(pos)})
if err != nil {
return nil, err
return false, nil, err
}
if cnode, ok := cnode.(shortNode); ok {
k := append([]byte{byte(pos)}, cnode.Key...)
return shortNode{k, cnode.Val}, nil
return true, shortNode{k, cnode.Val, nil, true}, nil
}
}
// Otherwise, n is replaced by a one-nibble short node
// containing the child.
return shortNode{[]byte{byte(pos)}, n[pos]}, nil
return true, shortNode{[]byte{byte(pos)}, n.Children[pos], nil, true}, nil
}
// n still contains at least two values and cannot be reduced.
return n, nil
return true, n, nil
case nil:
return nil, nil
return false, nil, nil
case hashNode:
// We've hit a part of the trie that isn't loaded yet. Load
// the node and delete from it. This leaves all child nodes on
// the path to the value in the trie.
//
// TODO: track whether deletion actually hit a key and keep
// n as a hash node if it didn't.
rn, err := t.resolveHash(n, prefix, key)
if err != nil {
return nil, err
return false, nil, err
}
return t.delete(rn, prefix, key)
dirty, nn, err := t.delete(rn, prefix, key)
if err != nil {
return false, nil, err
}
if !dirty {
return false, rn, nil
}
return true, nn, nil
default:
panic(fmt.Sprintf("%T: invalid node: %v (%v)", n, n, key))
@ -413,8 +437,9 @@ func (t *Trie) Root() []byte { return t.Hash().Bytes() }
// Hash returns the root hash of the trie. It does not write to the
// database and can be used even if the trie doesn't have one.
func (t *Trie) Hash() common.Hash {
root, _ := t.hashRoot(nil)
return common.BytesToHash(root.(hashNode))
hash, cached, _ := t.hashRoot(nil)
t.root = cached
return common.BytesToHash(hash.(hashNode))
}
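Since hashRoot now also returns a cached copy of the root with per-node hashes filled in, calling Hash repeatedly no longer re-encodes the whole trie. A small sketch of that behaviour, assuming the package's New(root, db) constructor and an ethdb in-memory database as used by the existing tests.

func hashTwice() common.Hash {
	db, _ := ethdb.NewMemDatabase()
	t, _ := New(common.Hash{}, db)
	t.Update([]byte("some"), []byte("value"))

	first := t.Hash()  // walks the trie and caches per-node hashes on the way up
	second := t.Hash() // served from the cached hashes, no re-hashing needed
	if first != second {
		panic("trie hash must be deterministic")
	}
	return first
}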
// Commit writes all nodes to the trie's database.
@ -437,17 +462,17 @@ func (t *Trie) Commit() (root common.Hash, err error) {
// the changes made to db are written back to the trie's attached
// database before using the trie.
func (t *Trie) CommitTo(db DatabaseWriter) (root common.Hash, err error) {
n, err := t.hashRoot(db)
hash, cached, err := t.hashRoot(db)
if err != nil {
return (common.Hash{}), err
}
t.root = n
return common.BytesToHash(n.(hashNode)), nil
t.root = cached
return common.BytesToHash(hash.(hashNode)), nil
}
func (t *Trie) hashRoot(db DatabaseWriter) (node, error) {
func (t *Trie) hashRoot(db DatabaseWriter) (node, node, error) {
if t.root == nil {
return hashNode(emptyRoot.Bytes()), nil
return hashNode(emptyRoot.Bytes()), nil, nil
}
if t.hasher == nil {
t.hasher = newHasher()
@ -464,51 +489,87 @@ func newHasher() *hasher {
return &hasher{tmp: new(bytes.Buffer), sha: sha3.NewKeccak256()}
}
func (h *hasher) hash(n node, db DatabaseWriter, force bool) (node, error) {
hashed, err := h.replaceChildren(n, db)
// hash collapses a node down into a hash node, also returning a copy of the
// original node initialized with the computed hash to replace the original one.
func (h *hasher) hash(n node, db DatabaseWriter, force bool) (node, node, error) {
// If we're not storing the node, just hashing, use the available cached data
if hash, dirty := n.cache(); hash != nil && (db == nil || !dirty) {
return hash, n, nil
}
// Trie not processed yet or needs storage, walk the children
collapsed, cached, err := h.hashChildren(n, db)
if err != nil {
return hashNode{}, err
return hashNode{}, n, err
}
if n, err = h.store(hashed, db, force); err != nil {
return hashNode{}, err
hashed, err := h.store(collapsed, db, force)
if err != nil {
return hashNode{}, n, err
}
return n, nil
// Cache the hash of the node for later reuse
if hash, ok := hashed.(hashNode); ok && !force {
switch cached := cached.(type) {
case shortNode:
cached.hash = hash
if db != nil {
cached.dirty = false
}
return hashed, cached, nil
case fullNode:
cached.hash = hash
if db != nil {
cached.dirty = false
}
return hashed, cached, nil
}
}
return hashed, cached, nil
}
// hashChildren replaces child nodes of n with their hashes if the encoded
// size of the child is larger than a hash.
func (h *hasher) replaceChildren(n node, db DatabaseWriter) (node, error) {
// hashChildren replaces the children of a node with their hashes if the encoded
// size of the child is larger than a hash, returning the collapsed node as well
// as a replacement for the original node with the child hashes cached in.
func (h *hasher) hashChildren(original node, db DatabaseWriter) (node, node, error) {
var err error
switch n := n.(type) {
switch n := original.(type) {
case shortNode:
// Hash the short node's child, caching the newly hashed subtree
cached := n
cached.Key = common.CopyBytes(cached.Key)
n.Key = compactEncode(n.Key)
if _, ok := n.Val.(valueNode); !ok {
if n.Val, err = h.hash(n.Val, db, false); err != nil {
return n, err
if n.Val, cached.Val, err = h.hash(n.Val, db, false); err != nil {
return n, original, err
}
}
if n.Val == nil {
// Ensure that nil children are encoded as empty strings.
n.Val = valueNode(nil)
n.Val = valueNode(nil) // Ensure that nil children are encoded as empty strings.
}
return n, nil
return n, cached, nil
case fullNode:
// Hash the full node's children, caching the newly hashed subtrees
cached := fullNode{dirty: n.dirty}
for i := 0; i < 16; i++ {
if n[i] != nil {
if n[i], err = h.hash(n[i], db, false); err != nil {
return n, err
if n.Children[i] != nil {
if n.Children[i], cached.Children[i], err = h.hash(n.Children[i], db, false); err != nil {
return n, original, err
}
} else {
// Ensure that nil children are encoded as empty strings.
n[i] = valueNode(nil)
n.Children[i] = valueNode(nil) // Ensure that nil children are encoded as empty strings.
}
}
if n[16] == nil {
n[16] = valueNode(nil)
cached.Children[16] = n.Children[16]
if n.Children[16] == nil {
n.Children[16] = valueNode(nil)
}
return n, nil
return n, cached, nil
default:
return n, nil
// Value and hash nodes don't have children so they're left as they are
return n, original, nil
}
}
@ -517,21 +578,23 @@ func (h *hasher) store(n node, db DatabaseWriter, force bool) (node, error) {
if _, isHash := n.(hashNode); n == nil || isHash {
return n, nil
}
// Generate the RLP encoding of the node
h.tmp.Reset()
if err := rlp.Encode(h.tmp, n); err != nil {
panic("encode error: " + err.Error())
}
if h.tmp.Len() < 32 && !force {
// Nodes smaller than 32 bytes are stored inside their parent.
return n, nil
return n, nil // Nodes smaller than 32 bytes are stored inside their parent
}
// Larger nodes are replaced by their hash and stored in the database.
h.sha.Reset()
h.sha.Write(h.tmp.Bytes())
key := hashNode(h.sha.Sum(nil))
if db != nil {
err := db.Put(key, h.tmp.Bytes())
return key, err
hash, _ := n.cache()
if hash == nil {
h.sha.Reset()
h.sha.Write(h.tmp.Bytes())
hash = hashNode(h.sha.Sum(nil))
}
return key, nil
if db != nil {
return hash, db.Put(hash, h.tmp.Bytes())
}
return hash, nil
}

View File

@ -150,8 +150,7 @@ func (self *Whisper) GetIdentity(key *ecdsa.PublicKey) *ecdsa.PrivateKey {
func (self *Whisper) InjectIdentity(key *ecdsa.PrivateKey) error {
identity := string(crypto.FromECDSAPub(&key.PublicKey))
keyCopy := *key
self.keys[identity] = &keyCopy
self.keys[identity] = key
if _, ok := self.keys[identity]; !ok {
return fmt.Errorf("key insert into keys map failed")
}

View File

@ -1,5 +0,0 @@
language: go
go:
- 1.6
- tip

View File

@ -1,20 +0,0 @@
The MIT License (MIT)
Copyright (c) 2013 Fatih Arslan
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@ -1,154 +0,0 @@
# Color [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/color) [![Build Status](http://img.shields.io/travis/fatih/color.svg?style=flat-square)](https://travis-ci.org/fatih/color)
Color lets you use colorized outputs in terms of [ANSI Escape
Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It
has support for Windows too! The API can be used in several ways, pick one that
suits you.
![Color](http://i.imgur.com/c1JI0lA.png)
## Install
```bash
go get github.com/fatih/color
```
## Examples
### Standard colors
```go
// Print with default helper functions
color.Cyan("Prints text in cyan.")
// A newline will be appended automatically
color.Blue("Prints %s in blue.", "text")
// These are using the default foreground colors
color.Red("We have red")
color.Magenta("And many others ..")
```
### Mix and reuse colors
```go
// Create a new color object
c := color.New(color.FgCyan).Add(color.Underline)
c.Println("Prints cyan text with an underline.")
// Or just add them to New()
d := color.New(color.FgCyan, color.Bold)
d.Printf("This prints bold cyan %s\n", "too!.")
// Mix up foreground and background colors, create new mixes!
red := color.New(color.FgRed)
boldRed := red.Add(color.Bold)
boldRed.Println("This will print text in bold red.")
whiteBackground := red.Add(color.BgWhite)
whiteBackground.Println("Red text with white background.")
```
### Custom print functions (PrintFunc)
```go
// Create a custom print function for convenience
red := color.New(color.FgRed).PrintfFunc()
red("Warning")
red("Error: %s", err)
// Mix up multiple attributes
notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
notice("Don't forget this...")
```
### Insert into noncolor strings (SprintFunc)
```go
// Create SprintXxx functions to mix strings with other non-colorized strings:
yellow := color.New(color.FgYellow).SprintFunc()
red := color.New(color.FgRed).SprintFunc()
fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error"))
info := color.New(color.FgWhite, color.BgGreen).SprintFunc()
fmt.Printf("This %s rocks!\n", info("package"))
// Use helper functions
fmt.Printf("This", color.RedString("warning"), "should be not neglected.")
fmt.Printf(color.GreenString("Info:"), "an important message." )
// Windows supported too! Just don't forget to change the output to color.Output
fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
```
### Plug into existing code
```go
// Use handy standard colors
color.Set(color.FgYellow)
fmt.Println("Existing text will now be in yellow")
fmt.Printf("This one %s\n", "too")
color.Unset() // Don't forget to unset
// You can mix up parameters
color.Set(color.FgMagenta, color.Bold)
defer color.Unset() // Use it in your function
fmt.Println("All text will now be bold magenta.")
```
### Disable color
There might be a case where you want to disable color output (for example to
pipe the standard output of your app to somewhere else). `Color` has support to
disable colors both globally and for single color definition. For example
suppose you have a CLI app and a `--no-color` bool flag. You can easily disable
the color output with:
```go
var flagNoColor = flag.Bool("no-color", false, "Disable color output")
if *flagNoColor {
color.NoColor = true // disables colorized output
}
```
It also has support for single color definitions (local). You can
disable/enable color output on the fly:
```go
c := color.New(color.FgCyan)
c.Println("Prints cyan text")
c.DisableColor()
c.Println("This is printed without any color")
c.EnableColor()
c.Println("This prints again cyan...")
```
## Todo
* Save/Return previous values
* Evaluate fmt.Formatter interface
## Credits
* [Fatih Arslan](https://github.com/fatih)
* Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable)
## License
The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details

View File

@ -1,402 +0,0 @@
package color
import (
"fmt"
"os"
"strconv"
"strings"
"github.com/mattn/go-colorable"
"github.com/mattn/go-isatty"
)
// NoColor defines if the output is colorized or not. It's dynamically set to
// false or true based on the stdout's file descriptor referring to a terminal
// or not. This is a global option and affects all colors. For more control
// over each color block use the methods DisableColor() individually.
var NoColor = !isatty.IsTerminal(os.Stdout.Fd())
// Color defines a custom color object which is defined by SGR parameters.
type Color struct {
params []Attribute
noColor *bool
}
// Attribute defines a single SGR Code
type Attribute int
const escape = "\x1b"
// Base attributes
const (
Reset Attribute = iota
Bold
Faint
Italic
Underline
BlinkSlow
BlinkRapid
ReverseVideo
Concealed
CrossedOut
)
// Foreground text colors
const (
FgBlack Attribute = iota + 30
FgRed
FgGreen
FgYellow
FgBlue
FgMagenta
FgCyan
FgWhite
)
// Foreground Hi-Intensity text colors
const (
FgHiBlack Attribute = iota + 90
FgHiRed
FgHiGreen
FgHiYellow
FgHiBlue
FgHiMagenta
FgHiCyan
FgHiWhite
)
// Background text colors
const (
BgBlack Attribute = iota + 40
BgRed
BgGreen
BgYellow
BgBlue
BgMagenta
BgCyan
BgWhite
)
// Background Hi-Intensity text colors
const (
BgHiBlack Attribute = iota + 100
BgHiRed
BgHiGreen
BgHiYellow
BgHiBlue
BgHiMagenta
BgHiCyan
BgHiWhite
)
// New returns a newly created color object.
func New(value ...Attribute) *Color {
c := &Color{params: make([]Attribute, 0)}
c.Add(value...)
return c
}
// Set sets the given parameters immediately. It will change the color of
// output with the given SGR parameters until color.Unset() is called.
func Set(p ...Attribute) *Color {
c := New(p...)
c.Set()
return c
}
// Unset resets all escape attributes and clears the output. Usually should
// be called after Set().
func Unset() {
if NoColor {
return
}
fmt.Fprintf(Output, "%s[%dm", escape, Reset)
}
// Set sets the SGR sequence.
func (c *Color) Set() *Color {
if c.isNoColorSet() {
return c
}
fmt.Fprintf(Output, c.format())
return c
}
func (c *Color) unset() {
if c.isNoColorSet() {
return
}
Unset()
}
// Add is used to chain SGR parameters. Use as many as parameters to combine
// and create custom color objects. Example: Add(color.FgRed, color.Underline).
func (c *Color) Add(value ...Attribute) *Color {
c.params = append(c.params, value...)
return c
}
func (c *Color) prepend(value Attribute) {
c.params = append(c.params, 0)
copy(c.params[1:], c.params[0:])
c.params[0] = value
}
// Output defines the standard output of the print functions. By default
// os.Stdout is used.
var Output = colorable.NewColorableStdout()
// Print formats using the default formats for its operands and writes to
// standard output. Spaces are added between operands when neither is a
// string. It returns the number of bytes written and any write error
// encountered. This is the standard fmt.Print() method wrapped with the given
// color.
func (c *Color) Print(a ...interface{}) (n int, err error) {
c.Set()
defer c.unset()
return fmt.Fprint(Output, a...)
}
// Printf formats according to a format specifier and writes to standard output.
// It returns the number of bytes written and any write error encountered.
// This is the standard fmt.Printf() method wrapped with the given color.
func (c *Color) Printf(format string, a ...interface{}) (n int, err error) {
c.Set()
defer c.unset()
return fmt.Fprintf(Output, format, a...)
}
// Println formats using the default formats for its operands and writes to
// standard output. Spaces are always added between operands and a newline is
// appended. It returns the number of bytes written and any write error
// encountered. This is the standard fmt.Print() method wrapped with the given
// color.
func (c *Color) Println(a ...interface{}) (n int, err error) {
c.Set()
defer c.unset()
return fmt.Fprintln(Output, a...)
}
// PrintFunc returns a new function that prints the passed arguments as
// colorized with color.Print().
func (c *Color) PrintFunc() func(a ...interface{}) {
return func(a ...interface{}) { c.Print(a...) }
}
// PrintfFunc returns a new function that prints the passed arguments as
// colorized with color.Printf().
func (c *Color) PrintfFunc() func(format string, a ...interface{}) {
return func(format string, a ...interface{}) { c.Printf(format, a...) }
}
// PrintlnFunc returns a new function that prints the passed arguments as
// colorized with color.Println().
func (c *Color) PrintlnFunc() func(a ...interface{}) {
return func(a ...interface{}) { c.Println(a...) }
}
// SprintFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprint(). Useful to put into or mix into other
// string. Windows users should use this in conjuction with color.Output, example:
//
// put := New(FgYellow).SprintFunc()
// fmt.Fprintf(color.Output, "This is a %s", put("warning"))
func (c *Color) SprintFunc() func(a ...interface{}) string {
return func(a ...interface{}) string {
return c.wrap(fmt.Sprint(a...))
}
}
// SprintfFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprintf(). Useful to put into or mix into other
// string. Windows users should use this in conjuction with color.Output.
func (c *Color) SprintfFunc() func(format string, a ...interface{}) string {
return func(format string, a ...interface{}) string {
return c.wrap(fmt.Sprintf(format, a...))
}
}
// SprintlnFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprintln(). Useful to put into or mix into other
// string. Windows users should use this in conjuction with color.Output.
func (c *Color) SprintlnFunc() func(a ...interface{}) string {
return func(a ...interface{}) string {
return c.wrap(fmt.Sprintln(a...))
}
}
// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m"
// escape sequence. An example output might be: "1;36" -> bold cyan
func (c *Color) sequence() string {
format := make([]string, len(c.params))
for i, v := range c.params {
format[i] = strconv.Itoa(int(v))
}
return strings.Join(format, ";")
}
// wrap wraps the string s with the color's attributes. The returned string is
// ready to be printed.
func (c *Color) wrap(s string) string {
if c.isNoColorSet() {
return s
}
return c.format() + s + c.unformat()
}
func (c *Color) format() string {
return fmt.Sprintf("%s[%sm", escape, c.sequence())
}
func (c *Color) unformat() string {
return fmt.Sprintf("%s[%dm", escape, Reset)
}
// DisableColor disables the color output. Useful for keeping existing code
// unchanged while still producing output. Can be used for flags like
// "--no-color". To re-enable, use the EnableColor() method.
func (c *Color) DisableColor() {
c.noColor = boolPtr(true)
}
// EnableColor enables the color output. Use it in conjunction with
// DisableColor(). Otherwise this method has no side effects.
func (c *Color) EnableColor() {
c.noColor = boolPtr(false)
}
func (c *Color) isNoColorSet() bool {
// check first whether the user explicitly set the option
if c.noColor != nil {
return *c.noColor
}
// if not, fall back to the global option, which is disabled by default
return NoColor
}
// Equals returns a boolean value indicating whether two colors are equal.
func (c *Color) Equals(c2 *Color) bool {
if len(c.params) != len(c2.params) {
return false
}
for _, attr := range c.params {
if !c2.attrExists(attr) {
return false
}
}
return true
}
func (c *Color) attrExists(a Attribute) bool {
for _, attr := range c.params {
if attr == a {
return true
}
}
return false
}
func boolPtr(v bool) *bool {
return &v
}
// Black is a convenient helper function to print with black foreground. A
// newline is appended to format by default.
func Black(format string, a ...interface{}) { printColor(format, FgBlack, a...) }
// Red is a convenient helper function to print with red foreground. A
// newline is appended to format by default.
func Red(format string, a ...interface{}) { printColor(format, FgRed, a...) }
// Green is a convenient helper function to print with green foreground. A
// newline is appended to format by default.
func Green(format string, a ...interface{}) { printColor(format, FgGreen, a...) }
// Yellow is a convenient helper function to print with yellow foreground.
// A newline is appended to format by default.
func Yellow(format string, a ...interface{}) { printColor(format, FgYellow, a...) }
// Blue is a convenient helper function to print with blue foreground. A
// newline is appended to format by default.
func Blue(format string, a ...interface{}) { printColor(format, FgBlue, a...) }
// Magenta is a convenient helper function to print with magenta foreground.
// A newline is appended to format by default.
func Magenta(format string, a ...interface{}) { printColor(format, FgMagenta, a...) }
// Cyan is a convenient helper function to print with cyan foreground. A
// newline is appended to format by default.
func Cyan(format string, a ...interface{}) { printColor(format, FgCyan, a...) }
// White is a convenient helper function to print with white foreground. A
// newline is appended to format by default.
func White(format string, a ...interface{}) { printColor(format, FgWhite, a...) }
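// printColor prints the formatted arguments using the given foreground
// attribute, appending a newline to format if one is missing.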
func printColor(format string, p Attribute, a ...interface{}) {
if !strings.HasSuffix(format, "\n") {
format += "\n"
}
c := &Color{params: []Attribute{p}}
c.Printf(format, a...)
}
// BlackString is a convenient helper function to return a string with black
// foreground.
func BlackString(format string, a ...interface{}) string {
return New(FgBlack).SprintfFunc()(format, a...)
}
// RedString is a convenient helper function to return a string with red
// foreground.
func RedString(format string, a ...interface{}) string {
return New(FgRed).SprintfFunc()(format, a...)
}
// GreenString is a convenient helper function to return a string with green
// foreground.
func GreenString(format string, a ...interface{}) string {
return New(FgGreen).SprintfFunc()(format, a...)
}
// YellowString is a convenient helper function to return a string with yellow
// foreground.
func YellowString(format string, a ...interface{}) string {
return New(FgYellow).SprintfFunc()(format, a...)
}
// BlueString is a convenient helper function to return a string with blue
// foreground.
func BlueString(format string, a ...interface{}) string {
return New(FgBlue).SprintfFunc()(format, a...)
}
// MagentaString is a convenient helper function to return a string with magenta
// foreground.
func MagentaString(format string, a ...interface{}) string {
return New(FgMagenta).SprintfFunc()(format, a...)
}
// CyanString is a convenient helper function to return a string with cyan
// foreground.
func CyanString(format string, a ...interface{}) string {
return New(FgCyan).SprintfFunc()(format, a...)
}
// WhiteString is a convenient helper function to return a string with white
// foreground.
func WhiteString(format string, a ...interface{}) string {
return New(FgWhite).SprintfFunc()(format, a...)
}


@ -1,114 +0,0 @@
/*
Package color is an ANSI color package for producing colorized or otherwise
SGR-defined output on the standard output. The API can be used in several
ways; pick the one that suits you.
Use simple and default helper functions with predefined foreground colors:
color.Cyan("Prints text in cyan.")
// a newline will be appended automatically
color.Blue("Prints %s in blue.", "text")
// More default foreground colors..
color.Red("We have red")
color.Yellow("Yellow color too!")
color.Magenta("And many others ..")
However, there are times when custom color mixes are required. Below are some
examples that create custom color objects and use the print functions of each
separate color object.
// Create a new color object
c := color.New(color.FgCyan).Add(color.Underline)
c.Println("Prints cyan text with an underline.")
// Or just add them to New()
d := color.New(color.FgCyan, color.Bold)
d.Printf("This prints bold cyan %s\n", "too!")
// Mix up foreground and background colors, create new mixes!
red := color.New(color.FgRed)
boldRed := red.Add(color.Bold)
boldRed.Println("This will print text in bold red.")
whiteBackground := red.Add(color.BgWhite)
whiteBackground.Println("Red text with White background.")
You can create PrintXxx functions to simplify even more:
// Create a custom print function for convenience
red := color.New(color.FgRed).PrintfFunc()
red("warning")
red("error: %s", err)
// Mix up multiple attributes
notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
notice("don't forget this...")
Or create SprintXxx functions to mix strings with other non-colorized strings:
yellow := New(FgYellow).SprintFunc()
red := New(FgRed).SprintFunc()
fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error"))
info := New(FgWhite, BgGreen).SprintFunc()
fmt.Printf("this %s rocks!\n", info("package"))
Windows support is enabled by default. All Print functions work as intended.
However, for the color.SprintXXX functions, users should use fmt.FprintXXX and
set the output to color.Output:
fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
info := New(FgWhite, BgGreen).SprintFunc()
fmt.Fprintf(color.Output, "this %s rocks!\n", info("package"))
Using it with existing code is possible. Just use the Set() method to set the
standard output to the given parameters. That way, a rewrite of existing code
is not required.
// Use handy standard colors.
color.Set(color.FgYellow)
fmt.Println("Existing text will be now in Yellow")
fmt.Printf("This one %s\n", "too")
color.Unset() // don't forget to unset
// You can mix up parameters
color.Set(color.FgMagenta, color.Bold)
defer color.Unset() // use it in your function
fmt.Println("All text will be now bold magenta.")
There might be a case where you want to disable color output (for example,
when piping the standard output of your app somewhere else). `Color` supports
disabling colors both globally and for single color definitions. For example,
suppose you have a CLI app and a `--no-color` bool flag. You can easily disable
the color output with:
var flagNoColor = flag.Bool("no-color", false, "Disable color output")
if *flagNoColor {
color.NoColor = true // disables colorized output
}
It also has support for single color definitions (local). You can
disable/enable color output on the fly:
c := color.New(color.FgCyan)
c.Println("Prints cyan text")
c.DisableColor()
c.Println("This is printed without any color")
c.EnableColor()
c.Println("This prints again cyan...")
*/
package color


@ -1,43 +0,0 @@
# go-colorable
Colorable writer for Windows.
For example, most logger packages don't show colors on Windows. (I know it can be done with ansicon, but I don't want to require it.)
This package makes it possible to handle ANSI color escape sequences on Windows.
## Too Bad!
![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png)
## So Good!
![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png)
## Usage
```go
logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true})
logrus.SetOutput(colorable.NewColorableStdout())
logrus.Info("succeeded")
logrus.Warn("not correct")
logrus.Error("something error")
logrus.Fatal("panic")
```
You can compile the above code on non-Windows OSes as well.
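As an additional illustration, here is a minimal sketch (not from the original README) that writes raw ANSI escape sequences through the colorable writer; on Windows the escapes are translated into console attribute calls, while elsewhere they pass through to the terminal unchanged:
```go
package main

import (
	"fmt"

	"github.com/mattn/go-colorable"
)

func main() {
	// Wrap os.Stdout: on Windows this returns a writer that interprets
	// ANSI escape sequences; elsewhere it returns os.Stdout unchanged.
	out := colorable.NewColorableStdout()
	fmt.Fprintln(out, "\x1b[31mred text\x1b[0m")
	fmt.Fprintln(out, "\x1b[1;36mbold cyan text\x1b[0m")
}
```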
## Installation
```
$ go get github.com/mattn/go-colorable
```
# License
MIT
# Author
Yasuhiro Matsumoto (a.k.a mattn)


@ -1,24 +0,0 @@
// +build !windows
package colorable
import (
"io"
"os"
)
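// NewColorable returns file as-is: on non-Windows platforms the terminal
// interprets ANSI escape sequences natively, so no translation is needed.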
func NewColorable(file *os.File) io.Writer {
if file == nil {
panic("nil passed instead of *os.File to NewColorable()")
}
return file
}
func NewColorableStdout() io.Writer {
return os.Stdout
}
func NewColorableStderr() io.Writer {
return os.Stderr
}


@ -1,783 +0,0 @@
package colorable
import (
"bytes"
"fmt"
"io"
"math"
"os"
"strconv"
"strings"
"syscall"
"unsafe"
"github.com/mattn/go-isatty"
)
const (
foregroundBlue = 0x1
foregroundGreen = 0x2
foregroundRed = 0x4
foregroundIntensity = 0x8
foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity)
backgroundBlue = 0x10
backgroundGreen = 0x20
backgroundRed = 0x40
backgroundIntensity = 0x80
backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity)
)
type wchar uint16
type short int16
type dword uint32
type word uint16
type coord struct {
x short
y short
}
type smallRect struct {
left short
top short
right short
bottom short
}
type consoleScreenBufferInfo struct {
size coord
cursorPosition coord
attributes word
window smallRect
maximumWindowSize coord
}
var (
kernel32 = syscall.NewLazyDLL("kernel32.dll")
procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute")
procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute")
)
type Writer struct {
out io.Writer
handle syscall.Handle
lastbuf bytes.Buffer
oldattr word
}
func NewColorable(file *os.File) io.Writer {
if file == nil {
panic("nil passed instead of *os.File to NewColorable()")
}
if isatty.IsTerminal(file.Fd()) {
var csbi consoleScreenBufferInfo
handle := syscall.Handle(file.Fd())
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
return &Writer{out: file, handle: handle, oldattr: csbi.attributes}
} else {
return file
}
}
func NewColorableStdout() io.Writer {
return NewColorable(os.Stdout)
}
func NewColorableStderr() io.Writer {
return NewColorable(os.Stderr)
}
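// color256 maps xterm 256-color palette indices to their 24-bit RGB values.
// It is used to approximate 256-color escape codes with the 16 console colors.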
var color256 = map[int]int{
0: 0x000000,
1: 0x800000,
2: 0x008000,
3: 0x808000,
4: 0x000080,
5: 0x800080,
6: 0x008080,
7: 0xc0c0c0,
8: 0x808080,
9: 0xff0000,
10: 0x00ff00,
11: 0xffff00,
12: 0x0000ff,
13: 0xff00ff,
14: 0x00ffff,
15: 0xffffff,
16: 0x000000,
17: 0x00005f,
18: 0x000087,
19: 0x0000af,
20: 0x0000d7,
21: 0x0000ff,
22: 0x005f00,
23: 0x005f5f,
24: 0x005f87,
25: 0x005faf,
26: 0x005fd7,
27: 0x005fff,
28: 0x008700,
29: 0x00875f,
30: 0x008787,
31: 0x0087af,
32: 0x0087d7,
33: 0x0087ff,
34: 0x00af00,
35: 0x00af5f,
36: 0x00af87,
37: 0x00afaf,
38: 0x00afd7,
39: 0x00afff,
40: 0x00d700,
41: 0x00d75f,
42: 0x00d787,
43: 0x00d7af,
44: 0x00d7d7,
45: 0x00d7ff,
46: 0x00ff00,
47: 0x00ff5f,
48: 0x00ff87,
49: 0x00ffaf,
50: 0x00ffd7,
51: 0x00ffff,
52: 0x5f0000,
53: 0x5f005f,
54: 0x5f0087,
55: 0x5f00af,
56: 0x5f00d7,
57: 0x5f00ff,
58: 0x5f5f00,
59: 0x5f5f5f,
60: 0x5f5f87,
61: 0x5f5faf,
62: 0x5f5fd7,
63: 0x5f5fff,
64: 0x5f8700,
65: 0x5f875f,
66: 0x5f8787,
67: 0x5f87af,
68: 0x5f87d7,
69: 0x5f87ff,
70: 0x5faf00,
71: 0x5faf5f,
72: 0x5faf87,
73: 0x5fafaf,
74: 0x5fafd7,
75: 0x5fafff,
76: 0x5fd700,
77: 0x5fd75f,
78: 0x5fd787,
79: 0x5fd7af,
80: 0x5fd7d7,
81: 0x5fd7ff,
82: 0x5fff00,
83: 0x5fff5f,
84: 0x5fff87,
85: 0x5fffaf,
86: 0x5fffd7,
87: 0x5fffff,
88: 0x870000,
89: 0x87005f,
90: 0x870087,
91: 0x8700af,
92: 0x8700d7,
93: 0x8700ff,
94: 0x875f00,
95: 0x875f5f,
96: 0x875f87,
97: 0x875faf,
98: 0x875fd7,
99: 0x875fff,
100: 0x878700,
101: 0x87875f,
102: 0x878787,
103: 0x8787af,
104: 0x8787d7,
105: 0x8787ff,
106: 0x87af00,
107: 0x87af5f,
108: 0x87af87,
109: 0x87afaf,
110: 0x87afd7,
111: 0x87afff,
112: 0x87d700,
113: 0x87d75f,
114: 0x87d787,
115: 0x87d7af,
116: 0x87d7d7,
117: 0x87d7ff,
118: 0x87ff00,
119: 0x87ff5f,
120: 0x87ff87,
121: 0x87ffaf,
122: 0x87ffd7,
123: 0x87ffff,
124: 0xaf0000,
125: 0xaf005f,
126: 0xaf0087,
127: 0xaf00af,
128: 0xaf00d7,
129: 0xaf00ff,
130: 0xaf5f00,
131: 0xaf5f5f,
132: 0xaf5f87,
133: 0xaf5faf,
134: 0xaf5fd7,
135: 0xaf5fff,
136: 0xaf8700,
137: 0xaf875f,
138: 0xaf8787,
139: 0xaf87af,
140: 0xaf87d7,
141: 0xaf87ff,
142: 0xafaf00,
143: 0xafaf5f,
144: 0xafaf87,
145: 0xafafaf,
146: 0xafafd7,
147: 0xafafff,
148: 0xafd700,
149: 0xafd75f,
150: 0xafd787,
151: 0xafd7af,
152: 0xafd7d7,
153: 0xafd7ff,
154: 0xafff00,
155: 0xafff5f,
156: 0xafff87,
157: 0xafffaf,
158: 0xafffd7,
159: 0xafffff,
160: 0xd70000,
161: 0xd7005f,
162: 0xd70087,
163: 0xd700af,
164: 0xd700d7,
165: 0xd700ff,
166: 0xd75f00,
167: 0xd75f5f,
168: 0xd75f87,
169: 0xd75faf,
170: 0xd75fd7,
171: 0xd75fff,
172: 0xd78700,
173: 0xd7875f,
174: 0xd78787,
175: 0xd787af,
176: 0xd787d7,
177: 0xd787ff,
178: 0xd7af00,
179: 0xd7af5f,
180: 0xd7af87,
181: 0xd7afaf,
182: 0xd7afd7,
183: 0xd7afff,
184: 0xd7d700,
185: 0xd7d75f,
186: 0xd7d787,
187: 0xd7d7af,
188: 0xd7d7d7,
189: 0xd7d7ff,
190: 0xd7ff00,
191: 0xd7ff5f,
192: 0xd7ff87,
193: 0xd7ffaf,
194: 0xd7ffd7,
195: 0xd7ffff,
196: 0xff0000,
197: 0xff005f,
198: 0xff0087,
199: 0xff00af,
200: 0xff00d7,
201: 0xff00ff,
202: 0xff5f00,
203: 0xff5f5f,
204: 0xff5f87,
205: 0xff5faf,
206: 0xff5fd7,
207: 0xff5fff,
208: 0xff8700,
209: 0xff875f,
210: 0xff8787,
211: 0xff87af,
212: 0xff87d7,
213: 0xff87ff,
214: 0xffaf00,
215: 0xffaf5f,
216: 0xffaf87,
217: 0xffafaf,
218: 0xffafd7,
219: 0xffafff,
220: 0xffd700,
221: 0xffd75f,
222: 0xffd787,
223: 0xffd7af,
224: 0xffd7d7,
225: 0xffd7ff,
226: 0xffff00,
227: 0xffff5f,
228: 0xffff87,
229: 0xffffaf,
230: 0xffffd7,
231: 0xffffff,
232: 0x080808,
233: 0x121212,
234: 0x1c1c1c,
235: 0x262626,
236: 0x303030,
237: 0x3a3a3a,
238: 0x444444,
239: 0x4e4e4e,
240: 0x585858,
241: 0x626262,
242: 0x6c6c6c,
243: 0x767676,
244: 0x808080,
245: 0x8a8a8a,
246: 0x949494,
247: 0x9e9e9e,
248: 0xa8a8a8,
249: 0xb2b2b2,
250: 0xbcbcbc,
251: 0xc6c6c6,
252: 0xd0d0d0,
253: 0xdadada,
254: 0xe4e4e4,
255: 0xeeeeee,
}
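// Write implements io.Writer. It scans data for ANSI escape sequences and
// translates cursor movement, erase, and SGR color sequences into Win32
// console API calls; all other bytes are forwarded to the underlying writer.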
func (w *Writer) Write(data []byte) (n int, err error) {
var csbi consoleScreenBufferInfo
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
er := bytes.NewBuffer(data)
loop:
for {
r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
if r1 == 0 {
break loop
}
c1, _, err := er.ReadRune()
if err != nil {
break loop
}
if c1 != 0x1b {
fmt.Fprint(w.out, string(c1))
continue
}
c2, _, err := er.ReadRune()
if err != nil {
w.lastbuf.WriteRune(c1)
break loop
}
if c2 != 0x5b {
w.lastbuf.WriteRune(c1)
w.lastbuf.WriteRune(c2)
continue
}
var buf bytes.Buffer
var m rune
for {
c, _, err := er.ReadRune()
if err != nil {
w.lastbuf.WriteRune(c1)
w.lastbuf.WriteRune(c2)
w.lastbuf.Write(buf.Bytes())
break loop
}
if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
m = c
break
}
buf.Write([]byte(string(c)))
}
var csbi consoleScreenBufferInfo
switch m {
case 'A':
n, err = strconv.Atoi(buf.String())
if err != nil {
continue
}
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
csbi.cursorPosition.y -= short(n)
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
case 'B':
n, err = strconv.Atoi(buf.String())
if err != nil {
continue
}
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
csbi.cursorPosition.y += short(n)
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
case 'C':
n, err = strconv.Atoi(buf.String())
if err != nil {
continue
}
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
csbi.cursorPosition.x += short(n)
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
case 'D':
n, err = strconv.Atoi(buf.String())
if err != nil {
continue
}
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
csbi.cursorPosition.x -= short(n)
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
case 'E':
n, err = strconv.Atoi(buf.String())
if err != nil {
continue
}
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
csbi.cursorPosition.x = 0
csbi.cursorPosition.y += short(n)
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
case 'F':
n, err = strconv.Atoi(buf.String())
if err != nil {
continue
}
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
csbi.cursorPosition.x = 0
csbi.cursorPosition.y -= short(n)
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
case 'G':
n, err = strconv.Atoi(buf.String())
if err != nil {
continue
}
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
csbi.cursorPosition.x = short(n)
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
case 'H':
token := strings.Split(buf.String(), ";")
if len(token) != 2 {
continue
}
n1, err := strconv.Atoi(token[0])
if err != nil {
continue
}
n2, err := strconv.Atoi(token[1])
if err != nil {
continue
}
csbi.cursorPosition.x = short(n2)
csbi.cursorPosition.y = short(n1)
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
case 'J':
n, err := strconv.Atoi(buf.String())
if err != nil {
continue
}
var cursor coord
switch n {
case 0:
cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
case 1:
cursor = coord{x: csbi.window.left, y: csbi.window.top}
case 2:
cursor = coord{x: csbi.window.left, y: csbi.window.top}
}
var count, written dword
count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x)
procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
case 'K':
n, err := strconv.Atoi(buf.String())
if err != nil {
continue
}
var cursor coord
switch n {
case 0:
cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
case 1:
cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
case 2:
cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
}
var count, written dword
count = dword(csbi.size.x - csbi.cursorPosition.x)
procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
case 'm':
attr := csbi.attributes
cs := buf.String()
if cs == "" {
procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr))
continue
}
token := strings.Split(cs, ";")
for i := 0; i < len(token); i += 1 {
ns := token[i]
if n, err = strconv.Atoi(ns); err == nil {
switch {
case n == 0 || n == 100:
attr = w.oldattr
case 1 <= n && n <= 5:
attr |= foregroundIntensity
case n == 7:
attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
case n == 22 || n == 25:
attr |= foregroundIntensity
case n == 27:
attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
case 30 <= n && n <= 37:
attr = (attr & backgroundMask)
if (n-30)&1 != 0 {
attr |= foregroundRed
}
if (n-30)&2 != 0 {
attr |= foregroundGreen
}
if (n-30)&4 != 0 {
attr |= foregroundBlue
}
case n == 38: // set foreground color.
if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") {
if n256, err := strconv.Atoi(token[i+2]); err == nil {
if n256foreAttr == nil {
n256setup()
}
attr &= backgroundMask
attr |= n256foreAttr[n256]
i += 2
}
} else {
attr = attr & (w.oldattr & backgroundMask)
}
case n == 39: // reset foreground color.
attr &= backgroundMask
attr |= w.oldattr & foregroundMask
case 40 <= n && n <= 47:
attr = (attr & foregroundMask)
if (n-40)&1 != 0 {
attr |= backgroundRed
}
if (n-40)&2 != 0 {
attr |= backgroundGreen
}
if (n-40)&4 != 0 {
attr |= backgroundBlue
}
case n == 48: // set background color.
if i < len(token)-2 && token[i+1] == "5" {
if n256, err := strconv.Atoi(token[i+2]); err == nil {
if n256backAttr == nil {
n256setup()
}
attr &= foregroundMask
attr |= n256backAttr[n256]
i += 2
}
} else {
attr = attr & (w.oldattr & foregroundMask)
}
case n == 49: // reset background color.
attr &= foregroundMask
attr |= w.oldattr & backgroundMask
case 90 <= n && n <= 97:
attr = (attr & backgroundMask)
attr |= foregroundIntensity
if (n-90)&1 != 0 {
attr |= foregroundRed
}
if (n-90)&2 != 0 {
attr |= foregroundGreen
}
if (n-90)&4 != 0 {
attr |= foregroundBlue
}
case 100 <= n && n <= 107:
attr = (attr & foregroundMask)
attr |= backgroundIntensity
if (n-100)&1 != 0 {
attr |= backgroundRed
}
if (n-100)&2 != 0 {
attr |= backgroundGreen
}
if (n-100)&4 != 0 {
attr |= backgroundBlue
}
}
procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr))
}
}
}
}
return len(data) - w.lastbuf.Len(), nil
}
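// consoleColor describes one of the 16 classic console colors as an RGB value
// plus the red/green/blue/intensity attribute bits used by the Win32 console.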
type consoleColor struct {
rgb int
red bool
green bool
blue bool
intensity bool
}
func (c consoleColor) foregroundAttr() (attr word) {
if c.red {
attr |= foregroundRed
}
if c.green {
attr |= foregroundGreen
}
if c.blue {
attr |= foregroundBlue
}
if c.intensity {
attr |= foregroundIntensity
}
return
}
func (c consoleColor) backgroundAttr() (attr word) {
if c.red {
attr |= backgroundRed
}
if c.green {
attr |= backgroundGreen
}
if c.blue {
attr |= backgroundBlue
}
if c.intensity {
attr |= backgroundIntensity
}
return
}
var color16 = []consoleColor{
consoleColor{0x000000, false, false, false, false},
consoleColor{0x000080, false, false, true, false},
consoleColor{0x008000, false, true, false, false},
consoleColor{0x008080, false, true, true, false},
consoleColor{0x800000, true, false, false, false},
consoleColor{0x800080, true, false, true, false},
consoleColor{0x808000, true, true, false, false},
consoleColor{0xc0c0c0, true, true, true, false},
consoleColor{0x808080, false, false, false, true},
consoleColor{0x0000ff, false, false, true, true},
consoleColor{0x00ff00, false, true, false, true},
consoleColor{0x00ffff, false, true, true, true},
consoleColor{0xff0000, true, false, false, true},
consoleColor{0xff00ff, true, false, true, true},
consoleColor{0xffff00, true, true, false, true},
consoleColor{0xffffff, true, true, true, true},
}
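// hsv represents a color in hue/saturation/value space. HSV distance is used
// to pick the console color closest to a given RGB value.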
type hsv struct {
h, s, v float32
}
func (a hsv) dist(b hsv) float32 {
dh := a.h - b.h
switch {
case dh > 0.5:
dh = 1 - dh
case dh < -0.5:
dh = -1 - dh
}
ds := a.s - b.s
dv := a.v - b.v
return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv)))
}
func toHSV(rgb int) hsv {
r, g, b := float32((rgb&0xFF0000)>>16)/256.0,
float32((rgb&0x00FF00)>>8)/256.0,
float32(rgb&0x0000FF)/256.0
min, max := minmax3f(r, g, b)
h := max - min
if h > 0 {
if max == r {
h = (g - b) / h
if h < 0 {
h += 6
}
} else if max == g {
h = 2 + (b-r)/h
} else {
h = 4 + (r-g)/h
}
}
h /= 6.0
s := max - min
if max != 0 {
s /= max
}
v := max
return hsv{h: h, s: s, v: v}
}
type hsvTable []hsv
func toHSVTable(rgbTable []consoleColor) hsvTable {
t := make(hsvTable, len(rgbTable))
for i, c := range rgbTable {
t[i] = toHSV(c.rgb)
}
return t
}
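// find returns the entry of color16 whose HSV representation is closest to
// the given RGB value.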
func (t hsvTable) find(rgb int) consoleColor {
hsv := toHSV(rgb)
n := 7
l := float32(5.0)
for i, p := range t {
d := hsv.dist(p)
if d < l {
l, n = d, i
}
}
return color16[n]
}
func minmax3f(a, b, c float32) (min, max float32) {
if a < b {
if b < c {
return a, c
} else if a < c {
return a, b
} else {
return c, b
}
} else {
if a < c {
return b, c
} else if b < c {
return b, a
} else {
return c, a
}
}
}
var n256foreAttr []word
var n256backAttr []word
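// n256setup builds lookup tables mapping each 256-color palette index to the
// nearest foreground and background console attributes.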
func n256setup() {
n256foreAttr = make([]word, 256)
n256backAttr = make([]word, 256)
t := toHSVTable(color16)
for i, rgb := range color256 {
c := t.find(rgb)
n256foreAttr[i] = c.foregroundAttr()
n256backAttr[i] = c.backgroundAttr()
}
}


@ -1,9 +0,0 @@
Copyright (c) Yasuhiro MATSUMOTO <mattn.jp@gmail.com>
MIT License (Expat)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@ -1,37 +0,0 @@
# go-isatty
isatty for golang
## Usage
```go
package main
import (
"fmt"
"github.com/mattn/go-isatty"
"os"
)
func main() {
if isatty.IsTerminal(os.Stdout.Fd()) {
fmt.Println("Is Terminal")
} else {
fmt.Println("Is Not Terminal")
}
}
```
## Installation
```
$ go get github.com/mattn/go-isatty
```
# License
MIT
# Author
Yasuhiro Matsumoto (a.k.a mattn)


@ -1,2 +0,0 @@
// Package isatty implements an interface to isatty
package isatty

Some files were not shown because too many files have changed in this diff.